[Top][All Lists]
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Commit-gnuradio] gr-error-correcting-codes/src/lib/libecc Makefi...
From: |
Michael Dickens |
Subject: |
[Commit-gnuradio] gr-error-correcting-codes/src/lib/libecc Makefi... |
Date: |
Thu, 20 Jul 2006 17:42:54 +0000 |
CVSROOT: /sources/gnuradio
Module name: gr-error-correcting-codes
Changes by: Michael Dickens <michaelld> 06/07/20 17:42:54
Modified files:
src/lib/libecc : Makefile.am decoder.h encoder.cc encoder.h
encoder_convolutional.cc
encoder_convolutional.h
encoder_convolutional_ic1_ic1.cc
Added files:
src/lib/libecc : code_convolutional_trellis.cc
code_convolutional_trellis.h code_types.h
Log message:
Created trellis to simplify both encoding and decoding. Modified
encoder to take advantage of these changes.
CVSWeb URLs:
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/Makefile.am?cvsroot=gnuradio&r1=1.8&r2=1.9
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/decoder.h?cvsroot=gnuradio&r1=1.4&r2=1.5
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/encoder.cc?cvsroot=gnuradio&r1=1.5&r2=1.6
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/encoder.h?cvsroot=gnuradio&r1=1.5&r2=1.6
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/encoder_convolutional.cc?cvsroot=gnuradio&r1=1.11&r2=1.12
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/encoder_convolutional.h?cvsroot=gnuradio&r1=1.7&r2=1.8
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/encoder_convolutional_ic1_ic1.cc?cvsroot=gnuradio&r1=1.6&r2=1.7
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/code_convolutional_trellis.cc?cvsroot=gnuradio&rev=1.1
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/code_convolutional_trellis.h?cvsroot=gnuradio&rev=1.1
http://cvs.savannah.gnu.org/viewcvs/gr-error-correcting-codes/src/lib/libecc/code_types.h?cvsroot=gnuradio&rev=1.1
Patches:
Index: Makefile.am
===================================================================
RCS file:
/sources/gnuradio/gr-error-correcting-codes/src/lib/libecc/Makefile.am,v
retrieving revision 1.8
retrieving revision 1.9
diff -u -b -r1.8 -r1.9
--- Makefile.am 14 Jul 2006 19:49:25 -0000 1.8
+++ Makefile.am 20 Jul 2006 17:42:54 -0000 1.9
@@ -28,19 +28,23 @@
noinst_LTLIBRARIES = libecc.la
libecc_la_SOURCES = \
- encoder.cc code_metrics.cc \
+ code_convolutional_trellis.cc \
+ code_metrics.cc \
+ encoder.cc \
encoder_convolutional.cc \
encoder_convolutional_ic1_ic1.cc \
encoder_turbo.cc \
- decoder.cc decoder_viterbi.cc \
+ decoder.cc \
+ decoder_viterbi.cc \
decoder_viterbi_full_block.cc \
decoder_viterbi_full_block_i1_ic1.cc
noinst_HEADERS = \
- encoder.h code_metrics.h \
+ code_types.h code_metrics.h \
+ code_convolutional_trellis.h \
+ encoder.h encoder_turbo.h \
encoder_convolutional.h \
encoder_convolutional_ic1_ic1.h \
- encoder_turbo.h \
decoder.h decoder_viterbi.h \
decoder_viterbi_full_block.h \
decoder_viterbi_full_block_i1_ic1.h
Index: decoder.h
===================================================================
RCS file: /sources/gnuradio/gr-error-correcting-codes/src/lib/libecc/decoder.h,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -b -r1.4 -r1.5
--- decoder.h 9 Jul 2006 16:15:17 -0000 1.4
+++ decoder.h 20 Jul 2006 17:42:54 -0000 1.5
@@ -23,11 +23,7 @@
#ifndef INCLUDED_DECODER_H
#define INCLUDED_DECODER_H
-#include <sys/types.h>
-
-// the following is the type used for decoder memory
-
-typedef unsigned long memory_t, *memory_ptr_t;
+#include "code_types.h"
// the 'decoder' class is a virtual class upon which all decoder types
// can be built.
Index: encoder.cc
===================================================================
RCS file:
/sources/gnuradio/gr-error-correcting-codes/src/lib/libecc/encoder.cc,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -b -r1.5 -r1.6
--- encoder.cc 18 Jul 2006 02:35:44 -0000 1.5
+++ encoder.cc 20 Jul 2006 17:42:54 -0000 1.6
@@ -25,33 +25,10 @@
#endif
#include <encoder.h>
-#include <assert.h>
#include <iostream>
#define DO_PRINT_DEBUG 0
-#include <mld/n2bs.h>
-
-char
-encoder::sum_bits_mod2
-(memory_t in_mem,
- size_t max_memory)
-{
- // sum the number of set bits, mod 2, for the output bit
-
- // there are faster ways to do this, but this works for now; could
- // certainly do a single inline asm, which most processors provide
- // to deal with summing the bits in an integer.
- // this routine can be overridden by another method if desired.
-
- char t_out_bit = (char)(in_mem & 1);
- for (size_t r = max_memory; r > 0; r--) {
- in_mem >>= 1;
- t_out_bit ^= ((char)(in_mem & 1));
- }
- return (t_out_bit);
-}
-
/*
* encode a certain number of output bits
*
Index: encoder.h
===================================================================
RCS file: /sources/gnuradio/gr-error-correcting-codes/src/lib/libecc/encoder.h,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -b -r1.5 -r1.6
--- encoder.h 12 Jul 2006 23:43:38 -0000 1.5
+++ encoder.h 20 Jul 2006 17:42:54 -0000 1.6
@@ -23,11 +23,7 @@
#ifndef INCLUDED_ENCODER_H
#define INCLUDED_ENCODER_H
-#include <sys/types.h>
-
-// the following is the type used for encoder memory
-
-typedef unsigned long memory_t, *memory_ptr_t;
+#include "code_types.h"
// the 'encoder' class is a virtual class upon which all encoder types
// can be built.
@@ -46,7 +42,6 @@
virtual size_t encode (const char** in_buf,
size_t n_bits_to_input,
char** out_buf);
- virtual char sum_bits_mod2 (memory_t in_mem, size_t max_memory);
/* for remote access to internal info */
Index: encoder_convolutional.cc
===================================================================
RCS file:
/sources/gnuradio/gr-error-correcting-codes/src/lib/libecc/encoder_convolutional.cc,v
retrieving revision 1.11
retrieving revision 1.12
diff -u -b -r1.11 -r1.12
--- encoder_convolutional.cc 18 Jul 2006 02:35:44 -0000 1.11
+++ encoder_convolutional.cc 20 Jul 2006 17:42:54 -0000 1.12
@@ -29,7 +29,7 @@
#include <iostream>
#define DO_TIME_THOUGHPUT 0
-#define DO_PRINT_DEBUG 0
+#define DO_PRINT_DEBUG 1
#include <mld/mld_timer.h>
#include <mld/n2bs.h>
@@ -42,58 +42,31 @@
(int block_size_bits,
int n_code_inputs,
int n_code_outputs,
- const std::vector<int> &code_generators,
+ const std::vector<int>& code_generators,
const std::vector<int>* code_feedback,
bool do_termination,
int start_memory_state,
int end_memory_state)
{
- // do error checking on the input arguments
+ // error checking on the input arguments is done by the trellis class
- // make sure the block length makes sense
-
- if ((block_size_bits < 0) | (block_size_bits > g_max_block_size_bits)) {
- std::cerr << "encoder_convolutional: " <<
- "Requested block length (" << block_size_bits <<
- " bits) must be between 0 and " << g_max_block_size_bits <<
- " bits, with 0 being a streaming encoder.\n";
- assert (0);
- }
-
- // check to make sure the number of input streams makes sense
-
- if ((n_code_inputs <= 0) | (n_code_inputs > g_max_num_streams)) {
- std::cerr << "encoder_convolutional: " <<
- "Requested number of input streams (" <<
- n_code_inputs << ") must be between 1 and " <<
- g_max_num_streams << ".\n";
- assert (0);
- }
-
- // check to make sure the number of output streams makes sense
-
- if ((n_code_outputs <= 0) | (n_code_outputs > g_max_num_streams)) {
- std::cerr << "encoder_convolutional: " <<
- "Requested number of output streams (" <<
- n_code_outputs << ") must be between 1 and " <<
- g_max_num_streams << ".\n";
- assert (0);
- }
-
- // make sure the code_generator is the correct length
-
- if (code_generators.size () !=
- ((size_t)(n_code_inputs * n_code_outputs))) {
- std::cerr << "encoder_convolutional: " <<
- "Number of code generator entries (" << code_generators.size () <<
- ") is not equal to the product of the number of input and output" <<
- " streams (" << (n_code_inputs * n_code_outputs) << ").\n";
- assert (0);
- }
-
- // check for feedback (== NULL or not)
-
- d_do_feedback = (code_feedback != NULL);
+ if (code_feedback)
+ d_trellis = new code_convolutional_trellis
+ (block_size_bits,
+ n_code_inputs,
+ n_code_outputs,
+ code_generators,
+ *code_feedback,
+ do_termination,
+ end_memory_state);
+ else
+ d_trellis = new code_convolutional_trellis
+ (block_size_bits,
+ n_code_inputs,
+ n_code_outputs,
+ code_generators,
+ do_termination,
+ end_memory_state);
// set the initial FSM state to 'init'
@@ -104,397 +77,28 @@
d_block_size_bits = block_size_bits;
d_n_code_inputs = n_code_inputs;
d_n_code_outputs = n_code_outputs;
- d_do_streaming = (block_size_bits == 0);
- d_do_termination = (d_do_streaming == true) ? false : do_termination;
+ d_do_streaming = d_trellis->do_streaming ();
+ d_do_termination = d_trellis->do_termination ();
+ d_total_n_delays = d_trellis->total_n_delays ();
- if (DO_PRINT_DEBUG) {
- std::cout <<
- "d_block_size_bits = " << d_block_size_bits << "\n"
- "d_n_code_inputs = " << d_n_code_inputs << "\n"
- "d_n_code_outputs = " << d_n_code_outputs << "\n"
- "d_do_streaming = " <<
- ((d_do_streaming == true) ? "true" : "false") << "\n"
- "d_do_termination = " <<
- ((d_do_termination == true) ? "true" : "false") << "\n"
- "d_do_feedback = " <<
- ((d_do_feedback == true) ? "true" : "false") << "\n";
- }
-
- // allocate the vectors for doing the encoding. use memory_t (an
- // interger type, at least 32 bits) bits to represent memory and the
- // code, as it makes the operations quite simple the state vectors.
-
- // d_states is a "matrix" [#input by #outputs] containing indices
- // to memory_t's; this is done to make feedback function properly,
- // and doesn't effect the computation time for feedforward. The
- // issue is that any code with the same feedback can use the same
- // memory - thus reducing the actual number of memories required.
- // These overlapping encoders will use the same actual memory, but
- // given that there is no way to know a-priori where they are, use
- // pointers over the full I/O matrix-space to make sure each I/O
- // encoder uses the correct memory.
- // reference the matrix using "maoi(i,o)" ... see .h file.
-
- d_states_ndx.assign (d_n_code_inputs * d_n_code_outputs, 0);
-
- // code generators (feedforward part) are [#inputs x #outputs],
- // always - one for each I/O combination.
- // reference the matrix using "maoi(i,o)" ... see .h file
-
- d_code_generators.assign (d_n_code_inputs * d_n_code_outputs, 0);
-
- // check the feedback for correctness, before anything else, since
- // any feedback impacts the total # of delays in the encoder:
- // without feedback, this is the sum of the individual delays max'ed
- // over each input (if siao) or output (if soai).
-
- if (d_do_feedback == true) {
- memory_t t_OR_all_feedback = 0;
- for (size_t n = 0; n < d_n_code_outputs; n++) {
- for (size_t m = 0; m < d_n_code_inputs; m++) {
- memory_t t_in_code = (*code_feedback)[maoi(m,n)];
-
- // OR this feedback with the overall,
- // to check for any delays used at all
-
- t_OR_all_feedback |= t_in_code;
- }
- }
+ // parse the init state
- // check to see if all the feedback entries were either "0" or "1",
- // which implies no feedback; warn the user in that case and reset
- // the do_feedback parameter to false.
+ memory_t t_mask = (memory_t)((2 << d_total_n_delays) - 1);
+ size_t t_n_states = (1 << d_total_n_delays);
- if ((t_OR_all_feedback | 1) == 1) {
+ if (start_memory_state & t_mask) {
std::cout << "encoder_convolutional: Warning: " <<
- "No feedback is required, ignoring feedback.\n";
- d_do_feedback = false;
- }
- }
-
- d_code_feedback.assign (d_n_code_inputs * d_n_code_outputs, 0);
-
- // copy over the FF code generators
-
- for (size_t n = 0; n < d_n_code_outputs; n++)
- for (size_t m = 0; m < d_n_code_inputs; m++)
- d_code_generators[maio(m,n)] = code_generators[maio(m,n)];
-
- // check the input FF (and FB) code generators for correctness, and
- // find the minimum memory configuration: combining via a single
- // input / all outputs (SIAO), or a single output / all inputs (SOAI).
- //
- // for FF only, look over both the SOAI and SIAO realizations to
- // find the minimum total # of delays, and use that realization
- // (SOAI is preferred if total # of delays is equal, since it's much
- // simpler to implement).
- //
- // for FB:
- // for SIAO, check each input row (all outputs for a given input)
- // for unique feedback; duplicate feedback entries can be
- // combined into a single computation to reduce total # of delays.
- // for SOAI: check each output column (all inputs for a given
- // output) for unique feedback; duplicate feedback entries can
- // be combined into a simgle computation (ditto).
-
- // check for SOAI all '0' output
+ "provided end memory state out (" << end_memory_state <<
+ ") is out of the state range [0, " <<
+ (t_n_states-1) << "]; masking off the unused bits.\n";
- for (size_t n = 0; n < d_n_code_outputs; n++) {
- memory_t t_all_inputs_zero = 0;
- for (size_t m = 0; m < d_n_code_inputs; m++)
- t_all_inputs_zero |= d_code_generators[maio(m,n)];
-
- // check this input to see if all encoders were '0'; this might be
- // OK for some codes, but warn the user just in case
-
- if (t_all_inputs_zero == 0) {
- std::cout << "encoder_convolutional: Warning:"
- "Output " << n+1 << " (of " << d_n_code_outputs <<
- ") will always be 0.\n";
- }
- }
-
- // check for SIAO all '0' input
-
- for (size_t m = 0; m < d_n_code_inputs; m++) {
- memory_t t_all_outputs_zero = 0;
- for (size_t n = 0; n < d_n_code_outputs; n++)
- t_all_outputs_zero |= d_code_generators[maio(m,n)];
-
- // check this input to see if all encoders were '0'; this might be
- // OK for some codes, but warn the user just in case
-
- if (t_all_outputs_zero == 0) {
- std::cout << "encoder_convolutional: Warning:"
- "Input " << m+1 << " (of " << d_n_code_inputs <<
- ") will not be used; all encoders are '0'.\n";
- }
+ start_memory_state &= t_mask;
}
- // create the inputs and outputs buffers
+ d_init_state = start_memory_state;
d_current_inputs.assign (d_n_code_inputs, 0);
d_current_outputs.assign (d_n_code_outputs, 0);
-
- // check and compute memory requirements in order to determine which
- // realization uses the least memory; create and save findings to
- // not have to re-do these computations later.
-
- // single output, all inputs (SOAI) realization:
- // reset the global parameters
-
- d_code_feedback.assign (d_n_code_inputs * d_n_code_outputs, 0);
- d_n_delays.assign (d_n_code_inputs * d_n_code_outputs, 0);
- d_io_num.assign (d_n_code_inputs * d_n_code_outputs, 0);
- d_states_ndx.assign (d_n_code_inputs * d_n_code_outputs, 0);
- d_max_delay = d_total_n_delays = d_n_memories = 0;
- d_do_encode_soai = true;
-
- for (size_t n = 0; n < d_n_code_outputs; n++) {
- size_t t_max_mem = 0;
- size_t t_n_unique_fb_prev_start = d_n_memories;
-
- for (size_t m = 0; m < d_n_code_inputs; m++) {
- get_memory_requirements (m, n, t_max_mem,
- t_n_unique_fb_prev_start, code_feedback);
- if (d_do_feedback == false) {
- d_states_ndx[maio(m,n)] = n;
- }
- }
- if (d_do_feedback == false) {
- // not feedback; just store memory requirements for this output
- d_total_n_delays += t_max_mem;
- d_n_delays[n] = t_max_mem;
- d_io_num[n] = n;
- }
- }
- if (d_do_feedback == false) {
- d_n_memories = d_n_code_outputs;
- }
-
- // store the parameters for SOAI
-
- std::vector<size_t> t_fb_generators_soai, t_n_delays_soai, t_io_num_soai;
- std::vector<size_t> t_states_ndx_soai;
- size_t t_max_delay_soai, t_total_n_delays_soai, t_n_memories_soai;
-
- t_fb_generators_soai.assign (d_code_feedback.size (), 0);
- t_fb_generators_soai = d_code_feedback;
- t_n_delays_soai.assign (d_n_delays.size (), 0);
- t_n_delays_soai = d_n_delays;
- t_io_num_soai.assign (d_io_num.size (), 0);
- t_io_num_soai = d_io_num;
- t_states_ndx_soai.assign (d_states_ndx.size (), 0);
- t_states_ndx_soai = d_states_ndx;
-
- t_n_memories_soai = d_n_memories;
- t_total_n_delays_soai = d_total_n_delays;
- t_max_delay_soai = d_max_delay;
-
- // single input, all outputs (SIAO) realization
- // reset the global parameters
-
- d_code_feedback.assign (d_n_code_inputs * d_n_code_outputs, 0);
- d_n_delays.assign (d_n_code_inputs * d_n_code_outputs, 0);
- d_io_num.assign (d_n_code_inputs * d_n_code_outputs, 0);
- d_states_ndx.assign (d_n_code_inputs * d_n_code_outputs, 0);
- d_max_delay = d_total_n_delays = d_n_memories = 0;
- d_do_encode_soai = false;
-
- for (size_t m = 0; m < d_n_code_inputs; m++) {
- size_t t_max_mem = 0;
- size_t t_n_unique_fb_prev_start = d_n_memories;
-
- for (size_t n = 0; n < d_n_code_outputs; n++) {
- get_memory_requirements (m, n, t_max_mem,
- t_n_unique_fb_prev_start, code_feedback);
- if (d_do_feedback == false) {
- d_states_ndx[maio(m,n)] = m;
- }
- }
- if (d_do_feedback == false) {
- // not feedback; just store memory requirements for this output
- d_total_n_delays += t_max_mem;
- d_n_delays[m] = t_max_mem;
- d_io_num[m] = m;
- }
- }
- if (d_do_feedback == false) {
- d_n_memories = d_n_code_inputs;
- }
-
- if (DO_PRINT_DEBUG) {
- std::cout <<
- " t_total_n_delays_siao = " << d_total_n_delays << "\n"
- " t_total_n_delays_soai = " << t_total_n_delays_soai << "\n";
- }
-
- // pick which realization to use; soai is preferred since it's faster
- // ... but unfortunately it's also less likely
-
- if (d_total_n_delays < t_total_n_delays_soai) {
- // use siao
- d_do_encode_soai = false;
- // nothing else to do yet, since the global variables already hold
- // the correct values.
- } else {
- // use soai
- d_do_encode_soai = true;
- d_code_feedback = t_fb_generators_soai;
- d_n_delays = t_n_delays_soai;
- d_io_num = t_io_num_soai;
- d_states_ndx = t_states_ndx_soai;
- d_n_memories = t_n_memories_soai;
- d_total_n_delays = t_total_n_delays_soai;
- d_max_delay = t_max_delay_soai;
- }
-
- // make sure the block length makes sense, #2
-
- if ((d_do_streaming == false) & (d_block_size_bits < d_max_delay)) {
- std::cerr << "encoder_convolutional: " <<
- "Requested block length (" << d_block_size_bits <<
- " bit" << (d_block_size_bits > 1 ? "s" : "") <<
- ") must be at least 1 memory length (" << d_max_delay <<
- " bit" << (d_max_delay > 1 ? "s" : "") <<
- " for this code) when doing block coding.\n";
- assert (0);
- }
-
- if (d_do_encode_soai == false) {
- // create the max_mem_mask to be used in encoding
-
- d_max_mem_masks.assign (d_n_memories, 0);
-
- for (size_t m = 0; m < d_n_memories; m++) {
- if (d_n_delays[m] == sizeof (memory_t) * g_num_bits_per_byte)
- d_max_mem_masks[m] = ((memory_t) -1);
- else
- d_max_mem_masks[m] = (memory_t)((2 << (d_n_delays[m])) - 1);
- }
- }
-
- if (DO_PRINT_DEBUG) {
- std::cout <<
- " d_n_memories = " << d_n_memories << "\n"
- " d_total_n_delays = " << d_total_n_delays << "\n"
- " d_max_delay = " << d_max_delay << "\n"
- " d_do_encode_soai = " <<
- ((d_do_encode_soai == true) ? "true" : "false") << "\n";
- }
-
-// FIXME: STILL NEED TO parse START AND END MEMORY STATES;
-
- d_init_states.assign (d_n_memories, 0);
- d_term_states.assign (d_n_memories, 0);
-}
-
-void
-encoder_convolutional::get_memory_requirements
-(size_t m, // input number
- size_t n, // output number
- size_t& t_max_mem,
- size_t& t_n_unique_fb_prev_start,
- const std::vector<int>* code_feedback)
-{
- size_t t_in_code = d_code_generators[maio(m,n)];
-
- // find the memory requirement for this code generator
-
- size_t t_code_mem_ff = max_bit_position (t_in_code);
-
- // check to see if this is bigger than any others in this row/column
-
- if (t_code_mem_ff > t_max_mem)
- t_max_mem = t_code_mem_ff;
-
- if (DO_PRINT_DEBUG) {
- std::cout << "c_g[" << m << "][" << n << "]{" <<
- maio(m,n) << "} = " << n2bs(t_in_code, 8) <<
- ", code_mem = " << t_code_mem_ff;
- }
-
- // check the feedback portion, if it exists;
- // for soai, check all the inputs which generate this output for
- // uniqueness; duplicate entries can be combined to reduce total
- // # of memories as well as required computations.
-
- if (d_do_feedback == true) {
- if (DO_PRINT_DEBUG) {
- std::cout << "\n";
- }
-
- // get the FB code; AND off the LSB for correct functionality
- // during internal computations.
-
- t_in_code = ((memory_t)((*code_feedback)[maio(m,n)]));
- t_in_code &= ((memory_t)(-2));
-
- // find the memory requirement
-
- size_t t_code_mem_fb = max_bit_position (t_in_code);
-
- if (DO_PRINT_DEBUG) {
- std::cout << "c_f[" << m << "][" << n << "]{" <<
- maio(m,n) << "} = " << n2bs(t_in_code, 8) <<
- ", code_mem = " << t_code_mem_fb;
- }
-
- // check to see if this feedback is unique
-
- size_t l_n_unique_fb = t_n_unique_fb_prev_start;
- while (l_n_unique_fb < d_n_memories) {
- if (d_code_feedback[l_n_unique_fb] == t_in_code)
- break;
- l_n_unique_fb++;
- }
- if (l_n_unique_fb == d_n_memories) {
-
- // this is a unique feedback;
-
- d_code_feedback[l_n_unique_fb] = t_in_code;
- d_n_delays[l_n_unique_fb] = t_code_mem_fb;
-
- // increase the number of unique feedback codes
-
- d_n_memories++;
-
- // store memory requirements for this output
-
- if (t_max_mem < t_code_mem_fb)
- t_max_mem = t_code_mem_fb;
- d_total_n_delays += t_max_mem;
-
- if (DO_PRINT_DEBUG) {
- std::cout << ", uq # " << l_n_unique_fb <<
- ", tot_mem = " << d_total_n_delays;
- }
- } else {
- // not a unique feedback, but the FF might require more memory
-
- if (DO_PRINT_DEBUG) {
- std::cout << ", !uq # " << l_n_unique_fb <<
- " = " << d_n_delays[l_n_unique_fb];
- }
-
- if (d_n_delays[l_n_unique_fb] < t_code_mem_ff) {
- d_total_n_delays += (t_code_mem_ff - d_n_delays[l_n_unique_fb]);
- d_n_delays[l_n_unique_fb] = t_code_mem_ff;
-
- if (DO_PRINT_DEBUG) {
- std::cout << " => " << d_n_delays[l_n_unique_fb] <<
- ", tot_mem = " << d_total_n_delays;
- }
- }
- }
- d_io_num[l_n_unique_fb] = ((d_do_encode_soai == true) ? n : m);
- d_states_ndx[maio(m,n)] = l_n_unique_fb;
- }
- if (DO_PRINT_DEBUG) {
- std::cout << "\n";
- }
- if (d_max_delay < t_max_mem)
- d_max_delay = t_max_mem;
}
void
@@ -530,7 +134,7 @@
// copy the init states to the current memory
- d_memory = d_init_states;
+ d_memory = d_init_state;
// if not doing streaming, things to do; else nothing more do
@@ -579,11 +183,11 @@
// number of output bits left
if (d_do_termination == true) {
- encode_loop (in_buf, out_buf, &d_n_output_bits_left, d_max_delay);
+ encode_loop (in_buf, out_buf, &d_n_output_bits_left, d_total_n_delays);
// finished this loop; check for jumping to the next state
- if (d_n_enc_bits == d_max_delay)
+ if (d_n_enc_bits == d_total_n_delays)
d_fsm_state = fsm_enc_conv_init;
} else {
@@ -625,120 +229,16 @@
}
void
-encoder_convolutional::encode_loop_soai
-(const char** in_buf,
- char** out_buf,
- size_t* which_counter,
- size_t how_many)
-{
- // single-output, all inputs; no feedback
-
- if (DO_PRINT_DEBUG) {
- std::cout << "Starting encode_loop_soai.\n";
- }
-
- while (((*which_counter) > 0) & (d_n_enc_bits < how_many)) {
- if (DO_PRINT_DEBUG) {
- std::cout << "*w_c = " << (*which_counter) << ", "
- "# enc_bits = " << d_n_enc_bits << " of " << how_many << ".\n"
- "Getting new inputs.\n";
- }
-
- // get the next set of input bits from all streams;
- // written into d_current_inputs
-
- get_next_inputs (in_buf);
-
- // shift memories down by 1 bit to make room for feedback; no
- // masking required.
-
- for (size_t p = 0; p < d_n_memories; p++) {
- if (DO_PRINT_DEBUG) {
- std::cout << "m_i[" << p << "] = " <<
- n2bs(d_memory[p], 1+d_n_delays[p]);
- }
-
- d_memory[p] >>= 1;
-
- if (DO_PRINT_DEBUG) {
- std::cout << " >>= 1 -> " <<
- n2bs(d_memory[p], 1+d_n_delays[p]) << "\n";
- }
- }
-
- // for each input bit, if that bit's a '1', then XOR the code
- // generators into the correct state's memory.
-
- for (size_t m = 0; m < d_n_code_inputs; m++) {
- if (DO_PRINT_DEBUG) {
- std::cout << "c_i[" << m << "] = " <<
- n2bs(d_current_inputs[m],1);
- }
- if (d_current_inputs[m] == 1) {
- if (DO_PRINT_DEBUG) {
- std::cout << "\n";
- }
- for (size_t n = 0; n < d_n_code_outputs; n++) {
- if (DO_PRINT_DEBUG) {
- std::cout << "m_i[s_ndx[" << m << "][" << n << "] == " <<
- d_states_ndx[maio(m,n)] << "] = " <<
- n2bs(d_memory[d_states_ndx[maio(m,n)]],
- 1+d_n_delays[d_states_ndx[maio(m,n)]]);
- }
-
- d_memory[d_states_ndx[maio(m,n)]] ^= d_code_generators[maio(m,n)];
-
- if (DO_PRINT_DEBUG) {
- std::cout << " ^= c_g[][] == " <<
- n2bs(d_code_generators[maio(m,n)],
- 1+d_n_delays[d_states_ndx[maio(m,n)]]) <<
- " -> " << n2bs(d_memory[d_states_ndx[maio(m,n)]],
- 1+d_n_delays[d_states_ndx[maio(m,n)]]) << "\n";
- }
- }
- } else if (DO_PRINT_DEBUG) {
- std::cout << " ... nothing to do\n";
- }
- }
-
- for (size_t p = 0; p < d_n_code_outputs; p++) {
- d_current_outputs[p] = 0;
- }
-
- // create the output bits, by XOR'ing the individual unique
- // memory(ies) into the correct output bit
-
- for (size_t p = 0; p < d_n_memories; p++) {
- d_current_outputs[d_io_num[p]] ^= ((char)(d_memory[p] & 1));
- }
-
- // write the bits in d_current_outputs into the output buffer
-
- write_output_bits (out_buf);
-
- // increment the number of encoded bits for the current block, and
- // the total number of bits for this running of "encode()"
-
- d_n_enc_bits++;
- d_total_n_enc_bits++;
- }
-
- if (DO_PRINT_DEBUG) {
- std::cout << "ending encode_loop_soai.\n";
- }
-}
-
-void
-encoder_convolutional::encode_loop_soai_fb
+encoder_convolutional::encode_loop
(const char** in_buf,
char** out_buf,
size_t* which_counter,
size_t how_many)
{
- // single-output, all inputs; with feedback
+ // generic encode_loop
if (DO_PRINT_DEBUG) {
- std::cout << "Starting encode_loop_soai_fb.\n";
+ std::cout << "Starting encode_loop.\n";
}
while (((*which_counter) > 0) & (d_n_enc_bits < how_many)) {
@@ -753,217 +253,11 @@
get_next_inputs (in_buf);
- // shift memories down by 1 bit to make room for feedback; no
- // masking required.
-
- for (size_t p = 0; p < d_n_memories; p++) {
- if (DO_PRINT_DEBUG) {
- std::cout << "m_i[" << p << "] = " << d_memory[p];
- }
-
- d_memory[p] >>= 1;
-
- if (DO_PRINT_DEBUG) {
- std::cout << " -> " << d_memory[p] << "\n";
- }
- }
-
- // for each input bit, if that bit's a '1', then XOR the code
- // generators into the correct state's memory.
-
- for (size_t m = 0; m < d_n_code_inputs; m++) {
- if (d_current_inputs[m] == 1) {
- for (size_t n = 0; n < d_n_code_outputs; n++) {
- d_memory[d_states_ndx[maio(m,n)]] ^= d_code_generators[maio(m,n)];
- }
- }
- }
-
- for (size_t p = 0; p < d_n_code_outputs; p++) {
- d_current_outputs[p] = 0;
- }
-
- // create the output bits, by XOR'ing the individual unique
- // memory(ies) into the correct output bit
+ // use the trellis to do the encoding;
+ // updates the input memory to the new memory state for the given input
+ // and writes the output bits to the current_outputs
- for (size_t p = 0; p < d_n_memories; p++) {
- d_current_outputs[d_io_num[p]] ^= ((char)(d_memory[p] & 1));
- }
-
- // now that the output bits are fully created, XOR the FB back
- // into the memories; the feedback bits have the LSB (&1) masked
- // off already so that it doesn't contribute.
-
- for (size_t p = 0; p < d_n_memories; p++) {
- if (d_current_outputs[d_io_num[p]] == 1) {
- d_memory[p] ^= d_code_feedback[p];
- }
- }
-
- // write the bits in d_current_outputs into the output buffer
-
- write_output_bits (out_buf);
-
- // increment the number of encoded bits for the current block, and
- // the total number of bits for this running of "encode()"
-
- d_n_enc_bits++;
- d_total_n_enc_bits++;
- }
-
- if (DO_PRINT_DEBUG) {
- std::cout << "ending encode_loop_soai.\n";
- }
-}
-
-void
-encoder_convolutional::encode_loop_siao
-(const char** in_buf,
- char** out_buf,
- size_t* which_counter,
- size_t how_many)
-{
- // single input, all outputs; no feedback
-
- if (DO_PRINT_DEBUG) {
- std::cout << "starting encode_loop_siao.\n";
- }
-
- while (((*which_counter) > 0) & (d_n_enc_bits < how_many)) {
- if (DO_PRINT_DEBUG) {
- std::cout << "*w_c = " << (*which_counter) << ", "
- "# enc_bits = " << d_n_enc_bits << " of " << how_many << ".\n"
- "Getting new inputs.\n";
- }
-
- // get the next set of input bits from all streams;
- // written into d_current_inputs
-
- get_next_inputs (in_buf);
-
- // update the memories with the current input bits;
- // pre-shift delays instead of post-shift to gather loops.
-
- // for each unique memory (1 per input), shift the delays and mask
- // off the extra high bits; then XOR in the input bit.
-
- for (size_t p = 0; p < d_n_memories; p++) {
- if (DO_PRINT_DEBUG) {
- std::cout << "t_m[" << p << "] = (" <<
- n2bs(d_memory[p],d_n_delays[p]+1) <<
- " << 1) & mask = " <<
- n2bs(d_max_mem_masks[p],d_n_delays[p]+1);
- }
-
- memory_t t_mem = (d_memory[p] << 1) & d_max_mem_masks[p];
- d_memory[p] = t_mem ^ ((memory_t)(d_current_inputs[d_io_num[p]]));
-
- if (DO_PRINT_DEBUG) {
- std::cout << ", -> t_m = " << n2bs(t_mem,d_n_delays[p]+1) <<
- ", t_i[" << d_io_num[p] << "] = " <<
- n2bs(d_current_inputs[d_io_num[p]],2) <<
- " -> d_m_o = " <<
- n2bs(d_memory[p],d_n_delays[p]+1) << "\n";
- }
- }
-
- // create the output bits: for each output, loop over all inputs,
- // find the output bits for each encoder, and XOR each together
- // then sum (would usually be sum then XOR, but they're mutable in
- // base-2 and it's faster this way).
-
- for (size_t n = 0; n < d_n_code_outputs; n++) {
- memory_t t_mem = 0;
- for (size_t m = 0; m < d_n_code_inputs; m++) {
- t_mem ^= ((d_memory[d_states_ndx[maio(m,n)]]) &
- d_code_generators[maio(m,n)]);
- }
- d_current_outputs[n] = sum_bits_mod2 (t_mem, d_max_delay);
- }
-
-#if 0
- for (size_t p = 0; p < d_n_memories; p++) {
- memory_t t_mem = (d_memory[p] << 1);
- d_memory[p] = (t_mem & d_max_mem_masks[p]);
- }
-#endif
-
- // write the bits in d_current_outputs into the output buffer
-
- write_output_bits (out_buf);
-
- // increment the number of encoded bits for the current block, and
- // the total number of bits for this running of "encode()"
-
- d_n_enc_bits++;
- d_total_n_enc_bits++;
- }
-
- if (DO_PRINT_DEBUG) {
- std::cout << "ending encode_loop_siao.\n";
- }
-}
-
-void
-encoder_convolutional::encode_loop_siao_fb
-(const char** in_buf,
- char** out_buf,
- size_t* which_counter,
- size_t how_many)
-{
- // single input, all outputs; with feedback
-
- if (DO_PRINT_DEBUG) {
- std::cout << "starting encode_loop_siao_fb.\n";
- }
-
- while (((*which_counter) > 0) & (d_n_enc_bits < how_many)) {
- if (DO_PRINT_DEBUG) {
- std::cout << "*w_c = " << (*which_counter) << ", "
- "# enc_bits = " << d_n_enc_bits << " of " << how_many << ".\n"
- "Getting new inputs.\n";
- }
-
- // get the next set of input bits from all streams;
- // written into d_current_inputs
-
- get_next_inputs (in_buf);
-
- // update the memories with the current input bits;
- // pre-shift delays instead of post-shift to gather loops.
-
- // for each unique memory (1 per input), shift the delays and mask
- // off the extra high bits; then XOR in the input bit.
- // with FB: find the feedback bit, and OR it into the input bit's slot;
-
- for (size_t p = 0; p < d_n_memories; p++) {
- memory_t t_mem = (d_memory[p] << 1) & d_max_mem_masks[p];
- memory_t t_fb = t_mem & d_code_feedback[p];
- char t_fb_bit = sum_bits_mod2 (t_fb, d_max_delay);
- t_mem |= ((memory_t) t_fb_bit);
- d_memory[p] = t_mem ^ ((memory_t)(d_current_inputs[d_io_num[p]]));
- }
-
- // create the output bits: for each output, loop over all inputs,
- // find the output bits for each encoder, and XOR each together
- // then sum (would usually be sum then XOR, but they're mutable in
- // base-2 and it's faster this way).
-
- for (size_t n = 0; n < d_n_code_outputs; n++) {
- memory_t t_mem = 0;
- for (size_t m = 0; m < d_n_code_inputs; m++) {
- t_mem ^= ((d_memory[d_states_ndx[maio(m,n)]]) &
- d_code_generators[maio(m,n)]);
- }
- d_current_outputs[n] = sum_bits_mod2 (t_mem, d_max_delay);
- }
-
-#if 0
- for (size_t p = 0; p < d_n_memories; p++) {
- memory_t t_mem = (d_memory[p] << 1);
- d_memory[p] = (t_mem & d_max_mem_masks[p]);
- }
-#endif
+ d_trellis->encode_lookup (d_memory, d_current_inputs, d_current_outputs);
// write the bits in d_current_outputs into the output buffer
@@ -977,7 +271,7 @@
}
if (DO_PRINT_DEBUG) {
- std::cout << "ending encode_loop_siao_fb.\n";
+ std::cout << "ending encode_loop.\n";
}
}
Index: encoder_convolutional.h
===================================================================
RCS file:
/sources/gnuradio/gr-error-correcting-codes/src/lib/libecc/encoder_convolutional.h,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -b -r1.7 -r1.8
--- encoder_convolutional.h 16 Jul 2006 20:53:59 -0000 1.7
+++ encoder_convolutional.h 20 Jul 2006 17:42:54 -0000 1.8
@@ -24,7 +24,7 @@
#define INCLUDED_ENCODER_CONVOLUTIONAL_H
#include "encoder.h"
-#include <vector>
+#include "code_convolutional_trellis.h"
class encoder_convolutional : public encoder
{
@@ -74,7 +74,7 @@
*/
public:
- encoder_convolutional
+ inline encoder_convolutional
(int block_size_bits,
int n_code_inputs,
int n_code_outputs,
@@ -86,7 +86,7 @@
n_code_inputs,
n_code_outputs,
code_generators,
- NULL,
+ 0,
do_termination,
start_memory_state,
end_memory_state);};
@@ -106,12 +106,12 @@
* the code_generator.
*/
- encoder_convolutional
+ inline encoder_convolutional
(int block_size_bits,
int n_code_inputs,
int n_code_outputs,
- const std::vector<int> &code_generators,
- const std::vector<int> &code_feedback,
+ const std::vector<int>& code_generators,
+ const std::vector<int>& code_feedback,
bool do_termination = true,
int start_memory_state = 0,
int end_memory_state = 0)
@@ -124,12 +124,12 @@
start_memory_state,
end_memory_state);};
- virtual ~encoder_convolutional () {};
+ virtual ~encoder_convolutional () {delete d_trellis;};
/* for remote access to internal info */
inline const bool do_termination () {return (d_do_termination);};
- inline const bool do_feedback () {return (d_do_feedback);};
+ inline const bool do_feedback () {return (d_trellis->do_feedback());};
inline const bool do_streaming () {return (d_do_streaming);};
inline const size_t total_n_delays () {return (d_total_n_delays);};
@@ -150,50 +150,6 @@
fsm_enc_conv_init, fsm_enc_conv_doing_input, fsm_enc_conv_doing_term
};
-/*
- * maio(i,o): matrix access into a vector, knowing the # of code
- * outputs (from inside the class). References into a vector with
- * code inputs ordered by code output.
- *
- * 'i' is the 1st dimension - faster memory - the code input
- * 'o' is the 2nd dimension - slower memory - the code output
- *
- * returns ((o*n_code_inputs) + i)
- */
-
- inline size_t maio(size_t i, size_t o) {return ((o*d_n_code_inputs) + i);};
-
-/*
- * maoi(i,o): matrix access into a vector, knowing the # of code
- * inputs (from inside the class). References into a vector with
- * code outputs ordered by code input.
- *
- * 'o' is the 1st dimension - faster memory - the code output
- * 'i' is the 2nd dimension - slower memory - the code input
- *
- * returns ((i*n_code_outputs) + o)
- */
-
- inline size_t maoi(size_t i, size_t o) {return ((i*d_n_code_outputs) + o);};
-
-/*
- * max_bit_position (x): returns the bit-number of the highest "1" bit
- * in the provided value, such that the LSB would return 0 and the MSB
- * of a long would return 31.
- */
-
- inline size_t max_bit_position (memory_t x)
- {
- size_t t_code_mem = 0;
- memory_t t_in_code = x >> 1;
- while (t_in_code != 0) {
- t_in_code >>= 1;
- t_code_mem++;
- }
-
- return (t_code_mem);
- }
-
// methods defined in this class
void encoder_convolutional_init (int block_size_bits,
@@ -207,30 +163,7 @@
virtual void encode_private (const char** in_buf, char** out_buf);
- inline void encode_loop (const char** in_buf, char** out_buf,
- size_t* which_counter, size_t how_many) {
- if (d_do_encode_soai == true) {
- if (d_do_feedback == true) {
- encode_loop_soai_fb (in_buf, out_buf, which_counter, how_many);
- } else {
- encode_loop_soai (in_buf, out_buf, which_counter, how_many);
- }
- } else {
- if (d_do_feedback == true) {
- encode_loop_siao_fb (in_buf, out_buf, which_counter, how_many);
- } else {
- encode_loop_siao (in_buf, out_buf, which_counter, how_many);
- }
- }
- };
-
- virtual void encode_loop_soai (const char** in_buf, char** out_buf,
- size_t* which_counter, size_t how_many);
- virtual void encode_loop_siao (const char** in_buf, char** out_buf,
- size_t* which_counter, size_t how_many);
- virtual void encode_loop_soai_fb (const char** in_buf, char** out_buf,
- size_t* which_counter, size_t how_many);
- virtual void encode_loop_siao_fb (const char** in_buf, char** out_buf,
+ virtual void encode_loop (const char** in_buf, char** out_buf,
size_t* which_counter, size_t how_many);
inline void get_next_inputs (const char** in_buf) {
@@ -265,67 +198,16 @@
// variables
fsm_enc_conv_t d_fsm_state;
- bool d_do_streaming, d_do_termination, d_do_feedback, d_do_encode_soai;
-
- // "max_delay" is the max # of delays for all unique generators (ff and fb),
- // needed to determine (e.g.) termination
-
- size_t d_max_delay;
-
- // "n_memories" is the number of unique memories as determined by
- // either the feedforward or feedback generators (not both). For
- // FF, this number equals either the number of code inputs (for
- // SIAO) or outputs (for SOAI).
-
- size_t d_n_memories;
+ bool d_do_streaming, d_do_termination;
// "total_n_delays" is the total # of delays, needed to determine the
// # of states in the decoder
size_t d_total_n_delays;
- // "code generators" are stored internally in "maXY(i,o)" order this
- // allows for looping over all a single output and computing all
- // input parts sequentially.
-
- std::vector<memory_t> d_code_generators;
-
- // "feedback" are found as "d_n_memories" unique entries, and stored
- // in at most 1 entry per I/O combination. Listed in the same order
- // as "d_io_num" entries show.
-
- std::vector<memory_t> d_code_feedback;
-
- // "n_delays" is a vector, the number of delays for the FB generator
- // in the same [] location; also relates directly to the
- // "max_mem_masks" in the same [] location.
+ // the current state of the encoder (all delays / memories)
- std::vector<size_t> d_n_delays;
-
- // "io_num" is a vector, mapping which FB in SIAO goes with which
- // input, or which FB in SOAI goes with which output
-
- std::vector<size_t> d_io_num;
-
- // "max_mem_masks" are the memory masks, one per unique FB for SIAO;
- // otherwise not used.
-
- std::vector<memory_t> d_max_mem_masks;
-
- // "states_ndx" is a "matrix" whose contents are the indices into
- // the "io_num" vector, telling which input goes with which
- // state; uses the same "maXY(i,o)" as the code generators.
-
- std::vector<size_t> d_states_ndx;
-
- // "memory" are the actual stored delay bits, one memory for each
- // unique FF or FB code generator;
- // "init_states" are the user-provided init states - and
- // "term_states" are the user-provided termination states -
- // interpreted w/r.t. the actual FF and FB code generators and SOAI
- // / SIAO realization;
-
- std::vector<memory_t> d_memory, d_init_states, d_term_states;
+ memory_t d_memory;
// "inputs" are the current input bits, in the LSB (&1) of each "char"
@@ -334,6 +216,15 @@
// "outputs" are the current output bits, in the LSB (&1) of each "char"
std::vector<char> d_current_outputs;
+
+ // "trellis" is the code trellis for the given input parameters
+
+ code_convolutional_trellis* d_trellis;
+
+ // "init_states" are the user-provided init states,
+ // interpreted w/r.t. the actual trellis;
+
+ memory_t d_init_state;
};
#endif /* INCLUDED_ENCODER_CONVOLUTIONAL_H */
Index: encoder_convolutional_ic1_ic1.cc
===================================================================
RCS file:
/sources/gnuradio/gr-error-correcting-codes/src/lib/libecc/encoder_convolutional_ic1_ic1.cc,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -b -r1.6 -r1.7
--- encoder_convolutional_ic1_ic1.cc 18 Jul 2006 02:35:44 -0000 1.6
+++ encoder_convolutional_ic1_ic1.cc 20 Jul 2006 17:42:54 -0000 1.7
@@ -74,7 +74,7 @@
size_t n_extra = 0;
if (d_fsm_state == fsm_enc_conv_doing_term) {
- n_extra = d_max_delay - d_n_enc_bits;
+ n_extra = d_total_n_delays - d_n_enc_bits;
}
// check to see if this is enough; return 0 if it is.
@@ -88,7 +88,7 @@
// find the number of blocks of data which could be processed
- size_t t_n_output_bits_per_block = d_block_size_bits + d_max_delay;
+ size_t t_n_output_bits_per_block = d_block_size_bits + d_total_n_delays;
// get the base number of input items required for the given
// number of blocks to be generated
Index: code_convolutional_trellis.cc
===================================================================
RCS file: code_convolutional_trellis.cc
diff -N code_convolutional_trellis.cc
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ code_convolutional_trellis.cc 20 Jul 2006 17:42:54 -0000 1.1
@@ -0,0 +1,1028 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2006 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "code_convolutional_trellis.h"
+#include <assert.h>
+#include <iostream>
+
+#define DO_TIME_THOUGHPUT 0
+#define DO_PRINT_DEBUG 1
+#define DO_PRINT_DEBUG_ENCODE 1
+
+#include <mld/mld_timer.h>
+#include <mld/n2bs.h>
+
+static const int g_max_block_size_bits = 10000000;
+static const int g_max_num_streams = 10;
+static const int g_num_bits_per_byte = 8;
+
+/*
+ * sum_bits_mod2:
+ * sum the number of set bits, mod 2, for the output bit
+ */
+
+char
+code_convolutional_trellis::sum_bits_mod2
+(memory_t in_mem,
+ size_t max_memory)
+{
+ // there are faster ways to do this, but this works for now; could
+ // certainly do a single inline asm, which most processors provide
+ // to deal with summing the bits in an integer.
+ // this routine can be overridden by another method if desired.
+
+ char t_out_bit = (char)(in_mem & 1);
+ for (size_t r = max_memory; r > 0; r--) {
+ in_mem >>= 1;
+ t_out_bit ^= ((char)(in_mem & 1));
+ }
+ return (t_out_bit);
+}
+
+void
+code_convolutional_trellis::code_convolutional_trellis_init
+(int block_size_bits,
+ int n_code_inputs,
+ int n_code_outputs,
+ const std::vector<int>& code_generators,
+ const std::vector<int>* code_feedback,
+ bool do_termination,
+ int end_memory_state)
+{
+ // do error checking on the input arguments
+
+ // make sure the block length makes sense
+
+ if ((block_size_bits < 0) | (block_size_bits > g_max_block_size_bits)) {
+ std::cerr << "code_convolutional_trellis: " <<
+ "Requested block length (" << block_size_bits <<
+ " bits) must be between 0 and " << g_max_block_size_bits <<
+ " bits, with 0 being a streaming encoder.\n";
+ assert (0);
+ }
+
+ // check to make sure the number of input streams makes sense
+
+ if ((n_code_inputs <= 0) | (n_code_inputs > g_max_num_streams)) {
+ std::cerr << "code_convolutional_trellis: " <<
+ "Requested number of input streams (" <<
+ n_code_inputs << ") must be between 1 and " <<
+ g_max_num_streams << ".\n";
+ assert (0);
+ }
+
+ // check to make sure the number of output streams makes sense
+
+ if ((n_code_outputs <= 0) | (n_code_outputs > g_max_num_streams)) {
+ std::cerr << "code_convolutional_trellis: " <<
+ "Requested number of output streams (" <<
+ n_code_outputs << ") must be between 1 and " <<
+ g_max_num_streams << ".\n";
+ assert (0);
+ }
+
+ // make sure the code_generator is the correct length
+
+ if (code_generators.size () !=
+ ((size_t)(n_code_inputs * n_code_outputs))) {
+ std::cerr << "code_convolutional_trellis: " <<
+ "Number of code generator entries (" << code_generators.size () <<
+ ") is not equal to the product of the number of input and output" <<
+ " streams (" << (n_code_inputs * n_code_outputs) << ").\n";
+ assert (0);
+ }
+
+ // check for feedback (== NULL or not)
+
+ d_do_feedback = (code_feedback != NULL);
+
+ // create the class block variables
+
+ d_block_size_bits = block_size_bits;
+ d_n_code_inputs = n_code_inputs;
+ d_n_code_outputs = n_code_outputs;
+ d_do_streaming = (block_size_bits == 0);
+ d_do_termination = (d_do_streaming == true) ? false : do_termination;
+
+ if (DO_PRINT_DEBUG) {
+ std::cout <<
+ "d_block_size_bits = " << d_block_size_bits << "\n"
+ "d_n_code_inputs = " << d_n_code_inputs << "\n"
+ "d_n_code_outputs = " << d_n_code_outputs << "\n"
+ "d_do_streaming = " <<
+ ((d_do_streaming == true) ? "true" : "false") << "\n"
+ "d_do_termination = " <<
+ ((d_do_termination == true) ? "true" : "false") << "\n"
+ "d_do_feedback = " <<
+ ((d_do_feedback == true) ? "true" : "false") << "\n";
+ }
+
+ // allocate the vectors for doing the encoding. use memory_t (an
+ // integer type, at least 32 bits) to represent memory and the
+ // code, as it makes the operations on the state vectors quite simple.
+
+ // d_states is a "matrix" [#input by #outputs] containing indices
+ // to memory_t's; this is done to make feedback function properly,
+ // and doesn't affect the computation time for feedforward. The
+ // issue is that any code with the same feedback can use the same
+ // memory - thus reducing the actual number of memories required.
+ // These overlapping encoders will use the same actual memory, but
+ // given that there is no way to know a-priori where they are, use
+ // pointers over the full I/O matrix-space to make sure each I/O
+ // encoder uses the correct memory.
+ // reference the matrix using "maoi(i,o)" ... see .h file.
+
+ // code generators (feedforward part) are [#inputs x #outputs],
+ // always - one for each I/O combination.
+ // reference the matrix using "maoi(i,o)" ... see .h file
+
+ d_code_generators.assign (d_n_code_inputs * d_n_code_outputs, 0);
+
+ // check the feedback for correctness, before anything else, since
+ // any feedback impacts the total # of delays in the encoder:
+ // without feedback, this is the sum of the individual delays max'ed
+ // over each input (if siao) or output (if soai).
+
+ if (d_do_feedback == true) {
+ memory_t t_OR_all_feedback = 0;
+ for (size_t n = 0; n < d_n_code_outputs; n++) {
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ memory_t t_in_code = (*code_feedback)[maoi(m,n)];
+
+ // OR this feedback with the overall,
+ // to check for any delays used at all
+
+ t_OR_all_feedback |= t_in_code;
+ }
+ }
+
+ // check to see if all the feedback entries were either "0" or "1",
+ // which implies no feedback; warn the user in that case and reset
+ // the do_feedback parameter to false.
+
+ if ((t_OR_all_feedback | 1) == 1) {
+ std::cout << "code_convolutional_trellis: Warning: " <<
+ "No feedback is required, ignoring feedback.\n";
+ d_do_feedback = false;
+ }
+ }
+
+ // copy over the FF code generators
+
+ for (size_t n = 0; n < d_n_code_outputs; n++)
+ for (size_t m = 0; m < d_n_code_inputs; m++)
+ d_code_generators[maio(m,n)] = code_generators[maio(m,n)];
+
+ // check the input FF (and FB) code generators for correctness, and
+ // find the minimum memory configuration: combining via a single
+ // input / all outputs (SIAO), or a single output / all inputs (SOAI).
+ //
+ // for FF only, look over both the SOAI and SIAO realizations to
+ // find the minimum total # of delays, and use that realization
+ // (SOAI is preferred if total # of delays is equal, since it's much
+ // simpler to implement).
+ //
+ // for FB:
+ // for SIAO, check each input row (all outputs for a given input)
+ // for unique feedback; duplicate feedback entries can be
+ // combined into a single computation to reduce total # of delays.
+ // for SOAI: check each output column (all inputs for a given
+ // output) for unique feedback; duplicate feedback entries can
+ // be combined into a single computation (ditto).
+
+ // check for SOAI all '0' output
+
+ for (size_t n = 0; n < d_n_code_outputs; n++) {
+ memory_t t_all_inputs_zero = 0;
+ for (size_t m = 0; m < d_n_code_inputs; m++)
+ t_all_inputs_zero |= d_code_generators[maio(m,n)];
+
+ // check this input to see if all encoders were '0'; this might be
+ // OK for some codes, but warn the user just in case
+
+ if (t_all_inputs_zero == 0) {
+ std::cout << "code_convolutional_trellis: Warning:"
+ "Output " << n+1 << " (of " << d_n_code_outputs <<
+ ") will always be 0.\n";
+ }
+ }
+
+ // check for SIAO all '0' input
+
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ memory_t t_all_outputs_zero = 0;
+ for (size_t n = 0; n < d_n_code_outputs; n++)
+ t_all_outputs_zero |= d_code_generators[maio(m,n)];
+
+ // check this input to see if all encoders were '0'; this might be
+ // OK for some codes, but warn the user just in case
+
+ if (t_all_outputs_zero == 0) {
+ std::cout << "code_convolutional_trellis: Warning:"
+ "Input " << m+1 << " (of " << d_n_code_inputs <<
+ ") will not be used; all encoders are '0'.\n";
+ }
+ }
+
+ // check and compute memory requirements in order to determine which
+ // realization uses the least memory; create and save findings to
+ // not have to re-do these computations later.
+
+ // single output, all inputs (SOAI) realization:
+ // reset the global parameters
+
+ d_code_feedback.assign (d_n_code_inputs * d_n_code_outputs, 0);
+ d_n_delays.assign (d_n_code_inputs * d_n_code_outputs, 0);
+ d_io_num.assign (d_n_code_inputs * d_n_code_outputs, 0);
+ d_states_ndx.assign (d_n_code_inputs * d_n_code_outputs, 0);
+ d_max_delay = d_total_n_delays = d_n_memories = 0;
+ d_do_encode_soai = true;
+
+ for (size_t n = 0; n < d_n_code_outputs; n++) {
+ size_t t_max_mem = 0;
+ size_t t_n_unique_fb_prev_start = d_n_memories;
+
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ get_memory_requirements (m, n, t_max_mem,
+ t_n_unique_fb_prev_start, code_feedback);
+ if (d_do_feedback == false) {
+ d_states_ndx[maio(m,n)] = n;
+ }
+ }
+ if (d_do_feedback == false) {
+ // not feedback; just store memory requirements for this output
+ d_total_n_delays += t_max_mem;
+ d_n_delays[n] = t_max_mem;
+ d_io_num[n] = n;
+ }
+ }
+ if (d_do_feedback == false) {
+ d_n_memories = d_n_code_outputs;
+ }
+
+ // store the parameters for SOAI
+
+ std::vector<size_t> t_fb_generators_soai, t_n_delays_soai, t_io_num_soai;
+ std::vector<size_t> t_states_ndx_soai;
+ size_t t_max_delay_soai, t_total_n_delays_soai, t_n_memories_soai;
+
+ t_fb_generators_soai.assign (d_code_feedback.size (), 0);
+ t_fb_generators_soai = d_code_feedback;
+ t_n_delays_soai.assign (d_n_delays.size (), 0);
+ t_n_delays_soai = d_n_delays;
+ t_io_num_soai.assign (d_io_num.size (), 0);
+ t_io_num_soai = d_io_num;
+ t_states_ndx_soai.assign (d_states_ndx.size (), 0);
+ t_states_ndx_soai = d_states_ndx;
+
+ t_n_memories_soai = d_n_memories;
+ t_total_n_delays_soai = d_total_n_delays;
+ t_max_delay_soai = d_max_delay;
+
+ // single input, all outputs (SIAO) realization
+ // reset the global parameters
+
+ d_code_feedback.assign (d_n_code_inputs * d_n_code_outputs, 0);
+ d_n_delays.assign (d_n_code_inputs * d_n_code_outputs, 0);
+ d_io_num.assign (d_n_code_inputs * d_n_code_outputs, 0);
+ d_states_ndx.assign (d_n_code_inputs * d_n_code_outputs, 0);
+ d_max_delay = d_total_n_delays = d_n_memories = 0;
+ d_do_encode_soai = false;
+
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ size_t t_max_mem = 0;
+ size_t t_n_unique_fb_prev_start = d_n_memories;
+
+ for (size_t n = 0; n < d_n_code_outputs; n++) {
+ get_memory_requirements (m, n, t_max_mem,
+ t_n_unique_fb_prev_start, code_feedback);
+ if (d_do_feedback == false) {
+ d_states_ndx[maio(m,n)] = m;
+ }
+ }
+ if (d_do_feedback == false) {
+ // not feedback; just store memory requirements for this output
+ d_total_n_delays += t_max_mem;
+ d_n_delays[m] = t_max_mem;
+ d_io_num[m] = m;
+ }
+ }
+ if (d_do_feedback == false) {
+ d_n_memories = d_n_code_inputs;
+ }
+
+ if (DO_PRINT_DEBUG) {
+ std::cout <<
+ " t_total_n_delays_siao = " << d_total_n_delays << "\n"
+ " t_total_n_delays_soai = " << t_total_n_delays_soai << "\n";
+ }
+
+ // pick which realization to use; soai is preferred since it's faster
+ // ... but unfortunately it's also less likely
+
+ if (d_total_n_delays < t_total_n_delays_soai) {
+ // use siao
+ // nothing else to do, since the global variables already hold
+ // the correct values.
+ } else {
+ // use soai
+ d_do_encode_soai = true;
+ d_code_feedback = t_fb_generators_soai;
+ d_n_delays = t_n_delays_soai;
+ d_io_num = t_io_num_soai;
+ d_states_ndx = t_states_ndx_soai;
+ d_n_memories = t_n_memories_soai;
+ d_total_n_delays = t_total_n_delays_soai;
+ d_max_delay = t_max_delay_soai;
+ }
+
+ // make sure the block length makes sense, #2
+
+ if ((d_do_streaming == false) & (d_block_size_bits < d_max_delay)) {
+ std::cerr << "code_convolutional_trellis: " <<
+ "Requested block length (" << d_block_size_bits <<
+ " bit" << (d_block_size_bits > 1 ? "s" : "") <<
+ ") must be at least 1 memory length (" << d_max_delay <<
+ " bit" << (d_max_delay > 1 ? "s" : "") <<
+ " for this code) when doing block coding.\n";
+ assert (0);
+ }
+
+ // check & mask off the init states
+
+ d_n_states = (1 << d_total_n_delays);
+ d_n_input_combinations = (1 << d_n_code_inputs);
+
+ memory_t t_mask = (memory_t)((2 << d_total_n_delays) - 1);
+
+ if (end_memory_state & t_mask) {
+ std::cout << "code_convolutional_trellis: Warning: " <<
+ "provided end memory state out (" << end_memory_state <<
+ ") is out of the state range [0, " <<
+ (d_n_states-1) << "]; masking off the unused bits.\n";
+
+ end_memory_state &= t_mask;
+ }
+
+ // create the max_mem_mask to be used in encoding
+
+ d_max_mem_masks.assign (d_n_memories, 0);
+
+ for (size_t m = 0; m < d_n_memories; m++) {
+ if (d_n_delays[m] == sizeof (memory_t) * g_num_bits_per_byte)
+ d_max_mem_masks[m] = ((memory_t) -1);
+ else
+ d_max_mem_masks[m] = (memory_t)((2 << (d_n_delays[m])) - 1);
+ }
+
+ if (DO_PRINT_DEBUG) {
+ std::cout <<
+ " d_n_memories = " << d_n_memories << "\n"
+ " d_total_n_delays = " << d_total_n_delays << "\n"
+ " d_max_delay = " << d_max_delay << "\n"
+ " d_do_encode_soai = " <<
+ ((d_do_encode_soai == true) ? "true" : "false") << "\n";
+ }
+
+ // zero the memories
+
+ d_memory.assign (d_n_memories, 0);
+
+ // create the inputs and outputs buffers
+
+ d_current_inputs.assign (d_n_code_inputs, 0);
+ d_current_outputs.assign (d_n_code_outputs, 0);
+
+ // create the trellis for this code:
+
+ create_trellis ();
+
+ if (d_do_termination == true) {
+
+ // create the termination lookup table
+
+ create_termination_table (end_memory_state);
+ }
+}
+
+void
+code_convolutional_trellis::get_memory_requirements
+(size_t m, // input number
+ size_t n, // output number
+ size_t& t_max_mem,
+ size_t& t_n_unique_fb_prev_start,
+ const std::vector<int>* code_feedback)
+{
+ size_t t_in_code = d_code_generators[maio(m,n)];
+
+ // find the memory requirement for this code generator
+
+ size_t t_code_mem_ff = max_bit_position (t_in_code);
+
+ // check to see if this is bigger than any others in this row/column
+
+ if (t_code_mem_ff > t_max_mem)
+ t_max_mem = t_code_mem_ff;
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "c_g[" << m << "][" << n << "]{" <<
+ maio(m,n) << "} = " << n2bs(t_in_code, 8) <<
+ ", code_mem = " << t_code_mem_ff;
+ }
+
+ // check the feedback portion, if it exists;
+ // for soai, check all the inputs which generate this output for
+ // uniqueness; duplicate entries can be combined to reduce total
+ // # of memories as well as required computations.
+
+ if (d_do_feedback == true) {
+ if (DO_PRINT_DEBUG) {
+ std::cout << "\n";
+ }
+
+ // get the FB code; AND off the LSB for correct functionality
+ // during internal computations.
+
+ t_in_code = ((memory_t)((*code_feedback)[maio(m,n)]));
+ t_in_code &= ((memory_t)(-2));
+
+ // find the memory requirement
+
+ size_t t_code_mem_fb = max_bit_position (t_in_code);
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "c_f[" << m << "][" << n << "]{" <<
+ maio(m,n) << "} = " << n2bs(t_in_code, 8) <<
+ ", code_mem = " << t_code_mem_fb;
+ }
+
+ // check to see if this feedback is unique
+
+ size_t l_n_unique_fb = t_n_unique_fb_prev_start;
+ while (l_n_unique_fb < d_n_memories) {
+ if (d_code_feedback[l_n_unique_fb] == t_in_code)
+ break;
+ l_n_unique_fb++;
+ }
+ if (l_n_unique_fb == d_n_memories) {
+
+ // this is a unique feedback;
+
+ d_code_feedback[l_n_unique_fb] = t_in_code;
+ d_n_delays[l_n_unique_fb] = t_code_mem_fb;
+
+ // increase the number of unique feedback codes
+
+ d_n_memories++;
+
+ // store memory requirements for this output
+
+ if (t_max_mem < t_code_mem_fb)
+ t_max_mem = t_code_mem_fb;
+ d_total_n_delays += t_max_mem;
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << ", uq # " << l_n_unique_fb <<
+ ", tot_mem = " << d_total_n_delays;
+ }
+ } else {
+ // not a unique feedback, but the FF might require more memory
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << ", !uq # " << l_n_unique_fb <<
+ " = " << d_n_delays[l_n_unique_fb];
+ }
+
+ if (d_n_delays[l_n_unique_fb] < t_code_mem_ff) {
+ d_total_n_delays += (t_code_mem_ff - d_n_delays[l_n_unique_fb]);
+ d_n_delays[l_n_unique_fb] = t_code_mem_ff;
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << " => " << d_n_delays[l_n_unique_fb] <<
+ ", tot_mem = " << d_total_n_delays;
+ }
+ }
+ }
+ d_io_num[l_n_unique_fb] = ((d_do_encode_soai == true) ? n : m);
+ d_states_ndx[maio(m,n)] = l_n_unique_fb;
+ }
+ if (DO_PRINT_DEBUG) {
+ std::cout << "\n";
+ }
+ if (d_max_delay < t_max_mem)
+ d_max_delay = t_max_mem;
+}
+
+void
+code_convolutional_trellis::create_trellis
+()
+{
+ if (DO_PRINT_DEBUG_ENCODE) {
+ std::cout << "c_t: # states = " << d_n_states <<
+ ", d_n_input_combinations = " << d_n_input_combinations << "\n";
+ }
+
+ // first dimension is the number of states
+
+ d_trellis.resize (d_n_states);
+
+ // second dimension (one per first dimension) is the number of input
+ // combinations
+
+ for (size_t m = 0; m < d_n_states; m++) {
+ d_trellis[m].resize (d_n_input_combinations);
+ for (size_t n = 0; n < d_n_input_combinations; n++) {
+ connection_t_ptr t_connection = &(d_trellis[m][n]);
+ t_connection->d_output_bits.resize (d_n_code_outputs);
+ }
+ }
+
+ // fill in the trellis
+
+ for (size_t m = 0; m < d_n_states; m++) {
+ for (size_t n = 0; n < d_n_input_combinations; n++) {
+ connection_t_ptr t_connection = &(d_trellis[m][n]);
+ encode_single (m, n, t_connection->d_to_state,
+ t_connection->d_output_bits);
+ if (DO_PRINT_DEBUG_ENCODE) {
+ std::cout << "set d_t[" << n2bs(m,d_total_n_delays) << "][" <<
+ n2bs(n,d_n_code_inputs) << "] : to_st = " <<
+ n2bs(t_connection->d_to_state,d_total_n_delays) << "\n";
+ }
+ }
+ }
+}
+
+void
+code_convolutional_trellis::demux_state
+(memory_t in_state,
+ std::vector<memory_t>& memories)
+{
+ // de-mux bits for the given memory state;
+ // copy them into the provided vector;
+ // assumes state bits start after the LSB (not at &1)
+
+ memories.resize (d_n_memories);
+ if (DO_PRINT_DEBUG_ENCODE) {
+ std::cout << "in_st = " << n2bs(in_state,d_total_n_delays) << " ->\n";
+ }
+ for (size_t m = 0; m < d_n_memories; m++) {
+ memories[m] = (in_state << 1) & d_max_mem_masks[m];
+ in_state >>= d_n_delays[m];
+ if (DO_PRINT_DEBUG_ENCODE) {
+ std::cout << " #d = " << d_n_delays[m] << ", mem[" << m << "] = " <<
+ n2bs(memories[m], d_n_delays[m]+1) << "\n";
+ }
+ }
+}
+
+memory_t
+code_convolutional_trellis::mux_state
+(const std::vector<memory_t>& memories)
+{
+ // mux bits for the given memory states in d_memory
+ // assumes state bits start after the LSB (not at &1)
+ memory_t t_state = 0;
+ size_t shift = 0;
+ for (size_t m = 0; m < d_n_memories; m++) {
+ t_state |= (memories[m] >> 1) << shift;
+ shift += d_n_delays[m];
+ if (DO_PRINT_DEBUG_ENCODE) {
+ std::cout << " #d = " << d_n_delays[m] << ", mem[" << m << "] = " <<
+ n2bs(memories[m], d_n_delays[m]+1) << " -> st = " <<
+ n2bs(t_state, d_total_n_delays) << "\n";
+ }
+ }
+ return (t_state);
+}
+
+void
+code_convolutional_trellis::demux_inputs
+(memory_t inputs,
+ std::vector<char>& in_vec)
+{
+ for (size_t m = 0; m < d_n_code_inputs; m++, inputs >>= 1) {
+ in_vec[m] = (char)(inputs & 1);
+ }
+}
+
+memory_t
+code_convolutional_trellis::mux_inputs
+(const std::vector<char>& in_vec)
+{
+ size_t bit_shift = 0;
+ memory_t inputs = 0;
+ for (size_t m = 0; m < in_vec.size(); m++, bit_shift++) {
+ inputs |= (((memory_t)(in_vec[m]&1)) << bit_shift);
+ }
+ return (inputs);
+}
+
+void
+code_convolutional_trellis::encode_single
+(memory_t in_state,
+ size_t inputs,
+ memory_t& out_state,
+ std::vector<char>& out_bits)
+{
+ // set input parameters
+
+ demux_state (in_state, d_memory);
+ demux_inputs (inputs, d_current_inputs);
+
+ // call the correct function to do the work
+
+ if (d_do_encode_soai == true) {
+ if (d_do_feedback == true) {
+ encode_single_soai_fb ();
+ } else {
+ encode_single_soai ();
+ }
+ } else {
+ if (d_do_feedback == true) {
+ encode_single_siao_fb ();
+ } else {
+ encode_single_siao ();
+ }
+ }
+
+ // retrieve the output parameters
+
+ out_state = mux_state (d_memory);
+ out_bits = d_current_outputs;
+}
+
+void
+code_convolutional_trellis::encode_lookup
+(memory_t& state,
+ const std::vector<char>& inputs,
+ std::vector<char>& out_bits)
+{
+ if (DO_PRINT_DEBUG_ENCODE) {
+ std::cout << "using d_t[" << state << "][" << mux_inputs(inputs) <<
+ "] = ";
+ std::cout.flush ();
+ }
+
+ connection_t_ptr t_connection = &(d_trellis[state][mux_inputs(inputs)]);
+
+ if (DO_PRINT_DEBUG_ENCODE) {
+ std::cout << t_connection << ": to_state = "
+ << t_connection->d_to_state << "\n";
+ }
+
+ state = t_connection->d_to_state;
+ out_bits = t_connection->d_output_bits;
+}
+
+void
+code_convolutional_trellis::get_termination_inputs
+(memory_t term_start_state,
+ size_t bit_num,
+ std::vector<char>& inputs)
+{
+ inputs.resize (d_n_code_inputs);
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ inputs[m] = ((d_term_inputs[term_start_state][m]) >> bit_num) & 1;
+ }
+}
+
+
+void
+code_convolutional_trellis::encode_single_soai
+()
+{
+ // single-output, all inputs; no feedback
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "Starting encode_single_soai.\n";
+ }
+
+ // shift memories down by 1 bit to make room for feedback; no
+ // masking required.
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ if (DO_PRINT_DEBUG) {
+ std::cout << "m_i[" << p << "] = " <<
+ n2bs(d_memory[p], 1+d_n_delays[p]);
+ }
+
+ d_memory[p] >>= 1;
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << " >>= 1 -> " <<
+ n2bs(d_memory[p], 1+d_n_delays[p]) << "\n";
+ }
+ }
+
+ // for each input bit, if that bit's a '1', then XOR the code
+ // generators into the correct state's memory.
+
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ if (DO_PRINT_DEBUG) {
+ std::cout << "c_i[" << m << "] = " <<
+ n2bs(d_current_inputs[m],1);
+ }
+ if (d_current_inputs[m] == 1) {
+ if (DO_PRINT_DEBUG) {
+ std::cout << "\n";
+ }
+ for (size_t n = 0; n < d_n_code_outputs; n++) {
+ if (DO_PRINT_DEBUG) {
+ std::cout << "m_i[s_ndx[" << m << "][" << n << "] == " <<
+ d_states_ndx[maio(m,n)] << "] = " <<
+ n2bs(d_memory[d_states_ndx[maio(m,n)]],
+ 1+d_n_delays[d_states_ndx[maio(m,n)]]);
+ }
+
+ d_memory[d_states_ndx[maio(m,n)]] ^= d_code_generators[maio(m,n)];
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << " ^= c_g[][] == " <<
+ n2bs(d_code_generators[maio(m,n)],
+ 1+d_n_delays[d_states_ndx[maio(m,n)]]) <<
+ " -> " << n2bs(d_memory[d_states_ndx[maio(m,n)]],
+ 1+d_n_delays[d_states_ndx[maio(m,n)]]) << "\n";
+ }
+ }
+ } else if (DO_PRINT_DEBUG) {
+ std::cout << " ... nothing to do\n";
+ }
+ }
+
+ for (size_t p = 0; p < d_n_code_outputs; p++) {
+ d_current_outputs[p] = 0;
+ }
+
+ // create the output bits, by XOR'ing the individual unique
+ // memory(ies) into the correct output bit
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ d_current_outputs[d_io_num[p]] ^= ((char)(d_memory[p] & 1));
+ }
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "ending encode_single_soai.\n";
+ }
+}
+
+void
+code_convolutional_trellis::encode_single_soai_fb
+()
+{
+ // single-output, all inputs; with feedback
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "Starting encode_single_soai_fb.\n";
+ }
+
+ // shift memories down by 1 bit to make room for feedback; no
+ // masking required.
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ if (DO_PRINT_DEBUG) {
+ std::cout << "m_i[" << p << "] = " << d_memory[p];
+ }
+
+ d_memory[p] >>= 1;
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << " -> " << d_memory[p] << "\n";
+ }
+ }
+
+ // for each input bit, if that bit's a '1', then XOR the code
+ // generators into the correct state's memory.
+
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ if (d_current_inputs[m] == 1) {
+ for (size_t n = 0; n < d_n_code_outputs; n++) {
+ d_memory[d_states_ndx[maio(m,n)]] ^= d_code_generators[maio(m,n)];
+ }
+ }
+ }
+
+ for (size_t p = 0; p < d_n_code_outputs; p++) {
+ d_current_outputs[p] = 0;
+ }
+
+ // create the output bits, by XOR'ing the individual unique
+ // memory(ies) into the correct output bit
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ d_current_outputs[d_io_num[p]] ^= ((char)(d_memory[p] & 1));
+ }
+
+ // now that the output bits are fully created, XOR the FB back
+ // into the memories; the feedback bits have the LSB (&1) masked
+ // off already so that it doesn't contribute.
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ if (d_current_outputs[d_io_num[p]] == 1) {
+ d_memory[p] ^= d_code_feedback[p];
+ }
+ }
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "ending encode_single_soai.\n";
+ }
+}
+
+void
+code_convolutional_trellis::encode_single_siao
+()
+{
+ // single input, all outputs; no feedback
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "starting encode_single_siao.\n";
+ }
+
+ // update the memories with the current input bits;
+
+ // for each unique memory (1 per input), shift the delays and mask
+ // off the extra high bits; then XOR in the input bit.
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ d_memory[p] |= ((memory_t)(d_current_inputs[d_io_num[p]]));
+ }
+
+ // create the output bits: for each output, loop over all inputs,
+ // find the output bits for each encoder, and XOR each together
+ // then sum (would usually be sum then XOR, but they're mutable in
+ // base-2 and it's faster this way).
+
+ for (size_t n = 0; n < d_n_code_outputs; n++) {
+ memory_t t_mem = 0;
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ t_mem ^= ((d_memory[d_states_ndx[maio(m,n)]]) &
+ d_code_generators[maio(m,n)]);
+ }
+ d_current_outputs[n] = sum_bits_mod2 (t_mem, d_max_delay);
+ }
+
+ // post-shift & mask the memory to guarantee that output
+ // state is in the correct bit positions (1 up, not the LSB)
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ d_memory[p] = (d_memory[p] << 1) & d_max_mem_masks[p];
+ }
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "ending encode_single_siao.\n";
+ }
+}
+
+void
+code_convolutional_trellis::encode_single_siao_fb
+()
+{
+ // single input, all outputs; with feedback
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "starting encode_single_siao_fb.\n";
+ }
+
+ // update the memories with the current input bits;
+
+ // for each unique memory (1 per input), shift the delays and mask
+ // off the extra high bits; then XOR in the input bit.
+ // with FB: find the feedback bit, and OR it into the input bit's slot;
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ memory_t t_mem = d_memory[p];
+ memory_t t_fb = t_mem & d_code_feedback[p];
+ char t_fb_bit = sum_bits_mod2 (t_fb, d_max_delay);
+ t_mem |= ((memory_t) t_fb_bit);
+ d_memory[p] = t_mem ^ ((memory_t)(d_current_inputs[d_io_num[p]]));
+ }
+
+ // create the output bits: for each output, loop over all inputs,
+ // find the output bits for each encoder, and XOR each together
+ // then sum (would usually be sum then XOR, but they're mutable in
+ // base-2 and it's faster this way).
+
+ for (size_t n = 0; n < d_n_code_outputs; n++) {
+ memory_t t_mem = 0;
+ for (size_t m = 0; m < d_n_code_inputs; m++) {
+ t_mem ^= ((d_memory[d_states_ndx[maio(m,n)]]) &
+ d_code_generators[maio(m,n)]);
+ }
+ d_current_outputs[n] = sum_bits_mod2 (t_mem, d_max_delay);
+ }
+
+ // post-shift & mask the memory to guarantee that output
+ // state is in the correct bit positions (1 up, not the LSB)
+
+ for (size_t p = 0; p < d_n_memories; p++) {
+ d_memory[p] = (d_memory[p] << 1) & d_max_mem_masks[p];
+ }
+
+ if (DO_PRINT_DEBUG) {
+ std::cout << "ending encode_loop_siao_fb.\n";
+ }
+}
Index: code_convolutional_trellis.h
===================================================================
RCS file: code_convolutional_trellis.h
diff -N code_convolutional_trellis.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ code_convolutional_trellis.h 20 Jul 2006 17:42:54 -0000 1.1
@@ -0,0 +1,372 @@
+/* -*- c++ -*- */
+/*
+ * Copyright 2006 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef INCLUDED_CODE_CONVOLUTIONAL_TRELLIS_H
+#define INCLUDED_CODE_CONVOLUTIONAL_TRELLIS_H
+
+#include "code_types.h"
+#include <vector>
+
/*
 * connection_t: describes an output connection from the current
 * time-bit memory state to the next time-bit memory state
 *
 * d_to_state: memory configuration of the "to" state
 *
 * d_output_bits: the output bits for this connection
 */

typedef struct connection_t {
  memory_t d_to_state;
  std::vector<char> d_output_bits;
} connection_t, *connection_t_ptr;

/*
 * trellis_t: describes a single set of trellis connections, from a
 * time-bit to the next, forward transitions only
 *
 * This is a 2d "matrix", where the first dimension is the starting
 * memory state, and the second is the (combined) input as an
 * integer: e.g. for a 2 input code, if I1 = 0 and I2 = 1, then
 * the combined input is the "number" found by appending I2 and I1
 * together, in this case 10b = 2.
 *
 * The trellis is used to lookup information: given a starting state
 * and inputs, return the output bits and next state.
 */

typedef std::vector<std::vector<connection_t> > trellis_t, *trellis_t_ptr;
+
+class code_convolutional_trellis
+{
+/*!
+ * class code_convolutional_trellis
+ *
+ * Create a convolutional code trellis structure, but encoding,
+ * decoding, determining termination transitions, and anything else
+ * which might be useful.
+ *
+ * block_size_bits: if == 0, then do streaming encoding ("infinite"
+ * trellis); otherwise this is the block size in bits to encode
+ * before terminating the trellis. This value -does not- include
+ * any termination bits.
+ *
+ * n_code_inputs:
+ * n_code_outputs:
+ * code_generator: vector of integers (32 bit) representing the code
+ * to be implemented. E.g. "4" in binary is "100", which would be
+ * "D^2" for code generation. "6" == 110b == "D^2 + D"
+ * ==> The vector is listed in order for each output stream, so if there
+ * are 2 input streams (I1, I2) [specified in "n_code_inputs"]
+ * and 2 output streams (O1, O2) [specified in "n_code_outputs"],
+ * then the vector would be the code generator for:
+ * [I1->O1, I2->O1, I1->O2, I2->O2]
+ * with each element being an integer representation of the code.
+ * The "octal" representation is used frequently in the literature
+ * (e.g. [015, 06] == [1101, 0110] in binary) due to its close
+ * relationship with binary (each number is 3 binary digits)
+ * ... but any integer representation will suffice.
+ *
+ * do_termination: valid only if block_size_bits != 0, and defines
+ * whether or not to use trellis termination. Default is to use
+ * termination when doing block coding.
+ *
+ * start_memory_state: when starting a new block, the starting memory
+ * state to begin encoding; there will be a helper function to
+ * assist in creating this value for a given set of inputs;
+ * default is the "all zero" state.
+ *
+ * end_memory_state: when terminating a block, the ending memory
+ * state to stop encoding; there will be a helper function to
+ * assist in creating this value for a given set of inputs;
+ * default is the "all zero" state.
+ */
+
+public:
+ inline code_convolutional_trellis
+ (int block_size_bits,
+ int n_code_inputs,
+ int n_code_outputs,
+ const std::vector<int> &code_generators,
+ bool do_termination = true,
+ int end_memory_state = 0)
+ {code_convolutional_trellis_init (block_size_bits,
+ n_code_inputs,
+ n_code_outputs,
+ code_generators,
+ NULL,
+ do_termination,
+ end_memory_state);};
+
+/*!
+ * Encoder with feedback.
+ *
+ * code_feedback: vector of integers (32 bit) representing the code
+ * feedback to be implemented (same as for the code_generator).
+ * For this feedback type, the LSB ("& 1") is ignored (set to "1"
+ * internally, since it's always 1) ... this (effectively)
+ * represents the input bit for the given encoder, without which
+ * there would be no encoding! Each successive higher-order bit
+ * represents the output of that delay block; for example "6" ==
+ * 110b == "D^2 + D" means use the current input bit + the output
+ * of the second delay block. Listing order is the same as for
+ * the code_generator.
+ */
+
+ inline code_convolutional_trellis
+ (int block_size_bits,
+ int n_code_inputs,
+ int n_code_outputs,
+ const std::vector<int> &code_generators,
+ const std::vector<int> &code_feedback,
+ bool do_termination = true,
+ int end_memory_state = 0)
+ {code_convolutional_trellis_init (block_size_bits,
+ n_code_inputs,
+ n_code_outputs,
+ code_generators,
+ &code_feedback,
+ do_termination,
+ end_memory_state);};
+
+ virtual ~code_convolutional_trellis () {};
+
+/* for remote access to internal info */
+
+ inline size_t block_size_bits () {return (d_block_size_bits);};
+ inline size_t n_code_inputs () {return (d_n_code_inputs);};
+ inline size_t n_code_outputs () {return (d_n_code_outputs);};
+ inline const bool do_termination () {return (d_do_termination);};
+ inline const bool do_feedback () {return (d_do_feedback);};
+ inline const bool do_streaming () {return (d_do_streaming);};
+ inline const size_t total_n_delays () {return (d_total_n_delays);};
+
+ virtual char sum_bits_mod2 (memory_t in_mem, size_t max_memory);
+ void get_termination_inputs (memory_t term_start_state,
+ size_t bit_num,
+ std::vector<char>& inputs);
+
+ void encode_lookup (memory_t& state,
+ const std::vector<char>& inputs,
+ std::vector<char>& out_bits);
+
+ void demux_state (memory_t in_state, std::vector<memory_t>& memories);
+ memory_t mux_state (const std::vector<memory_t>& memories);
+ void demux_inputs (memory_t inputs, std::vector<char>& in_vec);
+ memory_t mux_inputs (const std::vector<char>& in_vec);
+
+protected:
+#if 0
+/*
+ * state_get_from(v,i,k): use to retrieve a given bit-memory state,
+ * from the inputs:
+ *
+ * memory_t v: the value from which to retrieve the given state
+ * size_t i: for which input stream (0 to #I-1)
+ * size_t k: the number of memory slots per input (e.g. 1+D^2 -> 2)
+ */
+
+ inline memory_t state_get_from (memory_t v,
+ size_t i,
+ size_t k)
+ {return (((v)>>((i)*(k)))&((1<<(k))-1));};
+
+/*
+ * state_add_to(s,v,i,k): use to create a given bit-memory state,
+ * from the inputs:
+ *
+ * memory_t s: the state value to modify
+ * memory_t v: value to set the state to for this input
+ * size_t i: for which input stream (0 to #I-1)
+ * size_t k: the number of memory slots per input (e.g. 1+D^2 -> 2)
+ */
+
+ inline void state_add_to (memory_t s,
+ memory_t v,
+ size_t i,
+ size_t k)
+ {(s)|=(((v)&((1<<(k))-1))<<((i)*(k)));};
+#endif
+
+/*
+ * maio(i,o): matrix access into a vector, knowing the # of code
+ * outputs (from inside the class). References into a vector with
+ * code inputs ordered by code output.
+ *
+ * 'i' is the 1st dimension - faster memory - the code input
+ * 'o' is the 2nd dimension - slower memory - the code output
+ *
+ * returns ((o*n_code_inputs) + i)
+ */
+
+ inline size_t maio(size_t i, size_t o) {return ((o*d_n_code_inputs) + i);};
+
+/*
+ * maoi(i,o): matrix access into a vector, knowing the # of code
+ * inputs (from inside the class). References into a vector with
+ * code outputs ordered by code input.
+ *
+ * 'o' is the 1st dimension - faster memory - the code output
+ * 'i' is the 2nd dimension - slower memory - the code input
+ *
+ * returns ((i*n_code_outputs) + o)
+ */
+
+ inline size_t maoi(size_t i, size_t o) {return ((i*d_n_code_outputs) + o);};
+
+/*
+ * max_bit_position (x): returns the bit-number of the highest "1" bit
+ * in the provided value, such that the LSB would return 0 and the MSB
+ * of a long would return 31.
+ */
+
+ inline size_t max_bit_position (memory_t x)
+ {
+ size_t t_code_mem = 0;
+ memory_t t_in_code = x >> 1;
+ while (t_in_code != 0) {
+ t_in_code >>= 1;
+ t_code_mem++;
+ }
+
+ return (t_code_mem);
+ }
+
+ // methods defined in this class
+
+ void code_convolutional_trellis_init
+ (int block_size_bits,
+ int n_code_inputs,
+ int n_code_outputs,
+ const std::vector<int>& code_generators,
+ const std::vector<int>* code_generators,
+ bool do_termination,
+ int end_memory_state);
+
+ void create_trellis ();
+ void create_termination_table (memory_t end_memory_state);
+ void encode_single (memory_t in_state,
+ memory_t inputs,
+ memory_t& out_state,
+ std::vector<char>& out_bits);
+ virtual void encode_single_soai ();
+ virtual void encode_single_siao ();
+ virtual void encode_single_soai_fb ();
+ virtual void encode_single_siao_fb ();
+
+ void get_memory_requirements (size_t m,
+ size_t n,
+ size_t& t_max_mem,
+ size_t& t_n_unique_fb_prev_start,
+ const std::vector<int>* code_feedback);
+
+ // variables
+
+ size_t d_block_size_bits, d_n_code_outputs;
+ size_t d_n_code_inputs, d_n_input_combinations;
+ bool d_do_streaming, d_do_termination, d_do_feedback, d_do_encode_soai;
+
+ // "max_delay" is the max # of delays for all unique generators (ff and fb),
+ // needed to determine (e.g.) termination
+
+ size_t d_max_delay;
+
+ // "n_memories" is the number of unique memories as determined by
+ // either the feedforward or feedback generators (not both). For
+ // FF, this number equals either the number of code inputs (for
+ // SIAO) or outputs (for SOAI).
+
+ size_t d_n_memories;
+
+ // "total_n_delays" is the total # of delays, needed to determine the
+ // # of states in the decoder
+ // "n_states" = (2^n_delays) - 1 .. the number of memory states
+
+ size_t d_total_n_delays, d_n_states;
+
+ // "code generators" are stored internally in "maXY(i,o)" order this
+ // allows for looping over all a single output and computing all
+ // input parts sequentially.
+
+ std::vector<memory_t> d_code_generators;
+
+ // "feedback" are found as "d_n_memories" unique entries, and stored
+ // in at most 1 entry per I/O combination. Listed in the same order
+ // as "d_io_num" entries show.
+
+ std::vector<memory_t> d_code_feedback;
+
+ // "n_delays" is a vector, the number of delays for the FB generator
+ // in the same [] location; also relates directly to the
+ // "max_mem_masks" in the same [] location.
+
+ std::vector<size_t> d_n_delays;
+
+ // "io_num" is a vector, mapping which FB in SIAO goes with which
+ // input, or which FB in SOAI goes with which output
+
+ std::vector<size_t> d_io_num;
+
+ // "max_mem_masks" are the memory masks, one per unique FB for SIAO;
+ // otherwise not used.
+
+ std::vector<size_t> d_states_ndx;
+
+ // "memory" are the actual stored delay bits, one memory for each
+ // unique FF or FB code generator;
+ // interpreted w/r.t. the actual FF and FB code generators and
+ // SOAI / SIAO realization;
+
+ std::vector<memory_t> d_max_mem_masks;
+
+ // "states_ndx" is a "matrix" whose contents are the indices into
+ // the "io_num" vector, telling which input goes with which
+ // state; uses the same "maXY(i,o)" as the code generators.
+
+ std::vector<memory_t> d_memory;
+
+ // "term_inputs" are the inputs required to terminate the trellis -
+ // interpreted w/r.t. the actual FF and FB code generators and
+ // SOAI / SIAO realization;
+ // first dimension is the memory state #;
+ // second dimension is the input stream #;
+ // bits are packed, with the first input being the LSB and the last
+ // input being closest to the MSB.
+
+ typedef std::vector<std::vector<memory_t> > term_input_t;
+ term_input_t d_term_inputs;
+
+ // "inputs" are the current input bits, in the LSB (&1) of each "char"
+
+ std::vector<char> d_current_inputs;
+
+ // "outputs" are the current output bits, in the LSB (&1) of each "char"
+
+ std::vector<char> d_current_outputs;
+
+ // "trellis" is the single-stage memory state transition ("trellis")
+ // representation for this code; forward paths only
+
+ trellis_t d_trellis;
+};
+
+#endif /* INCLUDED_CODE_CONVOLUTIONAL_TRELLIS_H */
Index: code_types.h
===================================================================
RCS file: code_types.h
diff -N code_types.h
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ code_types.h 20 Jul 2006 17:42:54 -0000 1.1
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2006 Free Software Foundation, Inc.
+ *
+ * This file is part of GNU Radio
+ *
+ * GNU Radio is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * GNU Radio is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU Radio; see the file COPYING. If not, write to
+ * the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef INCLUDED_CODE_TYPES_H
+#define INCLUDED_CODE_TYPES_H
+
+#include <sys/types.h>
+
// the type used for encoder memory, plus a pointer alias to it;
// declared separately for readability (identical types to the
// original combined declarator).

typedef unsigned long memory_t;
typedef memory_t *memory_ptr_t;
+
+#endif /* INCLUDED_CODE_TYPES_H */