Skip to content

Instantly share code, notes, and snippets.

@xdsopl
Last active August 5, 2020 14:49
Show Gist options
  • Select an option

  • Save xdsopl/8c71481c1017b7b060e45b69042f1ebf to your computer and use it in GitHub Desktop.

Select an option

Save xdsopl/8c71481c1017b7b060e45b69042f1ebf to your computer and use it in GitHub Desktop.
Test bench for ordered statistics decoding
.*.swp
testbench
# Build rules for the ordered statistics decoding test bench.
# C++11, all warnings, aggressive optimization, exceptions and RTTI disabled.
CXXFLAGS = -std=c++11 -W -Wall -Ofast -fno-exceptions -fno-rtti
# default compiler: clang++ with libc++, tuned for the build machine
CXX = clang++ -stdlib=libc++ -march=native
#CXX = g++ -march=native
.PHONY: all
all: testbench
.PHONY: test
# build and run the test bench
# NOTE(review): the recipe lines below must be tab-indented in the actual
# Makefile; the indentation was lost in this paste.
test: testbench
./testbench
testbench: testbench.cc
$(CXX) $(CXXFLAGS) $< -o $@
.PHONY: clean
clean:
rm -f testbench
/*
Test bench for ordered statistics decoding
Copyright 2020 Ahmet Inan <xdsopl@gmail.com>
*/
#include <cassert>
#include <cmath>
#include <cstdint>
#include <algorithm>
#include <functional>
#include <iostream>
#include <random>
template <int N, int K>
class LinearEncoder
{
public:
	// Multiplies the K-bit message by the row-major K x N generator matrix
	// over GF(2) and writes the N-bit result into codeword. Bits and matrix
	// entries are stored one per int8_t (values 0 or 1).
	void operator()(int8_t *codeword, const int8_t *message, const int8_t *genmat)
	{
		// start from the all-zero word, then accumulate one matrix row per
		// message bit: AND masks the row, XOR adds it modulo 2
		std::fill(codeword, codeword + N, 0);
		for (int j = 0; j < K; ++j) {
			const int8_t *row = genmat + N * j;
			for (int i = 0; i < N; ++i)
				codeword[i] ^= message[j] & row[i];
		}
	}
};
// Builds the generator polynomial and generator matrix of an (N, K) binary
// BCH code from a list of minimal polynomials.
template <int N, int K>
class BoseChaudhuriHocquenghemGenerator
{
// number of parity bits of the code
static const int NP = N - K;
public:
// Computes the generator polynomial as the product of the given minimal
// polynomials over GF(2). Coefficients are stored reversed: genpoly[NP-d]
// holds the coefficient of x^d, so genpoly[0] is the leading coefficient
// and genpoly[NP] the constant term. Each list entry is a bit mask whose
// bit j is the coefficient of x^j of that minimal polynomial.
static void poly(int8_t *genpoly, std::initializer_list<int> minimal_polynomials)
{
// $genpoly(x) = \prod_i(minpoly_i(x))$
// genpoly_degree counts coefficients (degree + 1); start from the
// constant polynomial 1
int genpoly_degree = 1;
for (int i = 0; i < NP; ++i)
genpoly[i] = 0;
genpoly[NP] = 1;
for (auto m: minimal_polynomials) {
assert(0 < m);
// degree of this factor = index of its highest set bit
int m_degree = 0;
while (m>>m_degree)
++m_degree;
--m_degree;
assert(genpoly_degree + m_degree <= NP + 1);
// multiply genpoly by m in place, walking from high to low degree so
// coefficients already updated in this pass are never reused as inputs
for (int i = genpoly_degree; i >= 0; --i) {
if (!genpoly[NP-i])
continue;
genpoly[NP-i] = m&1;
for (int j = 1; j <= m_degree; ++j)
genpoly[NP-(i+j)] ^= (m>>j)&1;
}
genpoly_degree += m_degree;
}
// the product must have degree exactly NP with nonzero leading and
// constant coefficients, otherwise the inputs were not a valid set
assert(genpoly_degree == NP + 1);
assert(genpoly[0]);
assert(genpoly[NP]);
if (0) {
std::cerr << "generator polynomial =" << std::endl;
for (int i = 0; i <= NP; ++i)
std::cerr << (int)genpoly[i];
std::cerr << std::endl;
}
}
// Fills the row-major K x N generator matrix: row j holds the generator
// polynomial shifted right by j columns (cyclic code structure). If
// systematic is true, back substitution turns the left K x K block into
// the identity matrix.
static void matrix(int8_t *genmat, bool systematic, std::initializer_list<int> minimal_polynomials)
{
// row 0: generator polynomial in columns 0..NP, zeros in the rest
poly(genmat, minimal_polynomials);
for (int i = NP+1; i < N; ++i)
genmat[i] = 0;
for (int j = 1; j < K; ++j) {
for (int i = 0; i < j; ++i)
genmat[N*j+i] = 0;
// (N+1)*j+i == N*j + (j+i): copy row 0 into row j shifted right by j
for (int i = 0; i <= NP; ++i)
genmat[(N+1)*j+i] = genmat[i];
for (int i = j+NP+1; i < N; ++i)
genmat[N*j+i] = 0;
}
// back substitution from the last column leftwards: each diagonal entry
// genmat[N*k+k] is the leading polynomial coefficient (always 1), so
// clearing the entries above it makes the left block the identity
if (systematic)
for (int k = K-1; k; --k)
for (int j = 0; j < k; ++j)
if (genmat[N*j+k])
for (int i = k; i < N; ++i)
genmat[N*j+i] ^= genmat[N*k+i];
if (0) {
std::cerr << "generator matrix =" << std::endl;
for (int j = 0; j < K; ++j) {
for (int i = 0; i < N; ++i)
std::cerr << (int)genmat[N*j+i];
std::cerr << std::endl;
}
}
}
};
// Soft-decision decoder using ordered statistics decoding of order O for an
// (N, K) binary linear code given by its K x N generator matrix.
template <int N, int K, int O>
class OrderedStatisticsDecoder
{
// working copy of the generator matrix with columns permuted by reliability
int8_t G[N*K];
// codeword: currently re-encoded word; candidate: best word found so far
int8_t codeword[N], candidate[N];
// soft input values permuted into reliability order
int8_t softperm[N];
// column permutation: permuted position i corresponds to original
// position perm[i]
int16_t perm[N];
// Brings G into row echelon form over GF(2) by Gaussian elimination.
// When a column has no usable pivot it swaps in a later column and
// records that swap in perm so the output can be un-permuted later.
void row_echelon()
{
for (int k = 0; k < K; ++k) {
// find pivot in this column
for (int j = k; j < K; ++j) {
if (G[N*j+k]) {
// swap rows j and k (loop condition makes this a no-op when j == k)
for (int i = k; j != k && i < N; ++i)
std::swap(G[N*j+i], G[N*k+i]);
break;
}
}
// keep searching for suitable column for pivot
// beware: this will use columns >= K if necessary.
for (int j = k + 1; !G[N*k+k] && j < N; ++j) {
for (int h = k; h < K; ++h) {
if (G[N*h+j]) {
// account column swap
std::swap(perm[k], perm[j]);
for (int i = 0; i < K; ++i)
std::swap(G[N*i+k], G[N*i+j]);
// bring the pivot row up to row k (no-op when h == k)
for (int i = k; h != k && i < N; ++i)
std::swap(G[N*h+i], G[N*k+i]);
break;
}
}
}
assert(G[N*k+k]);
// zero out column entries below pivot
for (int j = k + 1; j < K; ++j)
if (G[N*j+k])
for (int i = k; i < N; ++i)
G[N*j+i] ^= G[N*k+i];
}
}
// Back substitution on the echelon form: clears the entries above each
// diagonal so the left K x K block of G becomes the identity matrix.
void systematic()
{
for (int k = K-1; k; --k)
for (int j = 0; j < k; ++j)
if (G[N*j+k])
for (int i = k; i < N; ++i)
G[N*j+i] ^= G[N*k+i];
}
// Computes the parity bits codeword[K..N-1] from the information bits
// codeword[0..K-1], assuming G is in systematic (identity-left) form.
void encode()
{
for (int i = K; i < N; ++i)
codeword[i] = codeword[0] & G[i];
for (int j = 1; j < K; ++j)
for (int i = K; i < N; ++i)
codeword[i] ^= codeword[j] & G[N*j+i];
}
// Toggles information bit j and incrementally updates the parity bits by
// XORing row j of G, avoiding a full re-encode for every tested pattern.
void flip(int j)
{
codeword[j] ^= 1;
for (int i = K; i < N; ++i)
codeword[i] ^= G[N*j+i];
}
// Correlation metric between the soft values and the BPSK mapping of the
// hard bits (0 -> +1, 1 -> -1); larger means a more likely codeword.
static int metric(const int8_t *soft, const int8_t *hard)
{
int sum = 0;
for (int i = 0; i < N; ++i)
sum += (1 - 2 * hard[i]) * soft[i];
return sum;
}
public:
// Decodes the soft input into hard output bits. Returns true when the
// decision was unique (best metric strictly beat the runner-up), false
// when the result is ambiguous.
bool operator()(int8_t *hard, const int8_t *soft, const int8_t *genmat)
{
// order positions by decreasing reliability (magnitude of soft value)
for (int i = 0; i < N; ++i)
perm[i] = i;
std::sort(perm, perm+N, [soft](int a, int b){ return std::abs(soft[a]) > std::abs(soft[b]); });
// build G from genmat with columns in reliability order
for (int j = 0; j < K; ++j)
for (int i = 0; i < N; ++i)
G[N*j+i] = genmat[N*j+perm[i]];
row_echelon();
systematic();
for (int i = 0; i < N; ++i)
softperm[i] = soft[perm[i]];
// hard-decide the K most reliable (independent) positions and re-encode
for (int i = 0; i < K; ++i)
codeword[i] = softperm[i] < 0;
encode();
for (int i = 0; i < N; ++i)
candidate[i] = codeword[i];
// best: metric of the best candidate so far; next: runner-up metric,
// used to detect ties (ambiguous decisions)
int best = metric(softperm, codeword);
int next = -1;
auto update = [this, &best, &next]() {
int met = metric(softperm, codeword);
if (met > best) {
next = best;
best = met;
for (int i = 0; i < N; ++i)
candidate[i] = codeword[i];
} else if (met > next) {
next = met;
}
};
// exhaustively try all error patterns of weight 1..O over the K
// information bits; each flip()/update()/flip() pair tests one pattern
// and then restores the previous state
for (int a = 0; O >= 1 && a < K; ++a) {
flip(a);
update();
for (int b = a + 1; O >= 2 && b < K; ++b) {
flip(b);
update();
for (int c = b + 1; O >= 3 && c < K; ++c) {
flip(c);
update();
for (int d = c + 1; O >= 4 && d < K; ++d) {
flip(d);
update();
for (int e = d + 1; O >= 5 && e < K; ++e) {
flip(e);
update();
for (int f = e + 1; O >= 6 && f < K; ++f) {
flip(f);
update();
flip(f);
}
flip(e);
}
flip(d);
}
flip(c);
}
flip(b);
}
flip(a);
}
// undo the column permutation when writing the output
for (int i = 0; i < N; ++i)
hard[perm[i]] = candidate[i];
return best != next;
}
};
// Simulates BPSK transmission of a (127, 64) BCH code over an AWGN channel
// across a range of SNRs and prints "SNR BER Eb/N0" lines to stdout.
int main()
{
std::random_device rd;
typedef std::default_random_engine generator;
typedef std::uniform_int_distribution<int> distribution;
// fair coin for random message bits
auto data = std::bind(distribution(0, 1), generator(rd()));
// order of the OSD decoder (maximum number of simultaneous bit flips tried)
const int O = 3;
bool systematic = true;
int loops = 10;
#if 0
// alternative: random (unstructured) linear code
const int N = 128;
const int K = 64;
int8_t genmat[N*K];
for (int j = 0; j < K; ++j)
for (int i = 0; i < N; ++i)
genmat[N*j+i] = systematic && i < K ? i == j : data();
#endif
#if 1
// (127, 64) BCH code built from nine minimal polynomials (bit masks)
const int N = 127;
const int K = 64;
int8_t genmat[N*K];
BoseChaudhuriHocquenghemGenerator<127, 64>::matrix(genmat, systematic, {
0b10001001, 0b10001111, 0b10011101,
0b11110111, 0b10111111, 0b11010101,
0b10000011, 0b11101111, 0b11001011,
});
#endif
LinearEncoder<N, K> encode;
OrderedStatisticsDecoder<N, K, O> decode;
int8_t message[K], decoded[N], codeword[N], orig[N], noisy[N];
double symb[N];
// SNR sweep bounds in dB; min_SNR tracks the lowest error-free SNR seen
double low_SNR = -10;
double high_SNR = 10;
double min_SNR = high_SNR;
int count = 0;
std::cerr << "SNR BER Eb/N0" << std::endl;
// count tracks consecutive error-free SNR steps; the sweep stops after four
for (double SNR = low_SNR; count <= 3 && SNR <= high_SNR; SNR += 0.1, ++count) {
//double mean_signal = 0;
double sigma_signal = 1;
double mean_noise = 0;
// AWGN standard deviation for the given Es/N0 in dB
double sigma_noise = std::sqrt(sigma_signal * sigma_signal / (2 * std::pow(10, SNR / 10)));
typedef std::normal_distribution<double> normal;
auto awgn = std::bind(normal(mean_noise, sigma_noise), generator(rd()));
int awgn_errors = 0;
int quantization_erasures = 0;
int uncorrected_errors = 0;
int ambiguity_erasures = 0;
for (int l = 0; l < loops; ++l) {
for (int i = 0; i < K; ++i)
message[i] = data();
encode(codeword, message, genmat);
// BPSK mapping: bit 0 -> +1, bit 1 -> -1
for (int i = 0; i < N; ++i)
orig[i] = 1 - 2 * codeword[i];
for (int i = 0; i < N; ++i)
symb[i] = orig[i];
for (int i = 0; i < N; ++i)
symb[i] += awgn();
// $LLR=log(\frac{p(x=+1|y)}{p(x=-1|y)})$
// $p(x|\mu,\sigma)=\frac{1}{\sqrt{2\pi}\sigma}e^{-\frac{(x-\mu)^2}{2\sigma^2}}$
double DIST = 2; // BPSK
double fact = DIST / (sigma_noise * sigma_noise);
// quantize the LLRs, clamped to the int8 range
for (int i = 0; i < N; ++i)
noisy[i] = std::min<double>(std::max<double>(std::nearbyint(fact * symb[i]), -127), 127);
bool unique = decode(decoded, noisy, genmat);
for (int i = 0; i < N; ++i)
awgn_errors += noisy[i] * orig[i] < 0;
for (int i = 0; i < N; ++i)
quantization_erasures += !noisy[i];
for (int i = 0; i < N; ++i)
uncorrected_errors += decoded[i] != codeword[i];
// an ambiguous (tied) decoder decision erases the whole block
if (!unique)
ambiguity_erasures += N;
}
double bit_error_rate = (double)uncorrected_errors / (double)(N * loops);
// remember the lowest completely clean SNR; any error resets the
// consecutive-clean counter that terminates the sweep
if (!uncorrected_errors && !ambiguity_erasures)
min_SNR = std::min(min_SNR, SNR);
else
count = 0;
int MOD_BITS = 1; // BPSK
double code_rate = (double)K / (double)N;
double spectral_efficiency = code_rate * MOD_BITS;
// convert Es/N0 to Eb/N0 via the spectral efficiency
double EbN0 = 10 * std::log10(sigma_signal * sigma_signal / (spectral_efficiency * 2 * sigma_noise * sigma_noise));
if (0) {
std::cerr << SNR << " Es/N0 => AWGN with standard deviation of " << sigma_noise << " and mean " << mean_noise << std::endl;
std::cerr << EbN0 << " Eb/N0, using spectral efficiency of " << spectral_efficiency << " from " << code_rate << " code rate and " << MOD_BITS << " bits per symbol." << std::endl;
std::cerr << awgn_errors << " errors caused by AWGN." << std::endl;
std::cerr << quantization_erasures << " erasures caused by quantization." << std::endl;
std::cerr << uncorrected_errors << " errors uncorrected." << std::endl;
std::cerr << ambiguity_erasures << " ambiguity erasures." << std::endl;
std::cerr << bit_error_rate << " bit error rate." << std::endl;
} else {
std::cout << SNR << " " << bit_error_rate << " " << EbN0 << std::endl;
}
}
std::cerr << "QEF at: " << min_SNR << " SNR" << std::endl;
return 0;
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment