using PyPlot
function bern(p)
    # Draw one Bernoulli(p) sample: 1 with probability p, else 0.
    # `rand() < p` draws a single scalar; the original `rand(1)[1]`
    # allocated a temporary 1-element array for every draw.
    return rand() < p ? 1 : 0
end
function BSC(c, p)
    # Simulate a binary symmetric channel with crossover probability p.
    # input:  code vector c (entries in {0,1})
    # output: noisy vector y of the same shape, each bit independently
    #         flipped with probability p
    y = copy(c)
    for i in eachindex(y)
        # Add a Bernoulli(p) flip and reduce mod 2 per element; the
        # original whole-array `y % 2` is not valid in Julia >= 1.0.
        y[i] = mod(y[i] + (rand() < p ? 1 : 0), 2)
    end
    return y
end
Generate random input bits $u\in \{0,1\}^4$ and take $c=uG$.
function genRandBitVec(k)
    # Return a uniformly random binary row vector of length k: the base-2
    # digits (MSB first) of a uniform integer from 0:2^k-1.
    # Fixes an off-by-precedence bug: the original drew from 0:2^(k-1),
    # which covers only messages 0..2^(k-1) instead of all 2^k of them.
    m = rand(0:2^k-1)
    return [(m >> (k - i)) & 1 for i in 1:k]'
end
Generate a codebook with a given generator matrix:
function genCodebook(G)
    # Enumerate the codebook of the linear block code with k x n generator
    # matrix G: row m+1 holds the codeword u*G mod 2, where u is the
    # length-k binary expansion (MSB first) of the message index m.
    k, n = size(G)
    codebook = zeros(Int, 2^k, n)
    for m in 0:2^k-1
        # Message bits of m as a row vector, MSB first.
        u = [(m >> (k - j)) & 1 for j in 1:k]'
        codebook[m+1, :] = mod.(u * G, 2)
    end
    return codebook
end
Consider the systematic generator matrix $G = [I_{k}, A]$:
# Systematic generator matrix G = [I_4 | A] of the (7,4) Hamming code:
# the first 4 code bits are the message bits, the last 3 are parity bits.
G = [1 0 0 0 1 1 0;
0 1 0 0 1 0 1;
0 0 1 0 0 1 1;
0 0 0 1 1 1 1]
# All 16 codewords, one per row.
codebook = genCodebook(G)
# Hamming weight of each codeword (row sums).
# NOTE(review): `sum(A, 2)` is pre-1.0 Julia; modern Julia needs
# `sum(A, dims=2)`.
dist = sum(codebook, 2)'
Then the parity check matrix can be easily obtained by $H=[A^T,I_{n-k}]$:
# Build the parity-check matrix H = [A' | I_{n-k}] from the systematic
# generator G = [I_k | A].
k, n = size(G)
# NOTE(review): `Array{Int}(m, n)` and `eye` are pre-1.0 Julia; modern Julia
# uses `Array{Int}(undef, m, n)` and `Matrix{Int}(I, m, m)` (LinearAlgebra).
H = Array{Int}(n-k, n)
H[:, 1:k] = G[:, k+1:end]'
H[:, k+1:end] = eye(n-k)
H
Check orthogonality: $HG^T=0$
# Orthogonality check: every codeword c = u*G satisfies H*c' = 0 (mod 2),
# so H*G' mod 2 must be the all-zero matrix.
H*G' % 2
function mindistdec(y, codebook)
    # Minimum Hamming-distance decoder for the received row vector y.
    # Returns the closest codeword (row of `codebook`) as a row vector;
    # ties are broken uniformly at random.
    # (Rewritten without `repmat`/`find`/`sum(A, 2)`, all removed in
    # Julia >= 1.0.)
    nwords = size(codebook, 1)
    dist = [sum(mod.(codebook[i, :] .+ vec(y), 2)) for i in 1:nwords]
    mindist = minimum(dist)
    ties = [i for i in 1:nwords if dist[i] == mindist]
    return codebook[rand(ties), :]'  # uniform pick among tied codewords
end
function syndromedec(y, Hmat = H)
    # Syndrome decoder for a single-error-correcting (Hamming) code.
    # input:  y    -- received row vector of code bits
    #         Hmat -- parity-check matrix (defaults to the global H, so
    #                 existing one-argument calls behave as before)
    # output: corrected codeword chat
    chat = copy(y)
    # Syndrome: zero iff y is a codeword; a single-bit error in position j
    # makes it equal to column j of the parity-check matrix.
    s = mod.(vec(Hmat * y'), 2)
    # Find the first matching column and flip that bit.  (The original
    # tested integer-vector equality via `norm(H[:, j] - r) < 1` and
    # hard-coded the loop bound 7; this generalizes to any code length.)
    for j in 1:size(Hmat, 2)
        if Hmat[:, j] == s
            chat[j] = mod(chat[j] + 1, 2)  # correct the identified bit
            break
        end
    end
    return chat
end
For a sanity check of our decoders:
# Sanity check: corrupt every codeword with every possible single-bit error
# and verify that both decoders recover the original codeword.  Any failure
# prints a message; silence means both decoders pass.
for i in 1:7
z = zeros(Int64, 7)'
z[i] = 1
# Flip bit i of every codeword (pre-1.0 `repmat` and array `%`).
corruptedcodebook = (codebook + repmat(z, size(codebook,1), 1)) %2
for j in 1:size(codebook, 1)
c = corruptedcodebook[j, :]' # row vector
chat1 = syndromedec(c)
chat2 = mindistdec(c, codebook)
if codebook[j,:]' != chat1
print("Hamming decoder is wrong!\n")
elseif codebook[j,:]' != chat2
print("Minimum distance decoder is wrong!\n")
end
# print(codebook[j,:]'==chat1, " ", codebook[j,:]'==chat2, "\n")
# print(codebook[j,:]', " ", chat1, " ", chat2, "\n")
end
end
The Hamming code can correct 1 error bit. Hence, decoding succeeds if and only if at most 1 bit is flipped. Analytically, therefore, we have the probability of failure: $1-(1-p)^7-7p(1-p)^6=1-(1-p)^6(1+6p)$.
Find error rate of a given code.
# Estimate the bit error rate (BER) and word error rate (WER) of the code
# generated by G over a BSC(p) channel, for each p in plist, by Monte Carlo
# simulation with `nblock` transmitted words per channel parameter.
# Returns (ber_list, wer_list), one entry per element of plist.
function findER(G, plist, nblock=10^4)
    k, n = size(G)
    codebook = genCodebook(G)
    ber_list = zeros(Float64, length(plist))
    wer_list = zeros(Float64, length(plist))
    for (jj, p) in enumerate(plist)
        for ii in 1:nblock
            u = genRandBitVec(k)            # random message bits
            c = mod.(u * G, 2)              # transmitted codeword
            y = BSC(c, p)                   # channel output
            chat = mindistdec(y, codebook)  # decoded codeword
            # The code is systematic, so the first k code bits are the
            # message bits; count how many were decoded incorrectly.
            nerr = sum(mod.(c[1:k] .+ chat[1:k], 2))
            ber_list[jj] += nerr / k
            wer_list[jj] += nerr > 0 ? 1 : 0
        end
    end
    ber_list ./= nblock
    wer_list ./= nblock
    return ber_list, wer_list
end
Now with (7,4) Hamming code:
# Simulate the (7,4) Hamming code over a sweep of crossover probabilities
# and compare the measured WER against the analytical failure probability
# 1 - (1-p)^6 (1+6p).
plist = 0.05:0.05:0.5
ber_list_ham, wer_list_ham = findER(G, plist, 10^4)
plot(plist, ber_list_ham, label="BER", marker="o")
plot(plist, wer_list_ham, label="WER", marker="*")
# plot(plist, 4*ber_list_ham, label="4*BER", marker="*")
plot(plist, 1-(1-plist).^6.*(1+6*plist), label="WER: Theory")
title("(7,4) Hamming code")
xlabel("Error probability p")
ylabel("Error rate")
legend(loc = "upper left", fancybox = "true")
# A second systematic (7,4) code, G2 = [I_4 | A2]: each of the first three
# parity columns repeats a single message bit, and only the last parity bit
# mixes all four message bits.
G2 = [1 0 0 0 1 0 0;
0 1 0 0 0 1 0;
0 0 1 0 0 0 1;
0 0 0 1 1 1 1]
codebook2 = genCodebook(G2)
The minimum distance of this code is 2.
# Codeword weights of the second code; the smallest nonzero weight is its
# minimum distance.  (Pre-1.0 `sum(A, 2)`; modern Julia: `sum(A, dims=2)`.)
weight = sum(codebook2, 2)'
ber_list, wer_list = findER(G2, plist)
plot(plist, ber_list, label="BER (f)", marker="o")
plot(plist, wer_list, label="WER (f)", marker="o")
plot(plist, ber_list_ham, label="BER: (7,4) Hamming code")
plot(plist, wer_list_ham, label="WER: (7,4) Hamming code")
# plot(plist, 4*ber_list, label="4*BER", marker="*")
xlabel("Error probability p")
ylabel("Error rate")
legend(loc = "upper left", fancybox = "true")
We see that Hamming code uniformly outperforms the second code. This is expected, since $d_{\text{min}}$ for the second code is only 2 (verify this!), whereas it is 3 for Hamming.
Assuming that if there are multiple codewords closest to the received vector, then the minimum distance decoder chooses one of the closest codewords uniformly at random, can you show that the word error probability for the second code over a BSC$(p)$ is given by $$ \begin{align*} P_e & = 3p(1-p)^6+18p^2(1-p)^5+35p^3(1-p)^4+35p^4(1-p)^3+21p^5(1-p)^2+7p^6(1-p)+p^7\,? \end{align*} $$
(Hint: you might want to use the following modified minimum distance decoder.)
function mindistdec_debug(y, codebook)
    # Diagnostic variant of mindistdec: instead of decoding, return the
    # indices of ALL codewords at minimum Hamming distance from y
    # (useful for counting tie events when deriving the WER formula).
    # The original's trailing comment ("pick one at random") was stale --
    # this function returns the whole tie list and never samples.
    nwords = size(codebook, 1)
    dist = [sum(mod.(codebook[i, :] .+ vec(y), 2)) for i in 1:nwords]
    mindist = minimum(dist)
    return [i for i in 1:nwords if dist[i] == mindist]
end
Let us verify that theory matches experiments for the second code.
# coeff[w+1] = number of weight-w error patterns (w = 0..7) that cause a
# word error for the second code, assuming uniform random tie-breaking.
coeff = [0.0 3.0 18.0 35.0 35.0 21.0 7.0 1.0]
function thwer(p, coeff)
    # Theoretical word error rate over BSC(p):
    #   sum_w coeff[w+1] * p^w * (1-p)^(n-w),
    # where coeff[w+1] weights the weight-w error patterns that cause a
    # word error, and n = length(coeff) - 1 is the block length.
    # Generalized from the original's hard-coded block length 7 (it only
    # matched n = 7 codes); with an 8-entry coeff the result is unchanged.
    n = length(coeff) - 1
    s = zeros(length(p))
    for w in 0:n
        s += coeff[w+1] .* p.^w .* (1 .- p).^(n - w)
    end
    return s
end
# Overlay the theoretical WER curve on the simulated WER of the second code.
plot(plist, thwer(plist, coeff), label="WER: Theoretical")
plot(plist, wer_list, label="WER (f)", marker="o")
xlabel("Error probability p")
ylabel("Error rate")
legend(loc = "upper left", fancybox = "true")
Generate a random systematic generator matrix $G=[I_k, A]$. We choose $A$ to be a $k\times(n-k)$ random matrix whose entries are drawn i.i.d. from Bernoulli(1/2).
# Average the error rates of `niter` random systematic (7,4) codes
# G = [I_k | A], with A drawn entrywise from Bernoulli(1/2), and compare
# against the Hamming code and the second code.
niter = 10^3
avgber_list = zeros(Float64, 1:length(plist))
avgwer_list = zeros(Float64, 1:length(plist))
n, k = 7, 4
# NOTE(review): `tic()`/`toc()` and `eye` are pre-1.0 Julia; modern Julia
# uses `@time`/`@elapsed` and `Matrix{Int}(I, k, k)` (LinearAlgebra).
tic();
for ll = 1:niter
A = reshape([bern(1/2) for i in 1:k*(n-k)], k, n-k)
Grand = [eye(Int64, k) A]
# Fewer blocks per code (10^2) since we average over many random codes.
ber_list_new, wer_list_new = findER(Grand, plist, 10^2)
avgber_list += ber_list_new
avgwer_list += wer_list_new
end
toc();
avgber_list /= niter;
avgwer_list /= niter;
plot(plist, avgber_list, label="BER: Random code", marker="*")
plot(plist, avgwer_list, label="WER: Random code", marker="*")
plot(plist, ber_list, label="BER (f)", marker="o")
plot(plist, wer_list, label="WER (f)", marker="o")
plot(plist, ber_list_ham, label="BER: (7,4) Hamming code")
plot(plist, wer_list_ham, label="WER: (7,4) Hamming code")
title("Random (7,4) code")
xlabel("Error probability p")
ylabel("Error rate")
legend(loc = "upper left", fancybox = "true")
We see that Hamming code uniformly outperforms both the random code and the second code, but the random codes perform marginally better than the second code.