From 55523f0ffeda1fc43e5a2abe350f6dea94121df7 Mon Sep 17 00:00:00 2001 From: Roberto Nobrega Date: Tue, 31 Dec 2024 20:51:48 -0300 Subject: [PATCH 1/8] feat: add method to compute Kraft parameter of a fixed-to-variable length code --- .../_lossless_coding/FixedToVariableCode.py | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/komm/_lossless_coding/FixedToVariableCode.py b/src/komm/_lossless_coding/FixedToVariableCode.py index 32a82107..2a8a72ec 100644 --- a/src/komm/_lossless_coding/FixedToVariableCode.py +++ b/src/komm/_lossless_coding/FixedToVariableCode.py @@ -257,6 +257,34 @@ def is_prefix_free(self) -> bool: """ return is_prefix_free(self.codewords) + def kraft_parameter(self) -> float: + r""" + Computes the Kraft parameter $K$ of the code. This quantity is given by + $$ + K = \sum_{u \in \mathcal{S}^k} T^{-{\ell_u}}, + $$ + where $\ell_u$ is the length of the codeword $\Enc(u)$, $T$ is the target cardinality, and $k$ is the source block size. + + Returns: + kraft_parameter: The Kraft parameter $K$ of the code. + + Examples: + >>> code = komm.FixedToVariableCode.from_codewords(5, [(0,0,0), (0,0,1), (0,1,0), (1,0,1), (1,1)]) + >>> code.kraft_parameter() + np.float64(0.75) + + >>> code = komm.FixedToVariableCode.from_codewords(4, [(0,), (1,0), (1,1,0), (1,1,1)]) + >>> code.kraft_parameter() + np.float64(1.0) + + >>> code = komm.FixedToVariableCode.from_codewords(4, [(0,0), (1,1), (0,), (1,)]) + >>> code.kraft_parameter() + np.float64(1.5) + """ + T = self.target_cardinality + lengths = np.array([len(word) for word in self.codewords]) + return np.sum(np.float_power(T, -lengths)) + def rate(self, pmf: npt.ArrayLike) -> float: r""" Computes the expected rate $R$ of the code, considering a given pmf. 
This quantity is given by From 9d3941e149f7487540f837ad63f45541b77eef69 Mon Sep 17 00:00:00 2001 From: Roberto Nobrega Date: Tue, 31 Dec 2024 21:07:17 -0300 Subject: [PATCH 2/8] chore: polish lossless coding --- .../_lossless_coding/FixedToVariableCode.py | 6 +-- src/komm/_lossless_coding/HuffmanCode.py | 38 +++++++-------- src/komm/_lossless_coding/TunstallCode.py | 23 +++++----- .../_lossless_coding/VariableToFixedCode.py | 9 ++-- src/komm/_lossless_coding/util.py | 20 ++++---- tests/lossless_coding/test_codes.py | 46 +++++++++---------- 6 files changed, 72 insertions(+), 70 deletions(-) diff --git a/src/komm/_lossless_coding/FixedToVariableCode.py b/src/komm/_lossless_coding/FixedToVariableCode.py index 2a8a72ec..8c199958 100644 --- a/src/komm/_lossless_coding/FixedToVariableCode.py +++ b/src/komm/_lossless_coding/FixedToVariableCode.py @@ -8,7 +8,7 @@ from .util import ( Word, is_prefix_free, - is_uniquely_decipherable, + is_uniquely_parsable, parse_fixed_length, parse_prefix_free, ) @@ -236,7 +236,7 @@ def is_uniquely_decodable(self) -> bool: >>> code.is_uniquely_decodable() # 010 can be parsed as 0|10 or 01|0 False """ - return is_uniquely_decipherable(self.codewords) + return is_uniquely_parsable(self.codewords) def is_prefix_free(self) -> bool: r""" @@ -334,7 +334,7 @@ def encode(self, input: npt.ArrayLike) -> npt.NDArray[np.integer]: >>> code.encode([0, 1, 0]) # Not a multiple of the source block size Traceback (most recent call last): ... 
- ValueError: length of 'input' must be a multiple of block size 2 (got 3) + ValueError: length of input must be a multiple of block size 2 (got 3) >>> code.encode([0, 7, 0, 0]) # 07 is not a valid source word Traceback (most recent call last): diff --git a/src/komm/_lossless_coding/HuffmanCode.py b/src/komm/_lossless_coding/HuffmanCode.py index 85eba411..8fbd1081 100644 --- a/src/komm/_lossless_coding/HuffmanCode.py +++ b/src/komm/_lossless_coding/HuffmanCode.py @@ -1,4 +1,5 @@ -import heapq +from dataclasses import dataclass +from heapq import heapify, heappop, heappush from itertools import product from math import prod from typing import Literal @@ -21,6 +22,7 @@ def HuffmanCode( Notes: Huffman codes are always [prefix-free](/ref/FixedToVariableCode/#is_prefix_free) (hence [uniquely decodable](/ref/FixedToVariableCode/#is_uniquely_decodable)). + Parameters: pmf: The probability mass function of the source. source_block_size: The source block size $k$. The default value is $k = 1$. @@ -63,12 +65,12 @@ def HuffmanCode( def huffman_algorithm( pmf: PMF, source_block_size: int, policy: Literal["high", "low"] ) -> list[Word]: + @dataclass class Node: - def __init__(self, index: int, probability: float): - self.index: int = index - self.probability: float = probability - self.parent: int | None = None - self.bit: int = -1 + index: int + probability: float + parent: int | None = None + bit: int = -1 def __lt__(self, other: Self) -> bool: i0, p0 = self.index, self.probability @@ -79,28 +81,28 @@ def __lt__(self, other: Self) -> bool: return (p0, -i0) < (p1, -i1) tree = [ - Node(i, prod(probs)) - for (i, probs) in enumerate(product(pmf, repeat=source_block_size)) + Node(index, prod(probs)) + for (index, probs) in enumerate(product(pmf, repeat=source_block_size)) ] - queue = [node for node in tree] - heapq.heapify(queue) - while len(queue) > 1: - node1 = heapq.heappop(queue) - node0 = heapq.heappop(queue) + heap = tree.copy() + heapify(heap) + while len(heap) > 1: + node1 
= heappop(heap) + node0 = heappop(heap) node1.bit = 1 node0.bit = 0 node = Node(index=len(tree), probability=node0.probability + node1.probability) node0.parent = node1.parent = node.index - heapq.heappush(queue, node) + heappush(heap, node) tree.append(node) codewords: list[Word] = [] - for symbol in range(pmf.size**source_block_size): - node = tree[symbol] + for index in range(pmf.size**source_block_size): + node = tree[index] bits: list[int] = [] while node.parent is not None: - bits.insert(0, node.bit) + bits.append(node.bit) node = tree[node.parent] - codewords.append(tuple(bits)) + codewords.append(tuple(reversed(bits))) return codewords diff --git a/src/komm/_lossless_coding/TunstallCode.py b/src/komm/_lossless_coding/TunstallCode.py index 05143949..b3647939 100644 --- a/src/komm/_lossless_coding/TunstallCode.py +++ b/src/komm/_lossless_coding/TunstallCode.py @@ -1,4 +1,5 @@ -import heapq +from dataclasses import dataclass +from heapq import heapify, heappop, heappush from math import ceil, log2 import numpy.typing as npt @@ -58,22 +59,22 @@ def TunstallCode( def tunstall_algorithm(pmf: PMF, code_block_size: int) -> list[Word]: + @dataclass class Node: - def __init__(self, symbols: Word, probability: float): - self.symbols = symbols - self.probability = probability + sourceword: Word + probability: float def __lt__(self, other: Self) -> bool: return -self.probability < -other.probability - queue = [Node((symbol,), probability) for (symbol, probability) in enumerate(pmf)] - heapq.heapify(queue) + heap = [Node((symbol,), probability) for (symbol, probability) in enumerate(pmf)] + heapify(heap) - while len(queue) + pmf.size - 1 < 2**code_block_size: - node = heapq.heappop(queue) + while len(heap) + pmf.size - 1 < 2**code_block_size: + node = heappop(heap) for symbol, probability in enumerate(pmf): - new_node = Node(node.symbols + (symbol,), node.probability * probability) - heapq.heappush(queue, new_node) - sourcewords = sorted(node.symbols for node in queue) + 
new_node = Node(node.sourceword + (symbol,), node.probability * probability) + heappush(heap, new_node) + sourcewords = sorted(node.sourceword for node in heap) return sourcewords diff --git a/src/komm/_lossless_coding/VariableToFixedCode.py b/src/komm/_lossless_coding/VariableToFixedCode.py index d5ce1763..e65593fb 100644 --- a/src/komm/_lossless_coding/VariableToFixedCode.py +++ b/src/komm/_lossless_coding/VariableToFixedCode.py @@ -9,7 +9,7 @@ Word, is_fully_covering, is_prefix_free, - is_uniquely_decipherable, + is_uniquely_parsable, parse_fixed_length, parse_prefix_free, ) @@ -272,7 +272,7 @@ def is_uniquely_encodable(self) -> bool: >>> code.is_uniquely_encodable() True """ - return is_uniquely_decipherable(self.sourcewords) + return is_uniquely_parsable(self.sourcewords) def is_prefix_free(self) -> bool: r""" @@ -317,9 +317,10 @@ def rate(self, pmf: npt.ArrayLike) -> float: np.float64(1.3846153846153846) """ pmf = PMF(pmf) + n = self.target_block_size probabilities = [np.prod([pmf[x] for x in word]) for word in self.sourcewords] lengths = [len(word) for word in self.sourcewords] - return self.target_block_size / np.dot(lengths, probabilities) + return n / np.dot(lengths, probabilities) def encode(self, input: npt.ArrayLike) -> npt.NDArray[np.integer]: r""" @@ -393,7 +394,7 @@ def decode(self, input: npt.ArrayLike) -> npt.NDArray[np.integer]: >>> code.decode([1, 1, 0, 0, 1]) # Not a multiple of target block size Traceback (most recent call last): ... 
- ValueError: length of 'input' must be a multiple of block size 2 (got 5) + ValueError: length of input must be a multiple of block size 2 (got 5) >>> code.decode([0, 0, 1, 1]) # 11 is not a valid target word Traceback (most recent call last): diff --git a/src/komm/_lossless_coding/util.py b/src/komm/_lossless_coding/util.py index ff9461ce..00b47cb1 100644 --- a/src/komm/_lossless_coding/util.py +++ b/src/komm/_lossless_coding/util.py @@ -19,7 +19,7 @@ def is_prefix_free(words: list[Word]) -> bool: return True -def is_uniquely_decipherable(words: list[Word]) -> bool: +def is_uniquely_parsable(words: list[Word]) -> bool: # Sardinas–Patterson algorithm. See [Say06, Sec. 2.4.1]. augmented_words = set(words) while True: @@ -45,7 +45,7 @@ def parse_fixed_length( ) -> npt.NDArray[np.integer]: if input.size % block_size != 0: raise ValueError( - "length of 'input' must be a multiple of block size" + "length of input must be a multiple of block size" f" {block_size} (got {len(input)})" ) try: @@ -63,23 +63,21 @@ def parse_prefix_free( allow_incomplete: bool, ) -> npt.NDArray[np.integer]: output: list[int] = [] - i, j = 0, 0 - while j < len(input): - j += 1 - key = tuple(input[i:j]) + i = 0 + for j in range(len(input)): + key = tuple(input[i : j + 1]) if key in dictionary: output.extend(dictionary[key]) - i = j + i = j + 1 if i == len(input): return np.asarray(output) - - if not allow_incomplete: + elif not allow_incomplete: raise ValueError("input contains invalid word") - remaining = tuple(input[i:]) + remainder = tuple(input[i:]) for key, value in dictionary.items(): - if is_prefix_of(remaining, key): + if is_prefix_of(remainder, key): output.extend(value) return np.asarray(output) diff --git a/tests/lossless_coding/test_codes.py b/tests/lossless_coding/test_codes.py index 1dcf8677..a8d29e84 100644 --- a/tests/lossless_coding/test_codes.py +++ b/tests/lossless_coding/test_codes.py @@ -3,98 +3,98 @@ from komm._lossless_coding.util import ( is_fully_covering, 
is_prefix_free, - is_uniquely_decipherable, + is_uniquely_parsable, ) test_cases = [ { "words": [(0, 0), (0, 1), (1, 0), (1, 1)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": True, }, { # [Say06, Sec. 2.4.1, Code 2] "words": [(0,), (1,), (0, 0), (1, 1)], - "uniquely_decipherable": False, + "uniquely_parsable": False, "prefix_free": False, }, { # [Say06, Sec. 2.4.1, Code 3] "words": [(0,), (1, 0), (1, 1, 0), (1, 1, 1)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": True, }, { # [Say06, Sec. 2.4.1, Code 4] "words": [(0,), (0, 1), (0, 1, 1), (0, 1, 1, 1)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": False, }, { # [Say06, Sec. 2.4.1, Code 5] "words": [(0,), (0, 1), (1, 1)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": False, }, { # [Say06, Sec. 2.4.1, Code 6] "words": [(0,), (0, 1), (1, 0)], - "uniquely_decipherable": False, + "uniquely_parsable": False, "prefix_free": False, }, { # [CT06, Sec. 5.1, Table 5.1, Code 2] "words": [(0,), (0, 1, 0), (0, 1), (1, 0)], - "uniquely_decipherable": False, + "uniquely_parsable": False, "prefix_free": False, }, { # [CT06, Sec. 
5.1, Table 5.1, Code 3] "words": [(1, 0), (0, 0), (1, 1), (1, 1, 0)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": False, }, { # Wikipedia example "words": [(1,), (0, 1, 1), (0, 1, 1, 1, 0), (1, 1, 1, 0), (1, 0, 0, 1, 1)], - "uniquely_decipherable": False, + "uniquely_parsable": False, "prefix_free": False, }, { # Old false negative for UD "words": [(0,), (0, 1), (1, 1, 0)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": False, }, { "words": [(0,), (0, 0)], - "uniquely_decipherable": False, + "uniquely_parsable": False, "prefix_free": False, }, { # [Sedgewick-Wayne, Exercise 5.5.2]: "Any suffix-free code is uniquely decodable" "words": [(0,), (0, 1)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": False, }, { # [Sedgewick-Wayne, Exercise 5.5.3] "words": [(0, 0, 1, 1), (0, 1, 1), (1, 1), (1, 1, 1, 0)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": False, }, { # [Sedgewick-Wayne, Exercise 5.5.3] "words": [(0, 1), (1, 0), (0, 1, 1), (1, 1, 0)], - "uniquely_decipherable": False, # Course page is wrong: (01)(110) = (011)(10) + "uniquely_parsable": False, # Course page is wrong: (01)(110) = (011)(10) "prefix_free": False, }, { # [Sedgewick-Wayne, Exercise 5.5.4] "words": [(1,), (1, 0, 0, 0, 0, 0), (0, 0)], - "uniquely_decipherable": True, + "uniquely_parsable": True, "prefix_free": False, }, { # [Sedgewick-Wayne, Exercise 5.5.4] "words": [(0, 1), (1, 0, 0, 1), (1, 0, 1, 1), (1, 1, 1), (1, 1, 1, 0)], - "uniquely_decipherable": False, + "uniquely_parsable": False, "prefix_free": False, }, { # [Sedgewick-Wayne, Exercise 5.5.4] "words": [(1,), (0, 1, 1), (0, 1, 1, 1, 0), (1, 1, 1, 0), (1, 0, 0, 1, 1)], - "uniquely_decipherable": False, + "uniquely_parsable": False, "prefix_free": False, }, { "words": [(1, 0, 1), (0, 0, 1, 1, 1), (0, 1, 0, 0), (1,)], - "uniquely_decipherable": False, # (101)(00111) = (1)(0100)(1)(1)(1) + "uniquely_parsable": False, # 
(101)(00111) = (1)(0100)(1)(1)(1) "prefix_free": False, }, ] @@ -109,11 +109,11 @@ def test_is_prefix_free(words, expected_prefix_free): @pytest.mark.parametrize( - "words, expected_uniquely_decipherable", - [(case["words"], case["uniquely_decipherable"]) for case in test_cases], + "words, expected_uniquely_parsable", + [(case["words"], case["uniquely_parsable"]) for case in test_cases], ) -def test_is_uniquely_decipherable(words, expected_uniquely_decipherable): - assert is_uniquely_decipherable(words) == expected_uniquely_decipherable +def test_is_uniquely_parsable(words, expected_uniquely_parsable): + assert is_uniquely_parsable(words) == expected_uniquely_parsable @pytest.mark.parametrize( From b2ee769d5293160b9e0bf3c519c8dcf56a83699c Mon Sep 17 00:00:00 2001 From: Roberto Nobrega Date: Wed, 1 Jan 2025 01:17:23 -0300 Subject: [PATCH 3/8] test: update tests for Huffman and Tunstall codes --- src/komm/_util/information_theory.py | 5 ++++ tests/lossless_coding/test_huffman_code.py | 26 +++++++++++++++++---- tests/lossless_coding/test_tunstall_code.py | 20 +++++++++------- 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/src/komm/_util/information_theory.py b/src/komm/_util/information_theory.py index 16960b1b..a849cea1 100644 --- a/src/komm/_util/information_theory.py +++ b/src/komm/_util/information_theory.py @@ -37,6 +37,11 @@ def __array_finalize__(self, obj: npt.NDArray[np.floating] | None) -> None: return +def random_pmf(size: int) -> PMF: + pmf = np.random.rand(size) + return PMF(pmf / pmf.sum()) + + class TransitionMatrix(npt.NDArray[np.floating]): def __new__(cls, values: npt.ArrayLike): arr = np.asarray(values, dtype=float) diff --git a/tests/lossless_coding/test_huffman_code.py b/tests/lossless_coding/test_huffman_code.py index ce9b4002..b7f0f1d9 100644 --- a/tests/lossless_coding/test_huffman_code.py +++ b/tests/lossless_coding/test_huffman_code.py @@ -2,6 +2,7 @@ import pytest import komm +from komm._util.information_theory import 
random_pmf def test_huffman_code_1(): @@ -81,12 +82,29 @@ def test_huffman_code_invalid_call(): komm.HuffmanCode([0.5, 0.5], policy="unknown") # type: ignore -@pytest.mark.parametrize("source_cardinality", [2, 3, 4, 5, 6]) -@pytest.mark.parametrize("source_block_size", [1, 2]) +@pytest.mark.parametrize("source_cardinality", range(2, 7)) +@pytest.mark.parametrize("source_block_size", range(1, 4)) +@pytest.mark.parametrize("policy", ["high", "low"]) +def test_huffman_code_random_pmf(source_cardinality, source_block_size, policy): + for _ in range(10): + pmf = random_pmf(source_cardinality) + code = komm.HuffmanCode(pmf, source_block_size=source_block_size, policy=policy) + assert code.is_uniquely_decodable() + assert code.is_prefix_free() + assert code.kraft_parameter() <= 1 + entropy = komm.entropy(pmf) + assert entropy <= code.rate(pmf) <= entropy + 1 / source_block_size + # Permute pmf and check if the rate is the same. + pmf1 = pmf[np.random.permutation(source_cardinality)] + code1 = komm.HuffmanCode(pmf1, source_block_size, policy) + np.testing.assert_almost_equal(code.rate(pmf), code1.rate(pmf1)) + + +@pytest.mark.parametrize("source_cardinality", range(2, 7)) +@pytest.mark.parametrize("source_block_size", range(1, 4)) @pytest.mark.parametrize("policy", ["high", "low"]) def test_huffman_code_encode_decode(source_cardinality, source_block_size, policy): - integers = np.random.randint(0, 100, source_cardinality) - pmf = integers / integers.sum() + pmf = random_pmf(source_cardinality) dms = komm.DiscreteMemorylessSource(pmf) code = komm.HuffmanCode(pmf, source_block_size=source_block_size, policy=policy) x = dms(1000 * source_block_size) diff --git a/tests/lossless_coding/test_tunstall_code.py b/tests/lossless_coding/test_tunstall_code.py index e62a3cbf..481e2654 100644 --- a/tests/lossless_coding/test_tunstall_code.py +++ b/tests/lossless_coding/test_tunstall_code.py @@ -2,6 +2,7 @@ import pytest import komm +from komm._util.information_theory import random_pmf 
def test_tunstall_code(): @@ -28,26 +29,29 @@ def test_tunstall_code_invalid_init(): komm.TunstallCode([0.5, 0.5], 0) -@pytest.mark.parametrize("source_cardinality", range(2, 10)) +@pytest.mark.parametrize("source_cardinality", range(2, 9)) @pytest.mark.parametrize("target_block_size", range(1, 7)) -def test_random_tunstall_code(source_cardinality, target_block_size): +def test_tunstall_code_random_pmf(source_cardinality, target_block_size): if 2**target_block_size < source_cardinality: # target block size too low return for _ in range(10): - pmf = np.random.rand(source_cardinality) - pmf /= pmf.sum() + pmf = random_pmf(source_cardinality) code = komm.TunstallCode(pmf, target_block_size) - assert code.is_prefix_free() assert code.is_fully_covering() + assert code.is_uniquely_encodable() + assert code.is_prefix_free() + # Permute pmf and check if the rate is the same. + pmf1 = pmf[np.random.permutation(source_cardinality)] + code1 = komm.TunstallCode(pmf1, target_block_size) + np.testing.assert_almost_equal(code.rate(pmf), code1.rate(pmf1)) -@pytest.mark.parametrize("source_cardinality", range(2, 10)) +@pytest.mark.parametrize("source_cardinality", range(2, 9)) @pytest.mark.parametrize("target_block_size", range(1, 7)) def test_tunstall_code_encode_decode(source_cardinality, target_block_size): if 2**target_block_size < source_cardinality: # target block size too low return - integers = np.random.randint(0, 100, source_cardinality) - pmf = integers / integers.sum() + pmf = random_pmf(source_cardinality) dms = komm.DiscreteMemorylessSource(pmf) code = komm.TunstallCode(pmf, target_block_size=target_block_size) x = dms(1000) From 252c1dd1b8a65d67644f2b2448bf9454babf8957 Mon Sep 17 00:00:00 2001 From: Roberto Nobrega Date: Wed, 1 Jan 2025 01:43:29 -0300 Subject: [PATCH 4/8] chore: update docstring of `WagnerDecoder` for consistency --- src/komm/_error_control_decoders/WagnerDecoder.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/komm/_error_control_decoders/WagnerDecoder.py b/src/komm/_error_control_decoders/WagnerDecoder.py index f19113dc..6cb470c1 100644 --- a/src/komm/_error_control_decoders/WagnerDecoder.py +++ b/src/komm/_error_control_decoders/WagnerDecoder.py @@ -11,7 +11,7 @@ @dataclass class WagnerDecoder(base.BlockDecoder[SingleParityCheckCode]): r""" - Wagner decoder for [single parity-check codes](/ref/SingleParityCheckCode). For more information, see CF07, Sec. III.C. + Wagner decoder for [single parity-check codes](/ref/SingleParityCheckCode). For more details, see CF07, Sec. III.C. Parameters: code: The single parity-check code to be used for decoding. From 725a121b2772a88a26e7da4add88830c783a64f8 Mon Sep 17 00:00:00 2001 From: Roberto Nobrega Date: Thu, 2 Jan 2025 22:58:57 -0300 Subject: [PATCH 5/8] feat: implement Shannon and Fano codes --- site/toc.yaml | 2 + src/komm/_lossless_coding/FanoCode.py | 72 ++++++++++++++++++++ src/komm/_lossless_coding/ShannonCode.py | 79 ++++++++++++++++++++++ src/komm/_lossless_coding/__init__.py | 4 ++ src/komm/_lossless_coding/util.py | 15 ++++ tests/lossless_coding/test_fano_code.py | 37 ++++++++++ tests/lossless_coding/test_shannon_code.py | 48 +++++++++++++ 7 files changed, 257 insertions(+) create mode 100644 src/komm/_lossless_coding/FanoCode.py create mode 100644 src/komm/_lossless_coding/ShannonCode.py create mode 100644 tests/lossless_coding/test_fano_code.py create mode 100644 tests/lossless_coding/test_shannon_code.py diff --git a/site/toc.yaml b/site/toc.yaml index a0222ce9..5f86b68c 100644 --- a/site/toc.yaml +++ b/site/toc.yaml @@ -73,6 +73,8 @@ Sequences: Source coding: Lossless coding: - FixedToVariableCode + - ShannonCode + - FanoCode - HuffmanCode - VariableToFixedCode - TunstallCode diff --git a/src/komm/_lossless_coding/FanoCode.py b/src/komm/_lossless_coding/FanoCode.py new file mode 100644 index 00000000..31e4bdf7 --- /dev/null +++ b/src/komm/_lossless_coding/FanoCode.py @@ -0,0 +1,72 @@ +import numpy as np
+import numpy.typing as npt + +from .._util.information_theory import PMF +from .FixedToVariableCode import FixedToVariableCode +from .util import Word, empty_mapping, extended_probabilities + + +def FanoCode( + pmf: npt.ArrayLike, + source_block_size: int = 1, +) -> FixedToVariableCode: + r""" + Binary Fano code. It is a [fixed-to-variable length code](/ref/FixedToVariableCode) in which the source words are first sorted in descending order of probability and then are recursively partitioned into two groups of approximately equal total probability, assigning bit $\mathtt{0}$ to one group and bit $\mathtt{1}$ to the other, until each source word is assigned a unique codeword. For more details, see [Wikipedia: Shannon–Fano coding](https://en.wikipedia.org/wiki/Shannon%E2%80%93Fano_coding). + + Notes: + Fano codes are always [prefix-free](/ref/FixedToVariableCode/#is_prefix_free) (hence [uniquely decodable](/ref/FixedToVariableCode/#is_uniquely_decodable)). + + Parameters: + pmf: The probability mass function of the source. + source_block_size: The source block size $k$. The default value is $k = 1$. 
+ + Examples: + >>> pmf = [0.7, 0.15, 0.15] + + >>> code = komm.FanoCode(pmf, 1) + >>> code.enc_mapping # doctest: +NORMALIZE_WHITESPACE + {(0,): (0,), + (1,): (1, 0), + (2,): (1, 1)} + >>> code.rate(pmf) # doctest: +NUMBER + np.float64(1.3) + + >>> code = komm.FanoCode(pmf, 2) + >>> code.enc_mapping # doctest: +NORMALIZE_WHITESPACE + {(0, 0): (0,), + (0, 1): (1, 0, 0), + (0, 2): (1, 0, 1), + (1, 0): (1, 1, 0), + (1, 1): (1, 1, 1, 1, 0, 0), + (1, 2): (1, 1, 1, 1, 0, 1), + (2, 0): (1, 1, 1, 0), + (2, 1): (1, 1, 1, 1, 1, 0), + (2, 2): (1, 1, 1, 1, 1, 1)} + >>> code.rate(pmf) # doctest: +NUMBER + np.float64(1.1975) + """ + pmf = PMF(pmf) + return FixedToVariableCode( + source_cardinality=pmf.size, + target_cardinality=2, + source_block_size=source_block_size, + enc_mapping=fano_algorithm(pmf, source_block_size), + ) + + +def fano_algorithm(pmf: PMF, source_block_size: int) -> dict[Word, Word]: + enc_mapping = empty_mapping(pmf.size, source_block_size) + xpmf = extended_probabilities(pmf, source_block_size) + stack: list[tuple[list[tuple[Word, float]], Word]] = [(xpmf, ())] + while stack: + current_pmf, prefix = stack.pop() + if len(current_pmf) == 1: + u, _ = current_pmf[0] + enc_mapping[u] = prefix + continue + probs = [p for _, p in current_pmf] + total = np.sum(probs) + index = np.argmin(np.abs(np.cumsum(probs) - total / 2)) + stack.append((current_pmf[index + 1 :], prefix + (1,))) + stack.append((current_pmf[: index + 1], prefix + (0,))) + return enc_mapping diff --git a/src/komm/_lossless_coding/ShannonCode.py b/src/komm/_lossless_coding/ShannonCode.py new file mode 100644 index 00000000..114ed78b --- /dev/null +++ b/src/komm/_lossless_coding/ShannonCode.py @@ -0,0 +1,79 @@ +from math import ceil, log2 + +import numpy.typing as npt + +from .._util.information_theory import PMF +from .FixedToVariableCode import FixedToVariableCode +from .util import Word, empty_mapping, extended_probabilities + + +def ShannonCode( + pmf: npt.ArrayLike, + source_block_size: int = 
1, +) -> FixedToVariableCode: + r""" + Binary Shannon code. It is a [fixed-to-variable length code](/ref/FixedToVariableCode) in which the length of the codeword $\Enc(u)$ for a source symbol $u \in \mathcal{S}^k$ is given by + $$ + \ell_u = \left\lceil \log_2 \frac{1}{p_u} \right\rceil, + $$ + where $p_u$ is the probability of the source symbol $u$. This function implements the lexicographic order assignment as described in [Wikipedia: Shannon–Fano coding](https://en.wikipedia.org/wiki/Shannon%E2%80%93Fano_coding). + + Notes: + Shannon codes are always [prefix-free](/ref/FixedToVariableCode/#is_prefix_free) (hence [uniquely decodable](/ref/FixedToVariableCode/#is_uniquely_decodable)). + + Parameters: + pmf: The probability mass function of the source. + source_block_size: The source block size $k$. The default value is $k = 1$. + + Examples: + >>> pmf = [0.7, 0.15, 0.15] + + >>> code = komm.ShannonCode(pmf, 1) + >>> code.enc_mapping # doctest: +NORMALIZE_WHITESPACE + {(0,): (0,), + (1,): (1, 0, 0), + (2,): (1, 0, 1)} + >>> code.rate(pmf) # doctest: +NUMBER + np.float64(1.6) + + >>> code = komm.ShannonCode(pmf, 2) + >>> code.enc_mapping # doctest: +NORMALIZE_WHITESPACE + {(0, 0): (0, 0), + (0, 1): (0, 1, 0, 0), + (0, 2): (0, 1, 0, 1), + (1, 0): (0, 1, 1, 0), + (1, 1): (1, 0, 0, 0, 0, 0), + (1, 2): (1, 0, 0, 0, 0, 1), + (2, 0): (0, 1, 1, 1), + (2, 1): (1, 0, 0, 0, 1, 0), + (2, 2): (1, 0, 0, 0, 1, 1)} + >>> code.rate(pmf) # doctest: +NUMBER + np.float64(1.6) + """ + pmf = PMF(pmf) + return FixedToVariableCode( + source_cardinality=pmf.size, + target_cardinality=2, + source_block_size=source_block_size, + enc_mapping=shannon_code(pmf, source_block_size), + ) + + +def next_in_lexicographic_order(word: Word) -> Word: + word_list = list(word) + for i in range(len(word_list) - 1, -1, -1): + if word_list[i] == 0: + word_list[i] = 1 + break + word_list[i] = 0 + return tuple(word_list) + + +def shannon_code(pmf: PMF, source_block_size: int) -> dict[Word, Word]: + enc_mapping 
= empty_mapping(pmf.size, source_block_size) + v = () + for u, pu in extended_probabilities(pmf, source_block_size): + length = ceil(log2(1 / pu)) + v = next_in_lexicographic_order(v) + (0,) * (length - len(v)) + enc_mapping[u] = v + return enc_mapping diff --git a/src/komm/_lossless_coding/__init__.py b/src/komm/_lossless_coding/__init__.py index 02601bae..527980dd 100644 --- a/src/komm/_lossless_coding/__init__.py +++ b/src/komm/_lossless_coding/__init__.py @@ -1,11 +1,15 @@ +from .FanoCode import FanoCode from .FixedToVariableCode import FixedToVariableCode from .HuffmanCode import HuffmanCode +from .ShannonCode import ShannonCode from .TunstallCode import TunstallCode from .VariableToFixedCode import VariableToFixedCode __all__ = [ + "FanoCode", "FixedToVariableCode", "HuffmanCode", + "ShannonCode", "TunstallCode", "VariableToFixedCode", ] diff --git a/src/komm/_lossless_coding/util.py b/src/komm/_lossless_coding/util.py index 00b47cb1..4f3276c9 100644 --- a/src/komm/_lossless_coding/util.py +++ b/src/komm/_lossless_coding/util.py @@ -1,8 +1,11 @@ from itertools import product +from math import prod import numpy as np import numpy.typing as npt +from .._util.information_theory import PMF + Word = tuple[int, ...] 
@@ -124,3 +127,15 @@ def check_coverage_from_node(node: TrieNode, visited: set[int]) -> bool: root = build_trie(words) return check_coverage_from_node(root, set()) + + +def extended_probabilities(pmf: PMF, k: int) -> list[tuple[Word, float]]: + probs: list[tuple[float, Word]] = [] + for u in product(range(pmf.size), repeat=k): + pu = prod(pmf[list(u)]) + probs.append((-pu, u)) + return [(u, -p) for p, u in sorted(probs)] + + +def empty_mapping(cardinality: int, block_size: int) -> dict[Word, Word]: + return {x: () for x in product(range(cardinality), repeat=block_size)} diff --git a/tests/lossless_coding/test_fano_code.py b/tests/lossless_coding/test_fano_code.py new file mode 100644 index 00000000..f67f2d67 --- /dev/null +++ b/tests/lossless_coding/test_fano_code.py @@ -0,0 +1,37 @@ +import numpy as np +import pytest + +import komm +from komm._util.information_theory import random_pmf + + +def test_fano_code_wikipedia(): + pmf = np.array([15, 7, 6, 6, 5]) / 39 + code = komm.FanoCode(pmf) + assert code.enc_mapping == { + (0,): (0, 0), + (1,): (0, 1), + (2,): (1, 0), + (3,): (1, 1, 0), + (4,): (1, 1, 1), + } + np.testing.assert_almost_equal(code.rate(pmf), 89 / 39) + + +@pytest.mark.parametrize("source_cardinality", range(2, 7)) +@pytest.mark.parametrize("source_block_size", range(1, 4)) +def test_fano_code_random_pmf(source_cardinality, source_block_size): + for _ in range(10): + pmf = random_pmf(source_cardinality) + code = komm.FanoCode(pmf, source_block_size) + assert code.is_uniquely_decodable() + assert code.is_prefix_free() + assert code.kraft_parameter() <= 1 + entropy = komm.entropy(pmf) + min_p = np.min(pmf) + # For the upper bound below, see [Krajči et al., 2015, apud Wikipedia]. + assert entropy <= code.rate(pmf) <= entropy + (1 - min_p) / source_block_size + # Permute pmf and check if the rate is the same. 
+ pmf1 = pmf[np.random.permutation(source_cardinality)] + code1 = komm.FanoCode(pmf1, source_block_size) + np.testing.assert_almost_equal(code.rate(pmf), code1.rate(pmf1)) diff --git a/tests/lossless_coding/test_shannon_code.py b/tests/lossless_coding/test_shannon_code.py new file mode 100644 index 00000000..9c8fa189 --- /dev/null +++ b/tests/lossless_coding/test_shannon_code.py @@ -0,0 +1,48 @@ +import numpy as np +import pytest + +import komm +from komm._util.information_theory import random_pmf + + +def test_shannon_code_wikipedia_1(): + pmf = np.array([15, 7, 6, 6, 5]) / 39 + code = komm.ShannonCode(pmf) + assert code.enc_mapping == { + (0,): (0, 0), + (1,): (0, 1, 0), + (2,): (0, 1, 1), + (3,): (1, 0, 0), + (4,): (1, 0, 1), + } + np.testing.assert_almost_equal(code.rate(pmf), 102 / 39) + + +def test_shannon_code_wikipedia_2(): + pmf = [0.36, 0.18, 0.18, 0.12, 0.09, 0.07] + code = komm.ShannonCode(pmf) + assert code.enc_mapping == { + (0,): (0, 0), + (1,): (0, 1, 0), + (2,): (0, 1, 1), + (3,): (1, 0, 0, 0), + (4,): (1, 0, 0, 1), + (5,): (1, 0, 1, 0), + } + + +@pytest.mark.parametrize("source_cardinality", range(2, 7)) +@pytest.mark.parametrize("source_block_size", range(1, 4)) +def test_shannon_code_random_pmf(source_cardinality, source_block_size): + for _ in range(10): + pmf = random_pmf(source_cardinality) + code = komm.ShannonCode(pmf, source_block_size) + assert code.is_uniquely_decodable() + assert code.is_prefix_free() + assert code.kraft_parameter() <= 1 + entropy = komm.entropy(pmf) + assert entropy <= code.rate(pmf) <= entropy + 1 / source_block_size + # Permute pmf and check if the rate is the same. 
+ pmf1 = pmf[np.random.permutation(source_cardinality)] + code1 = komm.ShannonCode(pmf1, source_block_size) + np.testing.assert_almost_equal(code.rate(pmf), code1.rate(pmf1)) From f951bebc9d66e258aeedd576af7f12336133873e Mon Sep 17 00:00:00 2001 From: Roberto Nobrega Date: Fri, 3 Jan 2025 00:01:19 -0300 Subject: [PATCH 6/8] feat: add progress bars for code generation in Fano, Huffman, Shannon, Tunstall algorithms --- src/komm/_lossless_coding/FanoCode.py | 13 ++++++++++++- src/komm/_lossless_coding/HuffmanCode.py | 20 ++++++++++++++++---- src/komm/_lossless_coding/ShannonCode.py | 13 ++++++++++++- src/komm/_lossless_coding/TunstallCode.py | 14 +++++++++++--- src/komm/_lossless_coding/util.py | 5 ++++- 5 files changed, 55 insertions(+), 10 deletions(-) diff --git a/src/komm/_lossless_coding/FanoCode.py b/src/komm/_lossless_coding/FanoCode.py index 31e4bdf7..58f6ad0d 100644 --- a/src/komm/_lossless_coding/FanoCode.py +++ b/src/komm/_lossless_coding/FanoCode.py @@ -1,5 +1,6 @@ import numpy as np import numpy.typing as npt +from tqdm import tqdm from .._util.information_theory import PMF from .FixedToVariableCode import FixedToVariableCode @@ -55,18 +56,28 @@ def FanoCode( def fano_algorithm(pmf: PMF, source_block_size: int) -> dict[Word, Word]: + pbar = tqdm( + desc="Generating Fano code", + total=2 * pmf.size**source_block_size, + delay=2.5, + ) + enc_mapping = empty_mapping(pmf.size, source_block_size) - xpmf = extended_probabilities(pmf, source_block_size) + xpmf = extended_probabilities(pmf, source_block_size, pbar) stack: list[tuple[list[tuple[Word, float]], Word]] = [(xpmf, ())] while stack: current_pmf, prefix = stack.pop() if len(current_pmf) == 1: u, _ = current_pmf[0] enc_mapping[u] = prefix + pbar.update() continue probs = [p for _, p in current_pmf] total = np.sum(probs) index = np.argmin(np.abs(np.cumsum(probs) - total / 2)) stack.append((current_pmf[index + 1 :], prefix + (1,))) stack.append((current_pmf[: index + 1], prefix + (0,))) + + pbar.close() + 
return enc_mapping diff --git a/src/komm/_lossless_coding/HuffmanCode.py b/src/komm/_lossless_coding/HuffmanCode.py index 8fbd1081..a58f2f2f 100644 --- a/src/komm/_lossless_coding/HuffmanCode.py +++ b/src/komm/_lossless_coding/HuffmanCode.py @@ -5,6 +5,7 @@ from typing import Literal import numpy.typing as npt +from tqdm import tqdm from typing_extensions import Self from .._util.information_theory import PMF @@ -80,10 +81,17 @@ def __lt__(self, other: Self) -> bool: elif policy == "low": return (p0, -i0) < (p1, -i1) - tree = [ - Node(index, prod(probs)) - for (index, probs) in enumerate(product(pmf, repeat=source_block_size)) - ] + pbar = tqdm( + desc="Generating Huffman code", + total=3 * pmf.size**source_block_size - 1, + delay=2.5, + ) + + tree: list[Node] = [] + for index, probs in enumerate(product(pmf, repeat=source_block_size)): + tree.append(Node(index, prod(probs))) + pbar.update() + heap = tree.copy() heapify(heap) while len(heap) > 1: @@ -95,6 +103,7 @@ def __lt__(self, other: Self) -> bool: node0.parent = node1.parent = node.index heappush(heap, node) tree.append(node) + pbar.update() codewords: list[Word] = [] for index in range(pmf.size**source_block_size): @@ -104,5 +113,8 @@ def __lt__(self, other: Self) -> bool: bits.append(node.bit) node = tree[node.parent] codewords.append(tuple(reversed(bits))) + pbar.update() + + pbar.close() return codewords diff --git a/src/komm/_lossless_coding/ShannonCode.py b/src/komm/_lossless_coding/ShannonCode.py index 114ed78b..91ea6600 100644 --- a/src/komm/_lossless_coding/ShannonCode.py +++ b/src/komm/_lossless_coding/ShannonCode.py @@ -1,6 +1,7 @@ from math import ceil, log2 import numpy.typing as npt +from tqdm import tqdm from .._util.information_theory import PMF from .FixedToVariableCode import FixedToVariableCode @@ -70,10 +71,20 @@ def next_in_lexicographic_order(word: Word) -> Word: def shannon_code(pmf: PMF, source_block_size: int) -> dict[Word, Word]: + pbar = tqdm( + desc="Generating Shannon code", + 
total=2 * pmf.size**source_block_size, + delay=2.5, + ) + enc_mapping = empty_mapping(pmf.size, source_block_size) v = () - for u, pu in extended_probabilities(pmf, source_block_size): + for u, pu in extended_probabilities(pmf, source_block_size, pbar): length = ceil(log2(1 / pu)) v = next_in_lexicographic_order(v) + (0,) * (length - len(v)) enc_mapping[u] = v + pbar.update() + + pbar.close() + return enc_mapping diff --git a/src/komm/_lossless_coding/TunstallCode.py b/src/komm/_lossless_coding/TunstallCode.py index b3647939..1c59f3db 100644 --- a/src/komm/_lossless_coding/TunstallCode.py +++ b/src/komm/_lossless_coding/TunstallCode.py @@ -3,6 +3,7 @@ from math import ceil, log2 import numpy.typing as npt +from tqdm import tqdm from typing_extensions import Self from .._util.information_theory import PMF @@ -67,14 +68,21 @@ class Node: def __lt__(self, other: Self) -> bool: return -self.probability < -other.probability + pbar = tqdm( + desc="Generating Tunstall code", + total=2 ** (code_block_size - 1) - pmf.size + 1, + delay=2.5, + ) + heap = [Node((symbol,), probability) for (symbol, probability) in enumerate(pmf)] heapify(heap) - while len(heap) + pmf.size - 1 < 2**code_block_size: node = heappop(heap) for symbol, probability in enumerate(pmf): new_node = Node(node.sourceword + (symbol,), node.probability * probability) heappush(heap, new_node) - sourcewords = sorted(node.sourceword for node in heap) + pbar.update() + + pbar.close() - return sourcewords + return sorted(node.sourceword for node in heap) diff --git a/src/komm/_lossless_coding/util.py b/src/komm/_lossless_coding/util.py index 4f3276c9..68ae1e4e 100644 --- a/src/komm/_lossless_coding/util.py +++ b/src/komm/_lossless_coding/util.py @@ -1,5 +1,6 @@ from itertools import product from math import prod +from typing import Any import numpy as np import numpy.typing as npt @@ -129,11 +130,13 @@ def check_coverage_from_node(node: TrieNode, visited: set[int]) -> bool: return check_coverage_from_node(root, 
set()) -def extended_probabilities(pmf: PMF, k: int) -> list[tuple[Word, float]]: +def extended_probabilities(pmf: PMF, k: int, pbar: Any) -> list[tuple[Word, float]]: probs: list[tuple[float, Word]] = [] for u in product(range(pmf.size), repeat=k): pu = prod(pmf[list(u)]) probs.append((-pu, u)) + if pbar is not None: + pbar.update() return [(u, -p) for p, u in sorted(probs)] From fba50dc96900e02af7a96bdcc894c84f9de9a27f Mon Sep 17 00:00:00 2001 From: Roberto Nobrega Date: Fri, 3 Jan 2025 00:08:03 -0300 Subject: [PATCH 7/8] test: use `pytest-repeat` plugin --- pyproject.toml | 1 + tests/error_control_block/test_block_code.py | 4 ++-- tests/error_control_block/test_block_code_mappings.py | 8 ++++---- tests/error_control_block/test_systematic_block_code.py | 4 ++-- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 9e9f1a1a..9424d1be 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,7 @@ lint = [ ] test = [ "pytest==8.3.3", + "pytest-repeat==0.9.3", "pytest-benchmark==5.1.0", "pytest-cov==6.0.0", ] diff --git a/tests/error_control_block/test_block_code.py b/tests/error_control_block/test_block_code.py index baff8657..dcfdb492 100644 --- a/tests/error_control_block/test_block_code.py +++ b/tests/error_control_block/test_block_code.py @@ -64,8 +64,8 @@ def test_block_code(): np.testing.assert_equal(code.covering_radius(), 3) -@pytest.mark.parametrize("execution_number", range(20)) -def test_block_code_mappings(execution_number): +@pytest.mark.repeat(20) +def test_block_code_mappings(): while True: code = komm.BlockCode(generator_matrix=np.random.randint(0, 2, (4, 8))) if rank(code.generator_matrix) == code.dimension: diff --git a/tests/error_control_block/test_block_code_mappings.py b/tests/error_control_block/test_block_code_mappings.py index 99d7dd9b..45230ac2 100644 --- a/tests/error_control_block/test_block_code_mappings.py +++ b/tests/error_control_block/test_block_code_mappings.py @@ -11,8 +11,8 @@ 
@pytest.mark.parametrize("code", [block, systematic, cyclic, terminated]) -@pytest.mark.parametrize("execution_number", range(20)) -def test_mappings_array_input(code: komm.abc.BlockCode, execution_number: int): +@pytest.mark.repeat(20) +def test_mappings_array_input(code: komm.abc.BlockCode): u1 = np.random.randint(0, 2, code.dimension) u2 = np.random.randint(0, 2, code.dimension) v1 = code.encode(u1) @@ -32,8 +32,8 @@ def test_mappings_array_input(code: komm.abc.BlockCode, execution_number: int): @pytest.mark.parametrize("code", [block, systematic, cyclic, terminated]) -@pytest.mark.parametrize("execution_number", range(20)) -def test_mappings_inverses(code: komm.abc.BlockCode, execution_number: int): +@pytest.mark.repeat(20) +def test_mappings_inverses(code: komm.abc.BlockCode): # Check that 'inverse_encode' is the inverse of 'encode' u = np.random.randint(0, 2, (3, 4, code.dimension)) np.testing.assert_array_equal(u, code.inverse_encode(code.encode(u))) diff --git a/tests/error_control_block/test_systematic_block_code.py b/tests/error_control_block/test_systematic_block_code.py index bb2b6179..bd39d8b8 100644 --- a/tests/error_control_block/test_systematic_block_code.py +++ b/tests/error_control_block/test_systematic_block_code.py @@ -65,8 +65,8 @@ def test_systematic_block_code(): np.testing.assert_equal(code.covering_radius(), 3) -@pytest.mark.parametrize("execution_number", range(20)) -def test_systematic_block_code_mappings(execution_number): +@pytest.mark.repeat(20) +def test_systematic_block_code_mappings(): code = komm.SystematicBlockCode(parity_submatrix=np.random.randint(0, 2, (4, 4))) k, m = code.dimension, code.redundancy for _ in range(100): From f4423ee5c14e82791e0eb026eff5f2084c410913 Mon Sep 17 00:00:00 2001 From: Roberto Nobrega Date: Fri, 3 Jan 2025 00:11:42 -0300 Subject: [PATCH 8/8] bump: version 0.15.1 -> 0.16.0 --- CHANGELOG.md | 7 +++++++ pyproject.toml | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 0c9baf32..9f1af8b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,13 @@ > [!NOTE] > Changelog started with version v0.10.0. +## v0.16.0 (2025-01-03) + +### Added + +- Implemented [Shannon](https://komm.dev/ref/ShannonCode) and [Fano](https://komm.dev/ref/FanoCode) codes. +- Implemented method to compute the [Kraft parameter](https://komm.dev/ref/FixedToVariableCode#kraft_parameter) of a fixed-to-variable code. + ## v0.15.1 (2024-12-31) ### Fixed diff --git a/pyproject.toml b/pyproject.toml index 9424d1be..596ca507 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "komm" -version = "0.15.1" +version = "0.16.0" description = "An open-source library for Python 3 providing tools for analysis and simulation of analog and digital communication systems." readme = "README.md" authors = [{ name = "Roberto W. Nobrega", email = "rwnobrega@gmail.com" }]