From fddebe28020452db07b9efe93045694ff01468c7 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Thu, 31 Aug 2023 08:29:10 +0200
Subject: [PATCH 01/29] use RabbitMQ for brick-to-brick message passing
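
Brick runners no longer exchange packets over direct TCP connections
(output server, consumer groups and packet requests). Instead, the
runner's Output publishes each packet to the consumers' queues on a
RabbitMQ exchange through the new transport module (using aioamqp),
while the Input consumes from the brick's input queues and only
acknowledges a message once the brick has processed it.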

---
 requirements_prod.txt                         |   1 +
 setup.py                                      |   2 +-
 test/brick_runner/conftest.py                 |  29 ++-
 test/brick_runner/test_portMapping.py         |   6 +-
 test/brick_runner/test_runner.py              |  74 +++++--
 titanfe/apps/brick_runner/adapter.py          |   2 +-
 titanfe/apps/brick_runner/brick.py            | 150 +++++++------
 titanfe/apps/brick_runner/grid_manager.py     |   9 -
 titanfe/apps/brick_runner/input.py            | 197 ++++++++----------
 titanfe/apps/brick_runner/output.py           | 116 +++++++++++
 titanfe/apps/brick_runner/output/__init__.py  |  15 --
 titanfe/apps/brick_runner/output/consumer.py  |  62 ------
 titanfe/apps/brick_runner/output/group.py     | 193 -----------------
 titanfe/apps/brick_runner/output/output.py    |  79 -------
 titanfe/apps/brick_runner/output/port.py      |  47 -----
 titanfe/apps/brick_runner/packet.py           |  47 ++++-
 titanfe/apps/brick_runner/runner.py           | 156 +++-----------
 titanfe/apps/brick_runner/transport.py        |  79 +++++++
 .../{connection.py => value_mapping.py}       |   0
 titanfe/apps/control_peer/brick.py            | 163 ++++++++++-----
 titanfe/apps/control_peer/config.yaml         |   2 +-
 titanfe/apps/control_peer/runner.py           |   2 +-
 titanfe/apps/control_peer/services.py         |   2 +-
 titanfe/config.py                             |  63 +++---
 titanfe/connection.py                         |   6 +-
 titanfe/log.py                                |   2 +-
 titanfe/log_config.yml                        |  12 +-
 titanfe/utils.py                              |   6 +
 28 files changed, 695 insertions(+), 827 deletions(-)
 create mode 100644 titanfe/apps/brick_runner/output.py
 delete mode 100644 titanfe/apps/brick_runner/output/__init__.py
 delete mode 100644 titanfe/apps/brick_runner/output/consumer.py
 delete mode 100644 titanfe/apps/brick_runner/output/group.py
 delete mode 100644 titanfe/apps/brick_runner/output/output.py
 delete mode 100644 titanfe/apps/brick_runner/output/port.py
 create mode 100644 titanfe/apps/brick_runner/transport.py
 rename titanfe/apps/brick_runner/{connection.py => value_mapping.py} (100%)

diff --git a/requirements_prod.txt b/requirements_prod.txt
index d86897e..883bc26 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -15,6 +15,7 @@ dataclasses-json
 requests
 docopt
 pycryptodome
+aioamqp
 
 # linux only:
 uvloop == 0.13.*;platform_system=="Linux"
diff --git a/setup.py b/setup.py
index b9bf595..085415b 100644
--- a/setup.py
+++ b/setup.py
@@ -53,7 +53,7 @@ setup(
         "titanfe.apps.control_peer",
         "titanfe.apps.control_peer.webapi",
         "titanfe.apps.brick_runner",
-        "titanfe.apps.brick_runner.output",
+        #"titanfe.apps.brick_runner.output",
         "titanfe.apps.kafka_to_elastic",
         "titanfe.apps.kafka_viewer",
         "titanfe.testing",
diff --git a/test/brick_runner/conftest.py b/test/brick_runner/conftest.py
index 9fab2b9..f84b333 100644
--- a/test/brick_runner/conftest.py
+++ b/test/brick_runner/conftest.py
@@ -8,16 +8,18 @@
 """
 Fixtures for BrickRunner-Tests
 """
-
+import asyncio
 # pylint: disable=redefined-outer-name
 import sys
 import types
 import logging
+from collections import defaultdict
 from pathlib import Path
 from unittest.mock import MagicMock, patch
 
 import pytest
 
+from titanfe.apps.brick_runner.transport import RabbitMQ
 from titanfe.apps.brick_runner.metrics import MetricEmitter
 from titanfe.apps.brick_runner.runner import BrickRunner
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
@@ -66,10 +68,12 @@ class GridManagerDummy:
     register_runner = MagicMock()
 
 
 @pytest.mark.asyncio
 @pytest.fixture()
 async def brick_runner():
-    """setup a brick runner, with networking patched away"""
+    """set up a brick runner, with networking patched away"""
     guess_module_path = "titanfe.apps.control_peer.brick.BrickBaseDefinition.guess_module_path"
     with patch(guess_module_path, MagicMock(return_value=Path("n/a"))):
         brick_definition = BrickInstanceDefinition.from_gridmanager(
@@ -84,8 +88,14 @@ async def brick_runner():
                     "autoscale_queue_level": 25,
                     "autoscale_max_instances": 1,
                     "exit_after_idle_seconds": 0,
-                    "inputPorts": "",
-                    "outputPorts": "",
+                    "inputPorts": [],
+                    "outputPorts": [
+                        {
+                            "schema": 'test = variant;',
+                            "name": "Out",
+                            "typeName": "test",
+                        }
+                    ]
                 },
                 "FlowID": "1234-56-789-0",
                 "FlowName": "Test",
@@ -94,7 +104,7 @@ async def brick_runner():
                 "Outbound": {
                     "A": [
                         {
-                            "InstanceID": "dummy",
+                            "InstanceID": "Next-Dummy",
                             "autoscale_queue_level": 0,
                             "mapping": [
                                 dict(
@@ -105,6 +115,11 @@ async def brick_runner():
                                 )
                             ],
                             "buffer": {},
+                            "sourcePort": {
+                                "id": "Out",
+                                "typeName": "test",
+                                "schema": "test = variant;",
+                            },
                             "targetPort": {
                                 "id": "Input",
                                 "typeName": "test",
@@ -121,7 +136,9 @@ async def brick_runner():
 
     gridmanager = "titanfe.apps.brick_runner.runner.GridManager"
     getmodule = "titanfe.apps.brick_runner.brick.get_module"
-    with patch(gridmanager, MagicMock(return_value=GridManagerDummy())), patch(
+    rabbitmq = "titanfe.apps.brick_runner.transport.RabbitMQ"
+    # with patch(gridmanager, MagicMock(return_value=GridManagerDummy())), patch(
+    with patch(
         getmodule, MagicMock(return_value=module)
     ):
         brick_runner = await BrickRunner.create("R-Test", brick_definition)
diff --git a/test/brick_runner/test_portMapping.py b/test/brick_runner/test_portMapping.py
index eff2b0f..59b6ee4 100644
--- a/test/brick_runner/test_portMapping.py
+++ b/test/brick_runner/test_portMapping.py
@@ -4,15 +4,15 @@
 # Licensed under the Apache License, Version 2.0 (the "License");
 # found in the LICENSE file in the root directory of this source tree.
 #
-
 # pylint: skip-file
-from titanfe.apps.brick_runner.connection import MappingRules
+from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
+
 from titanfe.brick import BrickBase
 from titanfe.testing import TestRunner
 from titanfe.ujo_helper import python_to_ujo
 from ujotypes import UjoStringUTF8, UjoInt64, UjoFloat64, UjoBool, UjoMap
 from titanfe.apps.brick_runner.packet import Buffer
-from titanfe.apps.brick_runner.output.group import BufferDescription
+
 
 TestRunner.__test__ = False  # prevent PytestCollectionWarning "cannot collect test class"
 
diff --git a/test/brick_runner/test_runner.py b/test/brick_runner/test_runner.py
index c08519b..b409dde 100644
--- a/test/brick_runner/test_runner.py
+++ b/test/brick_runner/test_runner.py
@@ -12,16 +12,40 @@ Test the BrickRunner itself
 # pylint: disable=redefined-outer-name
 
 import asyncio
+from collections import defaultdict
 
 import pytest
+
+from titanfe.apps.brick_runner.transport import RabbitMQ
 from ujotypes import UjoStringC
 
-from titanfe.apps.brick_runner.connection import Buffer
+from titanfe.apps.brick_runner.value_mapping import Buffer
 from titanfe.apps.brick_runner.packet import Packet
 from titanfe.brick import BrickBase
 from titanfe.constants import DEFAULT_PORT
 
 
+class RabbitMQDouble(RabbitMQ):
+    """test double: replaces the message broker with in-memory queues"""
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.IN = defaultdict(asyncio.Queue)
+        self.OUT = asyncio.Queue()
+        self._consumer_tasks = {}
+
+    async def start_consumer(self, queue_name, callback):
+        # consume in a background task so that `start_consumer` returns
+        # immediately - `Input.start` awaits it during setup
+        async def consume():
+            while True:
+                message = await self.IN[queue_name].get()
+
+                async def done_callback():
+                    self.IN[queue_name].task_done()
+
+                await callback(message, done_callback)
+
+        self._consumer_tasks[queue_name] = asyncio.create_task(consume())
+
+    async def stop_consumer(self, queue_name):
+        task = self._consumer_tasks.pop(queue_name, None)
+        if task:
+            task.cancel()
+
+    async def publish(self, queue_name, message):
+        await self.OUT.put(
+            {"message": message, "exchange": self.exchange_name, "routing_key": queue_name}
+        )
+
+
 @pytest.mark.asyncio
 async def test_basic_packet_processing(brick_runner):  # noqa: F811
     """A Packet is taken from the input, processed within the Brick's module
@@ -35,20 +59,42 @@ async def test_basic_packet_processing(brick_runner):  # noqa: F811
 
     brick_runner.brick.module.Brick = Brick
 
-    runner_run = asyncio.create_task(brick_runner.run())
+    rmq_double = RabbitMQDouble("dummy_exchange")
+    brick_runner.input.transport = rmq_double
+    brick_runner.output.transport = rmq_double
 
-    output_group = next(iter(brick_runner.output["A"]))
-    await output_group.tasks.cancel()
+    runner_run = asyncio.create_task(brick_runner.run())
 
     payload = UjoStringC("InitialValue")
 
-    input_packet = Packet(uid="Test", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
-    await brick_runner.input.put(input_packet)
+    input_packet1 = Packet(uid="Test1", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
+    input_packet2 = Packet(uid="Test2", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
+    input_packet3 = Packet(uid="Test3", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
+
+    await brick_runner.input._packets.put((DEFAULT_PORT, input_packet1))
+    await brick_runner.input._packets.put((DEFAULT_PORT, input_packet2))
+    await brick_runner.input._packets.put((DEFAULT_PORT, input_packet3))
+
+    # each item taken from rmq_double.OUT has the shape:
+    # {
+    #     'message': Packet(uid="Test1", payload="NewValue"),
+    #     'exchange': 'dummy_exchange',
+    #     'routing_key': ('Next-Dummy:Input',)
+    # }
+
+    brick_output = await rmq_double.OUT.get()
+    packet = brick_output["message"]
+    assert packet.payload == UjoStringC("NewValue")
+    assert input_packet1.uid == packet.uid
 
-    output_packet = await output_group.packets.get()
+    brick_output = await rmq_double.OUT.get()
+    packet = brick_output["message"]
+    assert packet.payload == UjoStringC("NewValue")
+    assert input_packet2.uid == packet.uid
 
-    assert output_packet.payload == UjoStringC("NewValue")
-    assert input_packet.uid == output_packet.uid
+    brick_output = await rmq_double.OUT.get()
+    packet = brick_output["message"]
+    assert packet.payload == UjoStringC("NewValue")
+    assert input_packet3.uid == packet.uid
 
     await brick_runner.stop_processing()
     await runner_run
@@ -57,17 +103,21 @@ async def test_basic_packet_processing(brick_runner):  # noqa: F811
 @pytest.mark.asyncio
 async def test_exit_when_idle(brick_runner):  # noqa: F811
     """The runner should exit after being idle for a specific amount of time"""
-    max_idle_seconds = 0.1
+    max_idle_seconds = 0.2
 
     class Brick(BrickBase):
         def process(self, input, port):  # pylint: disable=unused-argument, redefined-builtin
             return UjoStringC("NewValue")
 
     brick_runner.brick.module.Brick = Brick
-
-    brick_runner.brick.exit_after_idle_seconds = max_idle_seconds
     brick_runner.brick.is_inlet = False
 
+    rmq_double = RabbitMQDouble("dummy_exchange")
+    brick_runner.input.transport = rmq_double
+    brick_runner.output.transport = rmq_double
+
+    await brick_runner.stop_processing()
+
     try:
         await asyncio.wait_for(brick_runner.run(), timeout=max_idle_seconds * 10)
     except TimeoutError:
diff --git a/titanfe/apps/brick_runner/adapter.py b/titanfe/apps/brick_runner/adapter.py
index ce2aa66..d8e05a5 100644
--- a/titanfe/apps/brick_runner/adapter.py
+++ b/titanfe/apps/brick_runner/adapter.py
@@ -72,7 +72,7 @@ class BrickAdapter:  # pylint: disable=too-few-public-methods
         self.log.debug(
             "brick emitted new value: %r , port: %s", value, port or self.__default_port
         )
-        self.__put_packet((value, port or self.__default_port))
+        self.__put_packet(value, port or self.__default_port)
 
     def decrypt_parameter(self, parameter):  # pylint: disable=no-self-use
         """Decrypt a secret parameter using AES GCM
diff --git a/titanfe/apps/brick_runner/brick.py b/titanfe/apps/brick_runner/brick.py
index 1eb9de3..20befbf 100644
--- a/titanfe/apps/brick_runner/brick.py
+++ b/titanfe/apps/brick_runner/brick.py
@@ -21,61 +21,77 @@ from titanfe.brick import InletBrickBase
 from titanfe.utils import get_module, time_delta_in_ms, Flag
 from titanfe.ujo_helper import python_to_ujo
 from .adapter import BrickAdapter, AdapterMeta
+from .input import Input
+from .output import Output
 from .packet import Packet
 from ...constants import DEFAULT_PORT
 
 PortMapping = namedtuple("PortMapping", ("rules", "type"))
 
 
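+# placed on the internal results queue to mark the end of one brick execution's output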
+SENTINEL = object()
+
 class Brick:
     """Wraps all the Brick-Handling"""
 
     # pylint: disable=too-many-instance-attributes
-    def __init__(self, instance_definition: BrickInstanceDefinition, metric_emitter, logger):
+    def __init__(
+        self,
+        instance_definition: BrickInstanceDefinition,
+        metric_emitter,
+        logger,
+        output: Output,
+    ):
         self.metric_emitter = metric_emitter
+        self.output = output
 
         self.uid = instance_definition.uid
         self.name = instance_definition.name
 
         self.flow = instance_definition.flow
 
-        self.exit_after_idle_seconds = (
-            instance_definition.runtime_parameters.exit_after_idle_seconds
-        )
         self.processing_parameters = instance_definition.processing_parameters
 
-        self.default_port = next(iter(instance_definition.connections.output), DEFAULT_PORT)
+        self.default_port = next(
+            iter(instance_definition.connections.output), DEFAULT_PORT
+        )
         self.is_inlet = not instance_definition.connections.input
         self.is_outlet = not instance_definition.connections.output
 
         self.brick_type = instance_definition.base.name
         self.brick_family = instance_definition.base.family
 
-        context = logging.FlowContext(self.flow.uid, self.flow.name, self.uid, self.name)
+        context = logging.FlowContext(
+            self.flow.uid, self.flow.name, self.uid, self.name
+        )
         logging.global_context.update(context.asdict())
 
         self.log = logger.getChild("Brick")
         self.module = get_module(instance_definition.base.module_path)
         self.log.info(repr(instance_definition))
-        self.results = janus.Queue()
+
+        self._brick_output = janus.Queue()
+
         self.adapter = BrickAdapter(
-            AdapterMeta(brick=(self.uid, self.name), flow=(self.flow.uid, self.flow.name)),
-            self.enqueue_result_as_packet,
+            AdapterMeta(
+               brick=(self.uid, self.name), flow=(self.flow.uid, self.flow.name)
+            ),
+            self.adapter_output_callback,
             self.log,
             self.default_port,
         )
 
         self.instance = None
-
         self.last_execution_start = None
-        self.is_processing = Flag()
 
     def create_instance(self):
         """create an instance of the actual Brick"""
         try:
             self.instance = self.module.Brick(self.adapter, self.processing_parameters)
         except AttributeError:
-            self.log.with_context.warning("Brick class is missing in module: %r", self.module)
+            self.log.with_context.warning(
+                "Brick class is missing in module: %r", self.module
+            )
             raise ImportError(f"Brick class is missing in module: {self.module}")
 
     def terminate(self):
@@ -90,83 +106,89 @@ class Brick:
         self.instance.teardown()
         self.instance = None
 
-    async def get_results(self):
-        """async generator over the results from the brick"""
-        queue = self.results.async_q
-
-        while not (queue.closed and queue.empty()):
-            packet, port = await queue.get()
-            await self.metric_emitter.emit_packet_metrics(packet, self.execution_time)
-            queue.task_done()
-            yield packet, port
-
-        raise StopAsyncIteration
-
     @property
     def execution_time(self):
         return time_delta_in_ms(self.last_execution_start)
 
-    async def process(self, packet):
-        with self.is_processing:
-            await self.execute_brick(packet)
-
-    def enqueue_result_as_packet(self, result, port=None, parent_packet=None):
-        """ create a packet with an empty buffer if needed and
-        add the bricks result to the packets payload"""
-
-        port = port or self.default_port
-        self.log.debug(
-            "brick produced new value: %r , port: %s", result, port
-        )
-        if not isinstance(result, UjoBase):
-            result = python_to_ujo(result)
-
-        if not self.is_outlet:
-            packet = copy(parent_packet) if parent_packet else Packet(port=port)
-            packet.payload = result
+    def adapter_output_callback(self, value, port=None):
+        self.log.debug("brick emits on port [%s]: %r", port, value)
+        self._brick_output.sync_q.put((value, port))
+        self._brick_output.sync_q.join()
 
-            self.results.sync_q.put((packet, port))
-
-    async def execute_brick(self, packet):
+    async def process(self, packet):
         """run the brick module for the given packet in a separate thread"""
         self.log.info(
-            "(%s) execute Brick: %s(%s) for %r", self.flow.name, self.name, self.uid, packet
+            "(%s) execute Brick: %s(%s) for %r",
+            self.flow.name,
+            self.name,
+            self.uid,
+            packet,
         )
 
-        self.adapter.emit_new_packet = partial(
-            self.enqueue_result_as_packet, parent_packet=packet if not self.is_inlet else None
-        )
+        self.last_execution_start = time.time_ns()
 
         payload = None
         if not self.is_inlet:
             payload = packet.payload
 
-        self.last_execution_start = time.time_ns()
         loop = asyncio.get_event_loop()
-        result = await loop.run_in_executor(
+        execution = loop.run_in_executor(
             None, self.run_instance_processing, payload, packet.port
         )
 
-        if result is not None:
-            port = self.default_port
-            if isinstance(result, tuple):
-                try:
-                    result, port = result
-                except ValueError:
-                    raise ValueError("Invalid brick result ")
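+        # drain everything the module emits from its worker thread until the
+        # SENTINEL arrives, then wait for the executor to finish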
+        if not self.is_outlet:
+            await self.process_output(parent_packet=packet)
 
-            self.log.info("brick output: %r , port: %s", result, port)
-            self.enqueue_result_as_packet(result, port, parent_packet=packet)
+        await execution
 
-        await self.results.async_q.join()
         await self.metric_emitter.emit_brick_metrics(self.execution_time)
         if self.is_outlet:
             await self.metric_emitter.emit_packet_metrics(packet, self.execution_time)
 
     def run_instance_processing(self, payload, port):
-        """do the actual execution of the brick module and return it's result"""
+        """do the actual execution of the brick module and return its result"""
         try:
-            return self.instance.process(payload, port)
+            result = self.instance.process(payload, port)
+            self.log.debug("brick result: %r", result)
+            if result is not None:
+                if not isinstance(result, tuple) or len(result) != 2:
+                    payload, port = result, self.default_port
+                else:
+                    # assumes a two-item tuple is always (payload, port) -
+                    # a brick returning a plain two-item tuple as payload
+                    # would be misinterpreted; we should make that more explicit
+                    payload, port = result
+
+                self._brick_output.sync_q.put((payload, port))
+
         except Exception as error:  # pylint: disable=broad-except
             self.log.with_context.error("brick execution failed: %r", error, exc_info=True)
-            return None
+
+        self._brick_output.sync_q.put(SENTINEL)
+
+    async def process_output(self, parent_packet):
+        while True:
+            brick_output = await self._brick_output.async_q.get()
+            self.log.debug("process brick output: %r", brick_output)
+            try:
+                if brick_output is SENTINEL:
+                    return
+                if brick_output is not None:
+                    await self.publish_packet(*brick_output, parent_packet)
+
+            finally:
+                self._brick_output.async_q.task_done()
+
+    async def publish_packet(self, payload, port, parent_packet):
+        if port is None:
+            port = self.default_port
+
+        if not isinstance(payload, UjoBase):
+            payload = python_to_ujo(payload)
+
+        packet = copy(parent_packet) if parent_packet else Packet(port=port)
+        packet.payload = payload
+
+        self.log.debug("brick output on port [%s]: %r", port, packet)
+        await self.output.put(packet, port)
\ No newline at end of file
diff --git a/titanfe/apps/brick_runner/grid_manager.py b/titanfe/apps/brick_runner/grid_manager.py
index c6ee46b..fe6e5ac 100644
--- a/titanfe/apps/brick_runner/grid_manager.py
+++ b/titanfe/apps/brick_runner/grid_manager.py
@@ -48,12 +48,3 @@ class GridManager:
         payload = {"runnerId": self.runner_uid, "brickId": self.brick_uid}
         await requests.post(f"{self.address}/brickrunners/deregister", data=json.dumps(payload))
         self.log.debug("Deregister: %r", payload)
-
-    async def request_scaling(self, consumer_uid):
-        """send brick scaling request"""
-        payload = {"brickId": self.brick_uid, "consumerId": consumer_uid}
-        try:
-            await requests.post(f"{self.address}/brickrunners/scaling", data=json.dumps(payload))
-            self.log.debug("Requested scaling: %r", payload)
-        except ClientError as error:
-            self.log.warning("ScalingRequest failed %s: %s", error, payload)
diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index ac157f9..8767f45 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -6,130 +6,97 @@
 #
 
 """The INPUT side of a brick (runner)"""
-
 import asyncio
-from asyncio import futures, CancelledError
-from functools import partial
+import functools
+import typing as T
+
+import titanfe.log
 
-from titanfe.connection import Connection
-from titanfe.messages import PacketRequest, ConsumerRegistration
-from titanfe.utils import cancel_tasks
-from .metrics import QueueWithMetrics
-from .packet import Packet
+from titanfe.apps.brick_runner.packet import Packet
+
+if T.TYPE_CHECKING:
+    from titanfe.apps.brick_runner import transport
 
+Port = T.NewType("Port", str)
 
-class Input:
-    """The Input side of a brick runner requests new packets from the previous BrickRunners
-    OutputServer until it's QueueLimit is exceeded and again once the "low level" is reached.
-    The Input will also emit queue metrics every 0.1 sec if there are packets in the queue.
 
-    Arguments:
-        runner (BrickRunner): instance of a parent brick runner
-        adress (NetworkAddress): (host, port) of the source-BrickRunners OutputServer
-    """
+class Input:
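+    """The input side of a brick runner: consumes packets from the given
+    input queues on the message transport and yields them for processing
+    when iterated asynchronously.
+
+    Arguments:
+        input_queues: the name of the queue to consume from, per input port
+        transport (transport.RabbitMQ): the message transport to consume from
+        max_idle_time: end the iteration if no packet arrives within this many seconds
+        logger: a parent logger to derive the "Input" child logger from
+    """
+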
+    def __init__(
+        self,
+        input_queues: T.Dict[Port, str],
+        transport: "transport.RabbitMQ",
+        max_idle_time=60,
+        logger=None,
+    ):
+        self.transport = transport
+        self.log = logger.getChild("Input") if logger else titanfe.log.getLogger(__name__)
+
+        self.max_idle_time = max(max_idle_time, 0.2)  # give it at least a chance to run
+
+        self._getter = asyncio.Future()
+        self._queues = input_queues
+        self._packets = asyncio.Queue()
+        self._packets_done = {}
 
-    def __init__(self, runner):
-        self.name = f"Input.{runner.brick.name}"
-        self.runner = runner
-        self.log = runner.log.getChild("Input")
+    def __aiter__(self):
+        return self
 
-        self.metric_emitter = runner.metric_emitter
-        self.metric_task = None
+    async def __anext__(self) -> Packet:
+        self._getter = asyncio.create_task(self._packets.get())
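+        # the wait_for timeout doubles as the "exit after idle" mechanism:
+        # if no packet arrives within max_idle_time, the iteration ends and
+        # the runner shuts down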
+        try:
+            self.log.debug("wait for packet")
+            port, packet = await asyncio.wait_for(self._getter, timeout=self.max_idle_time)
+            self.log.debug("got packet: %r", packet)
+        except asyncio.CancelledError:
+            self.log.debug("packet getter was cancelled")
+            raise StopAsyncIteration
+        except asyncio.TimeoutError:
+            self.log.debug("packet getter timed out")
+            raise StopAsyncIteration
+        else:
+            packet.port = port
+            packet.update_input_exit()
+            return packet
+
+    async def start(self):
+        self.log.debug("start consumers: %r", self._queues)
+        await asyncio.gather(
+            *(
+                self.transport.start_consumer(
+                    queue, functools.partial(self._on_new_message, port)
+                )
+                for port, queue in self._queues.items()
+            )
+        )
+
+    async def stop(self):
+        self.log.debug("stop consumers: %r", self._queues)
+        await asyncio.gather(
+            *(self.transport.stop_consumer(queue) for queue in self._queues.values())
+        )
+        self._getter.cancel()
+
+    async def _on_new_message(self, port, packet, done_callback: T.Callable):
+        self.log.debug("received on port %r: %r", port, packet)
+
+        if not isinstance(packet, (bytes, Packet)):
+            raise TypeError("only Packets and byte-encoded packets are supported")
 
-        self.receivers = []
-        self.packets = QueueWithMetrics(runner.metric_emitter, self.name)
+        try:
+            packet = Packet.from_bytes(packet) if isinstance(packet, bytes) else packet
+        except Exception:  # pylint: disable=broad-except
+            self.log.error("Failed to convert message to packet", exc_info=True)
+            await done_callback()
+            return
 
-        self.batch_size = 25
-        self.low_queue_level = 10
-        self.at_low_queue_level = asyncio.Event()
-        self.at_low_queue_level.set()
+        packet.update_input_entry()
 
-        self._close = False
-        self._getter = None
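+        # defer the broker acknowledgement: done_callback is only awaited after
+        # the runner has marked the packet as done (see `mark_done` below)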
+        marked_done = asyncio.Event()
+        self._packets_done[packet.uid] = marked_done
 
-    def __aiter__(self):
-        return self
+        self.log.debug("move to input queue: %r", (port, packet))
+        await self._packets.put((port, packet))
+        await marked_done.wait()
+        await done_callback()
 
-    async def __anext__(self) -> Packet:
-        while not self._close:
-            self._getter = asyncio.create_task(self.get())
-            try:
-                # careful: without timeout the get sometimes hangs on `self.packets.get()`
-                #          in that case the brick runner does not shutdown correctly.
-                # TODO: find out if there's a better way...
-                return await asyncio.wait_for(self._getter, timeout=2)
-            except CancelledError:
-                raise StopAsyncIteration
-            except futures.TimeoutError:
-                pass  # retry or abort if closing...
-        raise StopAsyncIteration
-
-    async def get(self):
-        """awaitable to get the next available packet from the input queue"""
-        packet = await self.packets.get()
-        self.packets.task_done()
-        if self.packets.qsize() <= self.low_queue_level:
-            self.at_low_queue_level.set()
-
-        packet.update_input_exit()
-        return packet
-
-    async def put(self, packet):
-        packet.update_input_entry()
-        await self.packets.put(packet)
-        if self.packets.qsize() > self.low_queue_level:
-            self.at_low_queue_level.clear()
-
-    def add_sources(self, sources):
-        for source in sources:
-            self.add_source(source)
-
-    def add_source(self, source):
-        port_name, target_port = source["port"], source["target_port"]
-        address = source["address"].split(":")
-        task = asyncio.create_task(self.get_input(address, port_name, target_port))
-        self.receivers.append(task)
-        task.add_done_callback(partial(self.handle_input_loss, address, port_name))
-
-    def handle_input_loss(self, address, port_name, task):
-        """
-        if we loose a connection to some input source, we handle removing the appropriate task here.
-        Any CancelledError will be ignored, all others Exceptions are unexpected and will be logged.
-        """
-        self.receivers.remove(task)
-        try:
-            task.result()
-        except CancelledError:
-            pass
-        except Exception as error:  # pylint: disable=broad-except
-            self.log.error("Error on input connection: %r on %s -> %s", port_name, address, error)
-
-    async def get_input(self, address, port_name, target_port):
-        """Connect to and retrieve packets from the given address"""
-        async with await Connection.open(address) as connection:
-            await connection.send(ConsumerRegistration(content=(self.runner.brick.uid, port_name)))
-
-            while True:
-                await asyncio.sleep(0)  # be cooperative
-                await self.at_low_queue_level.wait()
-                await connection.send(PacketRequest(self.batch_size))
-                for _ in range(self.batch_size):
-                    message = await connection.receive()
-                    if not message:
-                        return  # disconnected
-                    packet = Packet.from_dict(message.content)
-                    packet.port = target_port
-                    await self.put(packet)
-
-    async def close(self):
-        """Stop the input"""
-        if self.receivers:
-            await cancel_tasks(self.receivers)
-        self._close = True
-        if self._getter:
-            self._getter.cancel()
-        await self.packets.close()
-
-    @property
-    def is_empty(self):
-        return not self.packets.unfinished_tasks
+    def mark_done(self, packet):
+        """signal that a packet was fully processed,
+        releasing the acknowledgement of the underlying message"""
+        self.log.debug("mark done: %r", packet)
+        marked_done = self._packets_done.pop(packet.uid, None)
+        if marked_done:  # may be absent if the packet was enqueued directly (e.g. in tests)
+            marked_done.set()
+        self._packets.task_done()
diff --git a/titanfe/apps/brick_runner/output.py b/titanfe/apps/brick_runner/output.py
new file mode 100644
index 0000000..2758730
--- /dev/null
+++ b/titanfe/apps/brick_runner/output.py
@@ -0,0 +1,116 @@
+#
+# Copyright (c) 2019-present, wobe-systems GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# found in the LICENSE file in the root directory of this source tree.
+#
+
+"""The output with its server and ports"""
+import asyncio
+import typing as T
+from copy import deepcopy, copy
+from dataclasses import dataclass
+
+from UJOSchema import schema_to_type
+from ujotypes import UjoBase, UJO_VARIANT_NONE
+
+import titanfe.log
+from titanfe.apps.brick_runner import transport
+from titanfe.apps.brick_runner.packet import Packet
+from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
+from titanfe.apps.control_peer.brick import Connection
+
+
+@dataclass
+class Consumer:
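+    """a consumer of packets published on one of the output ports,
+    addressed by the name of its queue on the message transport"""
+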
+    queue_name: str
+    target_type: UjoBase
+    mapping_rules: MappingRules
+    buffer_description: BufferDescription
+
+    @classmethod
+    def from_connection(cls, connection: Connection) -> "Consumer":
+        return cls(
+            queue_name=connection.target_queue,
+            target_type=(
+                schema_to_type(
+                    connection.targetPort.schema, connection.targetPort.typeName
+                )
+                if connection.targetPort.schema
+                else UJO_VARIANT_NONE
+            ),
+            buffer_description=BufferDescription(connection.buffer),
+            mapping_rules=MappingRules(connection.mapping),
+        )
+
+    def create_packet(self, packet: Packet) -> Packet:
+        packet = copy(packet)
+
+        try:
+            packet.buffer = packet.buffer.new_buffer_from_result(
+                result=packet.payload, buffer_description=self.buffer_description
+            )
+        except Exception as error:  # pylint: disable=broad-except
+            raise ValueError(
+                f"updating packet buffer for target {self.queue_name} failed: {error}"
+            ) from error
+
+        try:
+            new_payload = self.mapping_rules.apply(
+                buffer=packet.buffer,
+                source=packet.payload,
+                target=deepcopy(self.target_type),
+            )
+        except Exception as error:  # pylint: disable=broad-except
+            raise ValueError(
+                f"mapping packet values for target {self.queue_name} failed: {error!r}"
+            ) from error
+
+        packet.payload = new_payload
+        return packet
+
+
+@dataclass
+class Port:
+    name: str
+    consumers: T.List[Consumer]
+
+
+class Output:
+    """The output side of a brick runner creates a Server.
+       It will then send packets as requested by the following inputs.
+
+    Arguments:
+        runner (BrickRunner): instance of a parent brick runner
+        name (str): a name for the output destination
+        address (NetworkAddress): the network address of the output server
+    """
+
+    def __init__(
+        self,
+        output_connections: T.Dict["PortName", T.List[Connection]],
+        transport: transport.RabbitMQ,
+        logger=None,
+    ):
+        self.transport = transport
+        self.log = logger.getChild("Output") if logger else titanfe.log.getLogger(__name__)
+
+        self.ports: T.Dict[str, Port] = {
+            port_name: Port(
+                port_name,
+                [Consumer.from_connection(connection) for connection in targets],
+            )
+            for port_name, targets in output_connections.items()
+        }
+
+    def __getitem__(self, port) -> Port:
+        return self.ports[port]
+
+    async def put(self, packet, port):
+        consumers = self[port].consumers
+        self.log.debug("publish %r on port %r to consumers: %r", packet, port, consumers)
+        await asyncio.gather(
+            *(
+                self.transport.publish(consumer.queue_name, consumer.create_packet(packet))
+                for consumer in consumers
+            )
+        )
diff --git a/titanfe/apps/brick_runner/output/__init__.py b/titanfe/apps/brick_runner/output/__init__.py
deleted file mode 100644
index dbdad10..0000000
--- a/titanfe/apps/brick_runner/output/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Copyright (c) 2019-present, wobe-systems GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# found in the LICENSE file in the root directory of this source tree.
-#
-
-"""The output side of a BrickRunner"""
-
-from .consumer import Consumer
-from .group import ConsumerGroup
-from .port import Port
-from .output import Output
-
-__all__ = ["Consumer", "ConsumerGroup", "Port", "Output"]
diff --git a/titanfe/apps/brick_runner/output/consumer.py b/titanfe/apps/brick_runner/output/consumer.py
deleted file mode 100644
index c810504..0000000
--- a/titanfe/apps/brick_runner/output/consumer.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#
-# Copyright (c) 2019-present, wobe-systems GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# found in the LICENSE file in the root directory of this source tree.
-#
-
-"""a Consumer represents a connection made to the output server"""
-
-import asyncio
-
-from titanfe.utils import create_uid
-
-
-class Consumer:
-    """wrap incoming connections and handle sending packets"""
-
-    def __init__(self, port_name, brick_instance_id, connection):
-        self.uid = create_uid(f"C-{brick_instance_id}-")
-        self.port_name = port_name
-        self.brick_instance_id = brick_instance_id
-        self.connection = connection
-
-        self.listener = asyncio.create_task(self.listen())
-        self._packets_expected = 0
-        self._receptive = asyncio.Event()
-
-        self.disconnected = asyncio.Event()
-
-    def __repr__(self):
-        return (
-            f"Consumer("
-            f"uid={self.uid}, "
-            f"port_name={self.port_name}, "
-            f"brick_instance_id={self.brick_instance_id})"
-        )
-
-    async def is_receptive(self):
-        await self._receptive.wait()
-        return self
-
-    async def listen(self):
-        """wait for packet requests, set disconnected-Event if the connection gets closed"""
-        async for message in self.connection:
-            self._packets_expected += message.content
-            self._receptive.set()
-
-        self.disconnected.set()
-        self._receptive.clear()
-
-    async def close_connection(self):
-        self.listener.cancel()
-        await self.connection.close()
-
-    async def send(self, packet):
-        """send a packet"""
-        self._packets_expected -= 1
-        if self._packets_expected == 0:
-            self._receptive.clear()
-
-        packet.update_output_exit()
-        await self.connection.send(packet.as_message())
diff --git a/titanfe/apps/brick_runner/output/group.py b/titanfe/apps/brick_runner/output/group.py
deleted file mode 100644
index 2718db8..0000000
--- a/titanfe/apps/brick_runner/output/group.py
+++ /dev/null
@@ -1,193 +0,0 @@
-#
-# Copyright (c) 2019-present, wobe-systems GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# found in the LICENSE file in the root directory of this source tree.
-#
-
-"""Group represents multiple consumers of the same type"""
-
-import asyncio
-from asyncio import CancelledError
-from collections import deque
-from copy import deepcopy, copy
-from dataclasses import dataclass, field
-from typing import List
-
-from ujotypes.variants.none import UJO_VARIANT_NONE
-from UJOSchema import schema_to_type
-
-from titanfe.apps.brick_runner.connection import BufferDescription, MappingRules
-from titanfe.apps.brick_runner.metrics import QueueWithMetrics
-from titanfe.utils import cancel_tasks, pairwise, Flag
-
-
-@dataclass
-class ConsumerGroupTasks:
-    """Task of a Consumer group"""
-
-    send_packets: asyncio.Task
-    check_scaling: asyncio.Task
-    handle_disconnects: field(default_factory=List)
-
-    @property
-    def all_tasks(self):
-        return self.handle_disconnects + [self.send_packets, self.check_scaling]
-
-    def __iter__(self):
-        return iter(self.all_tasks)
-
-    def add_disconnect_handler(self, on_disconnect: asyncio.coroutine):
-        task = asyncio.create_task(on_disconnect)
-        self.handle_disconnects.append(task)
-        task.add_done_callback(self.handle_disconnects.remove)
-
-    async def cancel(self):
-        await cancel_tasks(self)
-
-
-class ConsumerGroup:
-    """Group consumers of same type and distribute packets between them"""
-
-    slow_queue_alert_callback = lambda *args, **kwargs: None  # noqa
-
-    def __init__(self, consumer_instance_id, queue, consumer, logger):
-        self.name = consumer_instance_id
-        self.log = logger
-        self.consumers = []
-        self.packets: QueueWithMetrics = queue
-        self.has_packets = Flag()
-        self.has_consumers = Flag()
-        self.new_consumer_entered = Flag()
-
-        self.autoscale_queue_level = consumer.get("autoscale_queue_level", 0)
-        target_port = consumer.get("targetPort")
-        self.target = (
-            schema_to_type(target_port["schema"], target_port["typeName"])
-            if target_port.get("schema", None)
-            else UJO_VARIANT_NONE
-        )
-        self.buffer_description = BufferDescription(consumer["buffer"])
-        self.mapping = MappingRules(consumer["mapping"])
-
-        self.tasks = ConsumerGroupTasks(
-            asyncio.create_task(self.send_packets()),
-            asyncio.create_task(self.check_scaling_required(self.autoscale_queue_level)),
-            [],
-        )
-
-    def __iter__(self):
-        return iter(self.consumers)
-
-    def __repr__(self):
-        return f"Group(name={self.name}, consumers={self.consumers})"
-
-    async def close(self):
-        await asyncio.gather(*[consumer.close_connection() for consumer in self])
-        await self.tasks.cancel()
-        await self.packets.close()
-
-    async def check_scaling_required(self, autoscale_queue_level=0, check_interval=0.2):
-        """ watch the queue and dispatch an alert if it grows continuously,
-            then wait for a new consumer before resetting the queue history - repeat."""
-        try:
-            if not autoscale_queue_level or not self.slow_queue_alert_callback:
-                return
-
-            await self.has_packets.wait()
-            await self.slow_queue_alert_callback(self.name)
-
-            # wait for the first consumer to come in
-            await self.new_consumer_entered.wait()
-            self.new_consumer_entered.clear()
-
-            history = deque(maxlen=5)
-
-            while True:
-                await asyncio.sleep(check_interval)
-                current_queue_size = self.packets.qsize()
-                history.append(current_queue_size)
-
-                if current_queue_size < autoscale_queue_level or len(history) < 3:
-                    continue
-
-                queue_is_growing = all(0 < prev <= curr for prev, curr in pairwise(history))
-                if queue_is_growing:
-                    await self.slow_queue_alert_callback(self.name)
-                    await self.new_consumer_entered.wait()
-
-                self.new_consumer_entered.clear()
-                history.clear()
-        except CancelledError:
-            return
-
-    def add(self, consumer):
-        self.consumers.append(consumer)
-        self.has_consumers.set()
-        self.new_consumer_entered.set()
-        self.tasks.add_disconnect_handler(self.handle_disconnect(consumer))
-
-    async def handle_disconnect(self, consumer):
-        """handle a consumer disconnecting"""
-        await consumer.disconnected.wait()
-        self.consumers.remove(consumer)
-        if not self.consumers:
-            self.has_consumers.clear()
-            await cancel_tasks([self.tasks.check_scaling])
-            self.tasks.check_scaling = asyncio.create_task(
-                self.check_scaling_required(self.autoscale_queue_level)
-            )
-
-    async def enqueue(self, packet):
-        """enqueue the bricks packet after applying the connections
-        mapping rules and updating its buffer"""
-
-        packet = copy(packet)
-
-        try:
-            packet.buffer = packet.buffer.new_buffer_from_result(
-                result=packet.payload, buffer_description=self.buffer_description
-            )
-        except Exception as err:  # pylint: disable=broad-except
-            self.log.error(f"updating buffer failed: {err}")
-            return
-
-        try:
-            next_input = self.mapping.apply(
-                buffer=packet.buffer, source=packet.payload, target=deepcopy(self.target)
-            )
-        except Exception as err:  # pylint: disable=broad-except
-            self.log.error(f"mapping failed: {err!r}")
-            return
-
-        packet.payload = next_input
-
-        await self.packets.put(packet)
-        self.has_packets.set()
-
-    async def send_packets(self):
-        """send packets"""
-        while True:
-            while not (self.has_consumers and self.has_packets):
-                await self.has_consumers.wait()
-                await self.has_packets.wait()
-
-            consumer = await self.get_receptive_consumer()
-            packet = self.packets.get_nowait()
-            if self.packets.empty():
-                self.has_packets.clear()
-
-            await consumer.send(packet)
-            self.packets.task_done()
-
-    async def get_receptive_consumer(self):
-        """wait until any of the consumers is ready to receive and then return it"""
-        done, pending = await asyncio.wait(
-            {consumer.is_receptive() for consumer in self}, return_when=asyncio.FIRST_COMPLETED
-        )
-        await cancel_tasks(pending)
-        return done.pop().result()
-
-    @property
-    def has_unfinished_business(self):
-        return self.packets.unfinished_tasks
diff --git a/titanfe/apps/brick_runner/output/output.py b/titanfe/apps/brick_runner/output/output.py
deleted file mode 100644
index 74d31c0..0000000
--- a/titanfe/apps/brick_runner/output/output.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#
-# Copyright (c) 2019-present, wobe-systems GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# found in the LICENSE file in the root directory of this source tree.
-#
-
-"""The output with it's server and ports"""
-
-import asyncio
-from functools import partial
-from itertools import chain
-
-from .port import Port
-from ..metrics import QueueWithMetrics
-
-
-class Output:
-    """The output side of a brick runner creates a Server.
-       It will then send packets as requested by the following inputs.
-
-    Arguments:
-        runner (BrickRunner): instance of a parent brick runner
-        name (str): a name for the output destination
-        address (NetworkAddress): the network address of the output server
-    """
-
-    def __init__(self, logger, create_output_queue):
-        self.log = logger.getChild("Output")
-        self.create_queue = create_output_queue
-
-        self.ports = {}
-
-    def __iter__(self):
-        return iter(self.ports.values())
-
-    def __getitem__(self, port_name):
-        try:
-            port = self.ports[port_name]
-        except KeyError:
-            port = self.ports[port_name] = Port(port_name)
-
-        return port
-
-    def __repr__(self):
-        return f"Output(ports={repr(self.ports)})"
-
-    @classmethod
-    async def create(cls, logger, metric_emitter):
-        """Creates a new instance"""
-        output = cls(logger, create_output_queue=partial(QueueWithMetrics, metric_emitter))
-        return output
-
-    def make_ports_and_groups(self, consumers_by_port):
-        for port_name, consumers in consumers_by_port.items():
-            for consumer in consumers:
-                self.log.debug("add consumer group: %r on %r", consumer, port_name)
-                self.add_consumer_group(port_name, consumer)
-
-    def add_consumer_group(self, port_name, consumer):
-        """add a configured output target"""
-        target_instance_id = consumer["InstanceID"]
-        self[port_name].add_consumer_group(
-            target_instance_id, self.create_queue(target_instance_id), consumer, self.log
-        )
-
-    async def close(self):
-        """close all connections and the server itself"""
-        if self.ports:
-            await asyncio.wait({port.close() for port in self})
-
-    @property
-    def consumer_groups(self):
-        return chain.from_iterable(self)
-
-    @property
-    def is_empty(self):
-        """True, if no packets are waiting to be outputted"""
-        return not any(group.has_unfinished_business for group in self.consumer_groups)
diff --git a/titanfe/apps/brick_runner/output/port.py b/titanfe/apps/brick_runner/output/port.py
deleted file mode 100644
index 8f6f93a..0000000
--- a/titanfe/apps/brick_runner/output/port.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Copyright (c) 2019-present, wobe-systems GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# found in the LICENSE file in the root directory of this source tree.
-#
-
-"""An output Port of a Brick to connect to and receive data from"""
-
-import asyncio
-
-from .group import ConsumerGroup
-
-
-class Port:
-    """"A port holds a consumer group for each brick connected to the port in a flow.
-        The consumer group holds at least one connection with the succeeding brick.
-        Should the succeeding brick scale connections are added to the consumer group
-        when new instances of the brick come online"""
-
-    def __init__(self, name):
-        self.name = name
-        self.consumer_groups = {}
-
-    def add_consumer_group(self, consumer_instance_id, queue, consumer, logger):
-        """add a new consumer group"""
-        if consumer_instance_id not in self.consumer_groups:
-            group = ConsumerGroup(consumer_instance_id, queue, consumer, logger)
-            self.consumer_groups[consumer_instance_id] = group
-
-    def add_consumer(self, consumer):
-        group = self.consumer_groups[consumer.brick_instance_id]
-        group.add(consumer)
-
-    async def enqueue(self, packet):
-        if self.consumer_groups:
-            await asyncio.wait({group.enqueue(packet) for group in self})
-
-    async def close(self):
-        if self.consumer_groups:
-            await asyncio.wait({group.close() for group in self})
-
-    def __iter__(self):
-        return iter(self.consumer_groups.values())
-
-    def __repr__(self):
-        return f"Port(name={self.name}, consumer_groups={self.consumer_groups})"
diff --git a/titanfe/apps/brick_runner/packet.py b/titanfe/apps/brick_runner/packet.py
index d1f0194..3138d72 100644
--- a/titanfe/apps/brick_runner/packet.py
+++ b/titanfe/apps/brick_runner/packet.py
@@ -8,15 +8,24 @@
 """An information packet passed between Bricks"""
 
 import functools
+import pickle
 import time
 from dataclasses import dataclass, field
 
-from ujotypes import UjoBase
-from ujotypes.variants.none import UJO_VARIANT_NONE
 
-from titanfe.apps.brick_runner.connection import Buffer
 from titanfe.messages import PacketMessage
+from titanfe.ujo_helper import py_to_ujo_bytes
 from titanfe.utils import create_uid, ns_to_ms, time_delta_in_ms, DictConvertable
+from titanfe.apps.brick_runner.value_mapping import Buffer
+
+from ujotypes import UjoBase, UjoStringUTF8, read_buffer, UjoMap, ujo_to_python
+from ujotypes.variants.none import UJO_VARIANT_NONE
+
+# ENCODING = "PICKLE"
+ENCODING = "UJO"
+
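+# UjoMap keys of the payload and buffer entries within a serialized packet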
+PAYLOAD = UjoStringUTF8("payload")
+BUFFER = UjoStringUTF8("buffer")
 
 
 @dataclass(repr=False)
@@ -64,3 +73,35 @@ class Packet(DictConvertable):
 
     def as_message(self):
         return PacketMessage(self.to_dict())
+
+    def __bytes__(self):
+        if ENCODING == "PICKLE":
+            return pickle.dumps(self)
+
+        # default: Ujo
+        return py_to_ujo_bytes(self.to_dict())
+
+    @classmethod
+    def from_bytes(cls, bytez: bytes):
+        if ENCODING == "PICKLE":
+            return pickle.loads(bytez)
+
+        # default: Ujo
+        ujomap = read_buffer(bytez)
+
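+        # pull payload (and buffer) out before converting the remaining map to
+        # python, so the payload itself stays an Ujo value end to end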
+        payload = ujomap[PAYLOAD]
+        del ujomap[PAYLOAD]
+        try:
+            buffer = ujomap[BUFFER]
+        except KeyError:
+            buffer = UjoMap()
+        else:
+            del ujomap[BUFFER]
+
+        pymap = ujo_to_python(ujomap)
+
+        # set payload to the original ujo payload
+        pymap["payload"] = payload
+        pymap["buffer"] = Buffer(buffer)
+
+        return cls.from_dict(pymap)
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index 5e019ba..55b1a74 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -8,23 +8,18 @@
 """The actual brick runner"""
 
 import asyncio
-import json
 import os
 import signal
-from datetime import datetime, timedelta
 
+from titanfe.apps.control_peer.brick import BrickInstanceDefinition
 from titanfe import log as logging
-from titanfe.connection import Connection
-from titanfe.messages import Message
-from titanfe.utils import cancel_tasks, get_ip_address
+from titanfe.utils import cancel_tasks
 from .brick import Brick
-from .grid_manager import GridManager
 from .input import Input
 from .metrics import MetricEmitter
-from .output import Output, Consumer, ConsumerGroup
+from .output import Output
 from .packet import Packet
-from ..control_peer.brick import BrickInstanceDefinition
-from ...config import configuration
+from .transport import RabbitMQ
 
 
 class BrickRunner:
@@ -49,6 +44,7 @@ class BrickRunner:
         self.server = None
         self.address = (None, None)
         self.gridmanager = None
+        self.rabbitmq = None
 
         self.setup_completed = asyncio.Event()
 
@@ -66,101 +62,50 @@ class BrickRunner:
     async def setup(self, brick_definition: BrickInstanceDefinition):
         """does the inital setup parts that have to be awaited"""
         self.log = logging.TitanPlatformLogger(
-            f"{__name__}.{self.uid}.{brick_definition.name}", context=logging.global_context
+            f"{__name__}.{self.uid}.{brick_definition.name}",
+            context=logging.global_context,
         )
 
-        await self.start_server()
+        self.rabbitmq = RabbitMQ(brick_definition.message_exchange)
+        self.input = Input(
+            brick_definition.input_queues,
+            max_idle_time=brick_definition.runtime_parameters.exit_after_idle_seconds,
+            transport=self.rabbitmq,
+            logger=self.log
+        )
+        self.output = Output(
+            brick_definition.connections.output, transport=self.rabbitmq, logger=self.log
+        )
 
         self.metric_emitter = await MetricEmitter.create_from_brick_runner(self)
-        self.brick = Brick(brick_definition, self.metric_emitter, self.log)
-        self.metric_emitter.set_metadata_from_runner(self)
-
-        self.gridmanager = GridManager(self.uid, self.brick.uid)
-        ConsumerGroup.slow_queue_alert_callback = self.gridmanager.request_scaling
 
-        self.input = Input(self)
-        self.output = await Output.create(self.log, self.metric_emitter)
-        available_input_sources = await self.gridmanager.register_runner(self.address)
-        if available_input_sources:
-            self.log.debug("input sources %s", available_input_sources)
-            self.input.add_sources(available_input_sources)
+        self.brick = Brick(brick_definition, self.metric_emitter, self.log, self.output)
 
-        if not self.brick.is_outlet:
-            self.output.make_ports_and_groups(brick_definition.connections.output)
-            self.tasks.append(asyncio.create_task(self.output_results()))
+        self.metric_emitter.set_metadata_from_runner(self)
 
         self.add_signal_handlers()
-
         self.setup_completed.set()
 
     async def run(self):
         """process items from the input"""
         self.log.with_context.info("Start runner: %s", self.uid)
 
-        if self.brick.is_inlet:
-            # trigger processing
-            await self.input.put(Packet(port="TRIGGER"))
+        try:
+            with self.brick:
+                if self.brick.is_inlet:
+                    return await self.brick.process(Packet())
 
-        if not self.brick.is_inlet:
-            self.tasks.append(asyncio.create_task(self.exit_when_idle()))
+                # not an inlet: consume and process packets until the input runs idle
+                await self.input.start()
+                async for packet in self.input:
+                    self.log.debug("process packet: %s", packet)
+                    await self.brick.process(packet)
+                    self.input.mark_done(packet)
 
-        try:
-            await self.process_input()
         except Exception:  # pylint: disable=broad-except
             self.log.with_context.error("Brick failed", exc_info=True)
-
         self.log.with_context.info("Exit")
 
-    async def start_server(self):
-        """start server"""
-        self.server = await asyncio.start_server(
-            self.handle_incoming_connection, host=configuration.IP or get_ip_address()
-        )
-        self.address = self.server.sockets[0].getsockname()
-
-    async def handle_incoming_connection(self, reader, writer):
-        """create consumers for incoming connections and dispatch the connection to them"""
-        await self.setup_completed.wait()
-
-        connection = Connection(reader, writer, self.log)
-
-        # We can remove this once the Gridmanager starts sending UJO Messages
-        try:
-            msg_len = await connection.reader.readexactly(4)
-        except (asyncio.IncompleteReadError, ConnectionError):
-            self.log.debug("Stream at EOF - close connection.")
-            # self.log.debug('', exc_info=True)
-            await connection.close()
-            return
-
-        rawmsg = await connection.reader.readexactly(int.from_bytes(msg_len, "big"))
-
-        try:
-            msg = connection.decode(rawmsg)
-            message = Message(*msg)
-        except TypeError:
-            self.log.error("Received unknown Message format: %s", rawmsg)
-            return
-        except Exception:  # pylint: disable=broad-except
-            # self.log.error("Failed to decode %r", msg, exc_info=True)
-            # raise ValueError(f"Failed to decode {msg}")
-            # TODO: # Use UJO Encoding and appropriate msg format in GridManager
-            self.log.info("new input source available: %s", json.loads(rawmsg))
-            self.input.add_source(json.loads(rawmsg))
-        else:
-            self.log.info("new consumer entered: %s", message.content)
-            brick_instance_id, port = message.content
-            self.output[port].add_consumer(Consumer(port, brick_instance_id, connection))
-
-    async def process_input(self):
-        """ get packets from the input and process them """
-        with self.brick:
-            async for packet in self.input:
-                packet.update_input_exit()
-                self.log.debug("process packet: %s", packet)
-                await self.brick.process(packet)
-                self.idle_since = None
-
     def schedule_shutdown(self, sig, frame):  # pylint: disable=unused-argument
         self.log.info(
             "Received signal %s - scheduling shutdown",
@@ -180,11 +125,9 @@ class BrickRunner:
         self.log.info("Stop Processing")
         await self.gridmanager.deregister_runner()
         logging.flush_kafka_log_handler()
-        await self.input.close()
+        await self.input.stop()
         self.brick.terminate()
-        self.server.close()
-        await self.server.wait_closed()
-        await self.output.close()
+        await self.rabbitmq.disconnect()
         await self.metric_emitter.stop()
 
     async def shutdown(self):
@@ -192,40 +135,3 @@ class BrickRunner:
         self.log.with_context.info("Initiating Shutdown")
         await self.stop_processing()
         await cancel_tasks(self.tasks, wait_cancelled=True)
-
-    async def output_results(self):
-        """get results from the brick execution and add them to the output queues of this runner"""
-        async for packet, port in self.brick.get_results():
-            packet.update_output_entry()
-            await self.output[port].enqueue(packet)
-
-    @property
-    def is_idle(self):
-        return self.input.is_empty and self.output.is_empty and not self.brick.is_processing
-
-    async def exit_when_idle(self):
-        """Schedule as task to initiate shutdown if the configured maximum idle time is reached"""
-        if not self.brick.exit_after_idle_seconds:
-            return  # deactivated
-
-        # check at least once per second:
-        interval = min(self.brick.exit_after_idle_seconds * 0.1, 1)
-
-        self.idle_since = None
-        idle_time = timedelta(seconds=0)
-        max_idle_time = timedelta(seconds=self.brick.exit_after_idle_seconds)
-
-        while idle_time <= max_idle_time:
-            await asyncio.sleep(interval)
-            if not self.is_idle:
-                self.idle_since = None
-                continue
-
-            if self.idle_since is None:
-                self.idle_since = datetime.now()
-                continue
-
-            idle_time = datetime.now() - self.idle_since
-
-        self.log.with_context.info("Max idle time reached. Scheduling shutdown")
-        asyncio.create_task(self.shutdown())
diff --git a/titanfe/apps/brick_runner/transport.py b/titanfe/apps/brick_runner/transport.py
new file mode 100644
index 0000000..ae30b57
--- /dev/null
+++ b/titanfe/apps/brick_runner/transport.py
@@ -0,0 +1,79 @@
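+"""Brick-to-brick message transport via RabbitMQ, based on aioamqp."""
+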
+import asyncio
+
+import aioamqp
+import aioamqp.protocol
+import aioamqp.channel
+
+import titanfe.log
+
+class RabbitMQ:
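+    """Maintain a single aioamqp connection and channel; manage per-queue consumers."""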
+    # TODO: make "robust" - handle reconnect and stuff
+    #  maybe find inspiration in aio_pika's RobustConnection/Channel/Queue?
+
+    def __init__(self, exchange_name, logger=None):
+        self.log = logger.getChild("transport.RabbitMQ") if logger else titanfe.log.getLogger(__name__)
+        self.exchange_name = exchange_name
+        self._consumers = {}
+        self._connection: aioamqp.protocol.AmqpProtocol = None
+        self._channel: aioamqp.channel.Channel = None
+
+    async def connect(self):
+        _, self._connection = await aioamqp.connect(
+            host="rabbitmq",
+            port="5672",
+            login="wobe",
+            password="RabbitMQ-4-OpenFBA",
+            heartbeat=60,
+        )
+        self.log.debug("connected to rabbitmq")
+        self._channel = await self._connection.channel()
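+        # prefetch_count=1: the broker delivers at most one unacknowledged message
+        # per consumer, so pending packets stay queued for other consumers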
+        await self._channel.basic_qos(prefetch_count=1)
+
+    async def disconnect(self):
+        if not self._connection:
+            return
+
+        await asyncio.gather(
+            *(self.stop_consumer(consumer) for consumer in list(self._consumers))
+        )
+
+        await self._channel.close()
+        await self._connection.close()
+        self.log.debug("disconnected from rabbitmq")
+
+    async def connection(self):
+        if not self._connection:
+            await self.connect()
+        return self._connection
+
+    async def channel(self) -> aioamqp.channel.Channel:
+        await self.connection()
+        return self._channel
+
+    async def start_consumer(self, queue_name, on_new_message_callback):
+
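+        # Wrap the callback so it receives a `done_callback` that acks the
+        # message; if the runner dies before acking, the broker redelivers
+        # (at-least-once delivery).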
+        async def callback_wrapper(msgchannel, body, envelope, _):
+            async def done_callback():
+                await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
+
+            await on_new_message_callback(body, done_callback)
+
+        channel = await self.channel()
+
+        await channel.queue_declare(queue_name, durable=True)
+        # aioamqp resolves basic_consume with a dict containing the consumer tag
+        result = await channel.basic_consume(callback_wrapper, queue_name)
+        self._consumers[queue_name] = result["consumer_tag"]
+
+    async def stop_consumer(self, queue_name):
+        tag = self._consumers.pop(queue_name)
+        channel = await self.channel()
+        await channel.basic_cancel(tag)
+
+    async def publish(self, queue_name, message):
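+        # Routing key equals the target queue name; this assumes exchange and
+        # queue bindings are declared elsewhere, since this class never binds them.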
+        if not isinstance(message, bytes):
+            message = bytes(message)
+
+        self.log.debug("publish to %r: %r", queue_name, message)
+
+        channel = await self.channel()
+        await channel.basic_publish(message, self.exchange_name, routing_key=queue_name)
diff --git a/titanfe/apps/brick_runner/connection.py b/titanfe/apps/brick_runner/value_mapping.py
similarity index 100%
rename from titanfe/apps/brick_runner/connection.py
rename to titanfe/apps/brick_runner/value_mapping.py
diff --git a/titanfe/apps/control_peer/brick.py b/titanfe/apps/control_peer/brick.py
index 4155e7b..36b794f 100644
--- a/titanfe/apps/control_peer/brick.py
+++ b/titanfe/apps/control_peer/brick.py
@@ -10,6 +10,7 @@
 import re
 import shutil
 from collections import namedtuple
+from dataclasses import dataclass, field
 from datetime import datetime
 from io import BytesIO
 from pathlib import Path
@@ -17,14 +18,16 @@ from zipfile import ZipFile
 import venv
 import os
 import subprocess
+import typing as T
 
 from titanfe.constants import GET_PIP
 from titanfe import log as logging
+from titanfe.utils import truncate
 from .services import package_manager
 from ...config import configuration
 
 Flow = namedtuple("Flow", ("uid", "name", "schema"))
-Connections = namedtuple("Connections", ("input", "output"))
+
 Ports = namedtuple("Ports", ("input", "output"))
 
 RuntimeParameters = namedtuple(
@@ -42,8 +45,7 @@ def get_venv_exe(directory):
 
 
 class EnvBuilder(venv.EnvBuilder):
-    """Builder for the virtual enviroments for each brick
-    """
+    """Builder for the virtual enviroments for each brick"""
 
     def __init__(self, logger, *args, **kwargs):
         self.log = logger
@@ -52,7 +54,7 @@ class EnvBuilder(venv.EnvBuilder):
         super().__init__(*args, **kwargs)
 
     def post_setup(self, context):
-        """ install platforma and brick requirements during
+        """install platforma and brick requirements during
         setup of the virtual environment
         """
         self.exe = context.env_exe
@@ -77,7 +79,7 @@ class EnvBuilder(venv.EnvBuilder):
     def log_stdout(self, pipe):
         for line in pipe.readlines():
             if line:
                 self.log.info(line.decode())
 
     def install_requirements(self, context):
         """install requirements in virtual environment"""
@@ -90,7 +92,10 @@ class EnvBuilder(venv.EnvBuilder):
         binpath = context.bin_path
         get_requirements = [context.env_exe, "-m", "pip", "install", "-r", requirements]
         with subprocess.Popen(
-            get_requirements, cwd=binpath, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
+            get_requirements,
+            cwd=binpath,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
         ) as process:
             with process.stdout:
                 self.log_stdout(process.stdout)
@@ -99,9 +104,65 @@ class EnvBuilder(venv.EnvBuilder):
             raise RuntimeError(f"Failed to install requirements. ({exitcode})")
 
 
+@dataclass
+class PortDescription:
+    id: str
+    typeName: str
+    schema: str
+
+
+@dataclass
+class Connection:
+    InstanceID: str
+    sourcePort: PortDescription
+    targetPort: PortDescription
+    autoscale_queue_level: int
+    mapping: T.List["PortMapping"]
+    buffer: T.Dict
+
+    @property
+    def target_queue(self):
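+        # AMQP limits queue names to 255 characters, hence the truncation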
+        return truncate(self.InstanceID + ":" + self.targetPort.id, 255)
+
+    def __post_init__(self):
+        if isinstance(self.sourcePort, T.Mapping):
+            self.sourcePort = PortDescription(**self.sourcePort)
+
+        if isinstance(self.targetPort, T.Mapping):
+            self.targetPort = PortDescription(**self.targetPort)
+
+
+@dataclass
+class Connections:
+    input: T.Dict["PortName", T.List[Connection]]
+    output: T.Dict["PortName", T.List[Connection]]
+
+    def __post_init__(self):
+
+        for port_name, connections in self.input.items():
+            self.input[port_name] = [
+                (
+                    Connection(**connection)
+                    if isinstance(connection, T.Mapping)
+                    else connection
+                )
+                for connection in connections
+            ]
+
+        for port_name, connections in self.output.items():
+            self.output[port_name] = [
+                (
+                    Connection(**connection)
+                    if isinstance(connection, T.Mapping)
+                    else connection
+                )
+                for connection in connections
+            ]
+
+
 class BrickBaseDefinition:
     """
-    The general definition of a brick contains it's name and id, as well as the module itself
+    The general definition of a brick contains its name and id, as well as the module itself
     and possibly a set of default parameters for that module read from the annexed config.yaml
     """
 
@@ -146,11 +207,14 @@ class BrickBaseDefinition:
             )
         except (FileNotFoundError, StopIteration):
             self.log.warning(
-                "Missing module `%s/` or `%s.py` in %s", self.name, self.name, module_parent
+                "Missing module `%s/` or `%s.py` in %s",
+                self.name,
+                self.name,
+                module_parent,
             )
 
     def create_virtual_env(self):
-        """ create a virtual enviroment for the brick"""
+        """create a virtual enviroment for the brick"""
         environment = EnvBuilder(
             logger=self.log,
             system_site_packages=True,
@@ -169,10 +233,12 @@ class BrickBaseDefinition:
         self.exe = environment.exe
 
     def __repr__(self):
-        return f"Base({self.uid}, {self.name}, " f"module_path={self.module_path})"
+        return (
+            f"Base({self.uid!r}, {self.name!r}, " f"module_path={self.module_path!r})"
+        )
 
     async def install_or_update(self, update=True, force_update=False):
-        """ Get a brick from the package manager and install it"""
+        """Get a brick from the package manager and install it"""
         module_parent = Path(configuration.brick_folder)
         destination = module_parent / self.uid
 
@@ -183,9 +249,9 @@ class BrickBaseDefinition:
 
             if not force_update:
                 last_modified_local = destination.stat().st_mtime
-                if datetime.utcfromtimestamp(last_modified_local) >= datetime.utcfromtimestamp(
-                    self.last_modified
-                ):
+                if datetime.utcfromtimestamp(
+                    last_modified_local
+                ) >= datetime.utcfromtimestamp(self.last_modified):
                     return
 
             shutil.rmtree(destination)
@@ -210,42 +276,22 @@ class BrickBaseDefinition:
         self.create_virtual_env()
 
 
+@dataclass
 class BrickInstanceDefinition:
     """
     The Brick Instance Definition is a fully configured brick in a flow context.
-    It should have it's own name and uid within the flow, precise parameters
+    It should have its own name and uid within the flow, precise parameters
     and possibly connections to other bricks.
     """
 
-    def __init__(  # pylint:disable= too-many-arguments
-        self,
-        uid,
-        name,
-        ports: Ports,
-        flow: Flow,
-        base: BrickBaseDefinition,
-        processing_parameters: dict,
-        runtime_parameters: RuntimeParameters,
-        connections: Connections,
-    ):
-        self.flow = flow
-        self.uid = uid
-        self.name = name
-        self.base = base
-        self.ports = ports
-
-        self.processing_parameters = processing_parameters
-        self.runtime_parameters = runtime_parameters
-        self.connections = connections
-
-    def __repr__(self):
-        return (
-            f"Brick({self.uid}, {self.name}, flow={self.flow}, "
-            f"base={self.base}, "
-            f"processing_parameters={self.processing_parameters}, "
-            f"runtime_parameters={self.runtime_parameters}, "
-            f")"
-        )
+    uid: str
+    name: str
+    ports: Ports
+    flow: Flow
+    base: BrickBaseDefinition
+    processing_parameters: dict
+    runtime_parameters: RuntimeParameters
+    connections: Connections
 
     def __hash__(self):
         return hash(self.uid)
@@ -269,15 +315,25 @@ class BrickInstanceDefinition:
         )
 
         logger = logging.TitanPlatformLogger(
-            __name__, context=logging.FlowContext(flow.uid, flow.name, instance_uid, instance_name)
+            __name__,
+            context=logging.FlowContext(
+                flow.uid, flow.name, instance_uid, instance_name
+            ),
         )
 
         base = BrickBaseDefinition(
-            uid=config["id"], name=config["brick"], family=config["family"], logger=logger
+            uid=config["id"],
+            name=config["brick"],
+            family=config["family"],
+            logger=logger,
+        )
+        runtime_params = RuntimeParameters(
+            *[config[f] for f in RuntimeParameters._fields]
         )
-        runtime_params = RuntimeParameters(*[config[f] for f in RuntimeParameters._fields])
         processing_params = config["parameters"]
-        connections = Connections(brick_description["Inbound"], brick_description["Outbound"])
+        connections = Connections(
+            brick_description["Inbound"], brick_description["Outbound"]
+        )
 
         instance = cls(
             instance_uid,
@@ -290,3 +346,14 @@ class BrickInstanceDefinition:
             connections,
         )
         return instance
+
+    @property
+    def message_exchange(self):
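+        """Per-flow exchange name ("<flow uid>:<flow name>"), shared by all bricks of the flow."""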
+        return truncate(self.flow.uid + ":" + self.flow.name, 255)
+
+    @property
+    def input_queues(self):
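+        """Map each input port name to its queue name ("<instance uid>:<port name>")."""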
+        return {
+            port["name"]: truncate(self.uid + ":" + port["name"], 255)
+            for port in self.ports.input
+        }
diff --git a/titanfe/apps/control_peer/config.yaml b/titanfe/apps/control_peer/config.yaml
index 1ba8797..c021646 100644
--- a/titanfe/apps/control_peer/config.yaml
+++ b/titanfe/apps/control_peer/config.yaml
@@ -3,7 +3,7 @@ GridManager : "http://localhost:8080/gridmanager"  # Address of the GridManager
 FlowManager: "http://localhost:9002/flowmanager" # Address of the FlowManager
 PackageManager: "http://localhost:8087/packagemanager" # Address of the FlowManager
 RepositoryService: "http://localhost:8085/object" # Address of the Repository service
-Kafka: "10.14.0.23:9092" #Address of the Kafka bootstrap server"
+#Kafka: "10.14.0.23:9092" #Address of the Kafka bootstrap server"
 #Kafka:" localhost:9092"
 #Kafka: "192.168.69.128:9092"
 IP: "192.168.178.43" #IP address of the host where the CP runs
diff --git a/titanfe/apps/control_peer/runner.py b/titanfe/apps/control_peer/runner.py
index 2ec459c..b7c40c8 100644
--- a/titanfe/apps/control_peer/runner.py
+++ b/titanfe/apps/control_peer/runner.py
@@ -81,7 +81,7 @@ class BrickRunner:
 
     async def check_termination(self):
         """
-        do cyclic checks for an exitcode of the brick runner's process to detect it's termination
+        do cyclic checks for an exitcode of the brick runner's process to detect its termination
         """
         exitcode = None
         while exitcode is None:
diff --git a/titanfe/apps/control_peer/services.py b/titanfe/apps/control_peer/services.py
index 51106c7..f654ccd 100644
--- a/titanfe/apps/control_peer/services.py
+++ b/titanfe/apps/control_peer/services.py
@@ -12,7 +12,7 @@ from abc import ABC, abstractmethod
 
 from aiohttp.client_exceptions import ClientError
 from aiohttp_requests import Requests  # initiate a new client every time,
-# because we don't know how many threads are used and each will have it's own asyncio loop
+# because we don't know how many threads are used and each will have its own asyncio loop
 # there must be a better way, but right now I'm short on time...
 
 from titanfe import log as logging
diff --git a/titanfe/config.py b/titanfe/config.py
index a3b374c..054b72e 100644
--- a/titanfe/config.py
+++ b/titanfe/config.py
@@ -7,6 +7,7 @@
 
 """ the global configuration """
 
+from dataclasses import dataclass
 import os
 from ast import literal_eval
 
@@ -34,44 +35,42 @@ class NotFound:  # pylint: disable=too-few-public-methods
 
 NOTFOUND = NotFound()
 
-
+OPTION_ALIASES = {
+    "IP": "IP",
+    "gridmanager_address": "GridManager",
+    "flowmanager_address": "FlowManager",
+    "packagemanager_address": "PackageManager",
+    "reposervice_address": "RepositoryService",
+    "kafka_bootstrap_servers": "Kafka",
+    "kafka_log_topic": "KafkaLogTopic",
+    "brick_folder": "BrickFolder",
+    "secret_key": "SecretKey",
+    "endpoint_provider": "EndpointProvider"
+}
+@dataclass
 class Configuration:
     """Current Configuration"""
 
-    def __init__(self):
-        self.kafka_bootstrap_servers = DEFAULT_KAFKA_BOOTSTRAP_SERVER
-        self.kafka_log_topic = DEFAULT_KAFKA_LOG_TOPIC
-
-        self.no_kafka_today = literal_eval(
-            os.getenv("TITAN_METRICS_DISABLED") or os.getenv("TITANFE_WITHOUT_KAFKA") or "False"
-        )
-
-        self.gridmanager_address = DEFAULT_GRIDMANAGER_ADDRESS
-        self.flowmanager_address = DEFAULT_FLOWMANAGER_ADDRESS
-        self.packagemanager_address = DEFAULT_PACKAGEMANAGER_ADDRESS
-        self.reposervice_address = DEFAULT_REPOSERVICE_ADDRESS
-        self.secret_key = os.getenv("TITAN_SECRET_KEY") or None
-        self.endpoint_provider = DEFAULT_ENDPOINTPROVIDER_ADDRESS
-        self.IP = None
-
-        self.brick_folder = str(Path.home() / "titanfe/bricks")
-
-    option_aliases = {
-        "IP": "IP",
-        "gridmanager_address": "GridManager",
-        "flowmanager_address": "FlowManager",
-        "packagemanager_address": "PackageManager",
-        "reposervice_address": "RepositoryService",
-        "kafka_bootstrap_servers": "Kafka",
-        "kafka_log_topic": "KafkaLogTopic",
-        "brick_folder": "BrickFolder",
-        "secret_key": "SecretKey",
-        "endpoint_provider": "EndpointProvider"
-    }
+    kafka_bootstrap_servers: str = DEFAULT_KAFKA_BOOTSTRAP_SERVER
+    kafka_log_topic: str = DEFAULT_KAFKA_LOG_TOPIC
+
+    no_kafka_today: bool = literal_eval(
+        os.getenv("TITAN_METRICS_DISABLED") or os.getenv("TITANFE_WITHOUT_KAFKA") or "False"
+    )
+
+    gridmanager_address: str = DEFAULT_GRIDMANAGER_ADDRESS
+    flowmanager_address: str = DEFAULT_FLOWMANAGER_ADDRESS
+    packagemanager_address: str = DEFAULT_PACKAGEMANAGER_ADDRESS
+    reposervice_address: str = DEFAULT_REPOSERVICE_ADDRESS
+    secret_key: str = os.getenv("TITAN_SECRET_KEY") or None
+    endpoint_provider: str = DEFAULT_ENDPOINTPROVIDER_ADDRESS
+    IP: str = None
+
+    brick_folder: str = str(Path.home() / "titanfe/bricks")
 
     def update(self, config: Union["Configuration", dict]):
         """update config from dict or other config"""
-        for attr, alias in self.option_aliases.items():
+        for attr, alias in OPTION_ALIASES.items():
             if isinstance(config, Configuration):
                 value = getattr(config, attr, NOTFOUND)
             else:
diff --git a/titanfe/connection.py b/titanfe/connection.py
index da022fd..07e9ce4 100644
--- a/titanfe/connection.py
+++ b/titanfe/connection.py
@@ -17,7 +17,7 @@ from typing import Optional
 from ujotypes import UjoMap, read_buffer, ujo_to_python, UjoStringUTF8
 
 import titanfe.log
-from titanfe.apps.brick_runner.connection import Buffer
+from titanfe.apps.brick_runner.value_mapping import Buffer
 from titanfe.ujo_helper import py_to_ujo_bytes
 from titanfe.messages import Message
 
@@ -96,7 +96,7 @@ class Connection:
         return cls(reader, writer, log)
 
     async def close(self):
-        """close the connection by closing it's reader and writer"""
+        """close the connection by closing its reader and writer"""
         if self.closed:
             return
         self.reader.feed_eof()
@@ -108,7 +108,7 @@ class Connection:
         self.closed = True
 
     async def receive(self):
-        """wait until a message comes through and return it's content after decoding
+        """wait until a message comes through and return its content after decoding
 
         Return:
              Message: a message or None if the connection was closed remotely
diff --git a/titanfe/log.py b/titanfe/log.py
index 9b65054..41aa2a9 100644
--- a/titanfe/log.py
+++ b/titanfe/log.py
@@ -204,7 +204,7 @@ class UjoBinFormatter(logging.Formatter):
         message = record.getMessage()
 
         if record.exc_info:
             # Cache the traceback text to avoid converting it multiple times (it's constant anyway)
             if not record.exc_text:
                 record.exc_text = self.formatException(record.exc_info)
         if record.exc_text:
diff --git a/titanfe/log_config.yml b/titanfe/log_config.yml
index 8b80f28..59c1dc8 100644
--- a/titanfe/log_config.yml
+++ b/titanfe/log_config.yml
@@ -15,15 +15,17 @@ handlers:
 loggers:
   titanfe:
     level:
-      DEBUG
+      #DEBUG
       #METRIC
       #INFO
-      #WARNING
+      WARNING
+      #ERROR
 
   titanfe.bricks:
-    #level: INFO
-    level: DEBUG
-
+    level:
+      #DEBUG
+      INFO
+      #ERROR
 
 root:
   # level: DEBUG  # enable to see asyncio/kafka/etc
diff --git a/titanfe/utils.py b/titanfe/utils.py
index f2174e8..3ef8031 100644
--- a/titanfe/utils.py
+++ b/titanfe/utils.py
@@ -257,3 +257,9 @@ class Flag(asyncio.Event):
 
 def generate_key(secret_key, salt):
     return hashlib.pbkdf2_hmac('sha1', secret_key, salt, dklen=32, iterations=4096)
+
+
+def truncate(text: str, max_length: int) -> str:
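+    """Shorten `text` to at most `max_length` characters, replacing the cut tail with "...".
+
+    >>> truncate("abcdefgh", 5)
+    'ab...'
+    """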
+    if len(text) > max_length:
+        text = text[:max_length - 3] + "..."
+    return text
-- 
GitLab


From 71865e1629a90df3b66627eb5101bdefc03180d3 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Thu, 14 Sep 2023 16:18:31 +0200
Subject: [PATCH 02/29] fix the TestRunner

---
 titanfe/apps/brick_runner/output.py |  20 ++---
 titanfe/apps/brick_runner/runner.py |  14 +--
 titanfe/testing/testrunner.py       | 129 +++++++++++++---------------
 3 files changed, 78 insertions(+), 85 deletions(-)

diff --git a/titanfe/apps/brick_runner/output.py b/titanfe/apps/brick_runner/output.py
index 2758730..484822d 100644
--- a/titanfe/apps/brick_runner/output.py
+++ b/titanfe/apps/brick_runner/output.py
@@ -7,20 +7,17 @@
 
 """The output with its server and ports"""
 import asyncio
+import typing as T
 from copy import deepcopy, copy
 from dataclasses import dataclass
 
 from UJOSchema import schema_to_type
 
-from ujotypes import UjoBase, UJO_VARIANT_NONE
-
 import titanfe.log
+from titanfe.apps.brick_runner.packet import Packet
 from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
 from titanfe.apps.control_peer.brick import Connection
-from titanfe.apps.brick_runner.packet import Packet
-
-from titanfe.apps.brick_runner import transport
-import typing as T
+from ujotypes import UjoBase, UJO_VARIANT_NONE
 
 
 @dataclass
@@ -35,9 +32,7 @@ class Consumer:
         return cls(
             queue_name=connection.target_queue,
             target_type=(
-                schema_to_type(
-                    connection.targetPort.schema, connection.targetPort.typeName
-                )
+                schema_to_type(connection.targetPort.schema, connection.targetPort.typeName)
                 if connection.targetPort.schema
                 else UJO_VARIANT_NONE
             ),
@@ -91,7 +86,7 @@ class Output:
     def __init__(
         self,
         output_connections: T.Dict["PortName", T.List[Connection]],
-        transport: transport.RabbitMQ,
+        transport: "transport.RabbitMQ",
         logger=None,
     ):
         self.transport = transport
@@ -112,5 +107,8 @@ class Output:
         consumers = self[port].consumers
         self.log.debug("publish %r on port %r to consumers: %r", packet, port, consumers)
         await asyncio.gather(
-            *(self.transport.publish(consumer.queue_name, consumer.create_packet(packet)) for consumer in consumers)
+            *(
+                self.transport.publish(consumer.queue_name, consumer.create_packet(packet))
+                for consumer in consumers
+            )
         )
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index 55b1a74..f8f3ed6 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -44,7 +44,7 @@ class BrickRunner:
         self.server = None
         self.address = (None, None)
         self.gridmanager = None
-        self.rabbitmq = None
+        self.message_transport = None
 
         self.setup_completed = asyncio.Event()
 
@@ -66,15 +66,17 @@ class BrickRunner:
             context=logging.global_context,
         )
 
-        self.rabbitmq = RabbitMQ(brick_definition.message_exchange)
+        self.message_transport = RabbitMQ(brick_definition.message_exchange)
         self.input = Input(
             brick_definition.input_queues,
             max_idle_time=brick_definition.runtime_parameters.exit_after_idle_seconds,
-            transport=self.rabbitmq,
-            logger=self.log
+            transport=self.message_transport,
+            logger=self.log,
         )
         self.output = Output(
-            brick_definition.connections.output, transport=self.rabbitmq, logger=self.log
+            brick_definition.connections.output,
+            transport=self.message_transport,
+            logger=self.log,
         )
 
         self.metric_emitter = await MetricEmitter.create_from_brick_runner(self)
@@ -127,7 +129,7 @@ class BrickRunner:
         logging.flush_kafka_log_handler()
         await self.input.stop()
         self.brick.terminate()
-        await self.rabbitmq.disconnect()
+        await self.message_transport.disconnect()
         await self.metric_emitter.stop()
 
     async def shutdown(self):
diff --git a/titanfe/testing/testrunner.py b/titanfe/testing/testrunner.py
index d20cf57..1bba043 100644
--- a/titanfe/testing/testrunner.py
+++ b/titanfe/testing/testrunner.py
@@ -16,11 +16,14 @@ import logging
 import queue
 import sys
 import threading
+from collections import defaultdict
+from datetime import datetime
 from typing import Union
 from unittest.mock import MagicMock
 
 import janus
 
+from titanfe.apps.brick_runner.input import Input as OriginalInput
 from titanfe.apps.brick_runner.brick import Brick
 from titanfe.apps.brick_runner.packet import Packet
 from titanfe.apps.brick_runner.runner import BrickRunner
@@ -37,6 +40,7 @@ logging.basicConfig(
 )
 
 LOG = TitanPlatformLogger(__name__)
+LOG.setLevel(logging.DEBUG)
 
 
 async def async_magic():
@@ -66,68 +70,35 @@ class GridManagerDummy:
     register_runner = MagicMock()
 
 
-class Input:
+class Input(OriginalInput):
     """TestRunner: Input replacement"""
 
     def __init__(self):
-        self.queue = janus.Queue()
-        self.packets = self.queue.async_q
+        self.Q = janus.Queue()
 
-        self._close = False
-        self._getter = None
+        self.max_idle_time = 1  # give it at least a chance to run
+        self.log = LOG
 
-    def __aiter__(self):
-        return self
+        self._getter = asyncio.Future()
+        self._packets = self.Q.async_q
 
-    async def __anext__(self) -> Packet:
-        while not self._close:
-            self._getter = asyncio.create_task(self.get())
-            try:
-                # careful: without timeout the get sometimes hangs on `self.packets.get()`
-                #          in that case the brick runner does not shutdown correctly.
-                # TODO: find out if there's a better way...
-                return await asyncio.wait_for(self._getter, timeout=0.05)
-            except asyncio.CancelledError:
-                raise StopAsyncIteration
-            except asyncio.TimeoutError:
-                pass  # retry
-        raise StopAsyncIteration
-
-    async def get(self):
-        """awaitable to get the next available packet from the input queue"""
-        packet = await self.packets.get()
-        if not isinstance(packet, Packet):
-            payload, port = packet
-            packet = Packet(port=port, payload=payload)
-
-        self.packets.task_done()
-
-        packet.update_input_exit()
-        return packet
-
-    async def put(self, item):
-        return await self.packets.put(item)
+    async def start(self):
+        pass
 
-    async def close(self):
-        """Stop the input"""
-        self.sync_close()
+    async def stop(self):
+        self._getter.cancel()
 
-    def sync_close(self):
-        self._close = True
-        if self._getter:
-            self._getter.cancel()
-        self.queue.close()
+    def mark_done(self, packet):
+        self._packets.task_done()
 
-    @property
-    def is_empty(self):
-        return self.packets.empty()
 
+class Output:
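+    """TestRunner: Output replacement"""
+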
+    def __init__(self):
+        self.Q = janus.Queue()
+        self.get = self.Q.sync_q.get
 
-class Output(queue.Queue):
-    """TestRunner: Output replacement"""
-    @property
-    def is_empty(self):
-        return self.empty()
+    async def put(self, packet, port):
+        await self.Q.async_q.put((port, packet.payload))
 
     async def close(self):
         pass
@@ -176,7 +147,6 @@ class TestRunner:
         LOG.setLevel(log_level)
         self.uid = create_uid("Test-")
         self.thread = threading.Thread(target=self.run_async_create_and_run)
-        self.output = queue.Queue()
         self.terminate = threading.Event()
 
         self.runner = BrickRunner("R-TestRunner")
@@ -195,7 +165,7 @@ class TestRunner:
                 "parameters": {},
                 "autoscale_queue_level": 25,
                 "autoscale_max_instances": 1,
-                "exit_after_idle_seconds": 0,
+                "exit_after_idle_seconds": 1,
                 "inputPorts": [],
                 "outputPorts": [],
             },
@@ -204,7 +174,24 @@ class TestRunner:
             "FlowSchema": "fixme",
             "Inbound": {},
             "Outbound": {
-                DEFAULT_PORT: [{"InstanceID": "dummy", "autoscale_queue_level": 0, "Port": "Input"}]
+                DEFAULT_PORT: [
+                    {
+                        "InstanceID": "Next-Brick",
+                        "autoscale_queue_level": 0,
+                        "mapping": [],
+                        "buffer": {},
+                        "sourcePort": {
+                            "id": "Out",
+                            "typeName": "test",
+                            "schema": "test = variant;",
+                        },
+                        "targetPort": {
+                            "id": "Input",
+                            "typeName": "test",
+                            "schema": "test = variant;",
+                        },
+                    }
+                ]
             },
         }
 
@@ -225,15 +212,11 @@ class TestRunner:
             self._create_and_run_runner(self.brick_class_or_path_to_module, self.parameters)
         )
 
-    async def _create_and_run_runner(
-        self, brick_class_or_path_to_module, parameters
-    ):
+    async def _create_and_run_runner(self, brick_class_or_path_to_module, parameters):
         runner = self.runner
         runner.gridmanager = GridManagerDummy()
         runner.metric_emitter = MetricEmitterDummy()
         runner.server = MagicMock()
-        runner.input = Input()
-        runner.output = Output()
 
         is_brick = (
             inspect.isclass(brick_class_or_path_to_module)
@@ -242,6 +225,13 @@ class TestRunner:
 
         instance_definition = BrickInstanceDefinition.from_gridmanager(self.definition)
 
+        runner.message_transport = MagicMock()
+
+        runner.input = Input()
+        runner.output = Output()
+
+        runner.metric_emitter.set_metadata_from_runner(runner)
+
         if is_brick:
             instance_definition.base.module_path = "sys"
         else:
@@ -249,7 +239,7 @@ class TestRunner:
 
         instance_definition.processing_parameters.update(**parameters)
 
-        runner.brick = Brick(instance_definition, runner.metric_emitter, LOG)
+        runner.brick = Brick(instance_definition, runner.metric_emitter, LOG, runner.output)
 
         if is_brick:
 
@@ -260,7 +250,6 @@ class TestRunner:
 
         runner.brick.is_inlet = issubclass(runner.brick.module.Brick, InletBrickBase)
 
-        runner.tasks.append(asyncio.create_task(self.output_results()))
         runner.tasks.append(asyncio.create_task(self.check_terminate(runner)))
 
         runner.setup_completed.set()
@@ -291,13 +280,18 @@ class TestRunner:
                 self.name = name
 
             def put(self, item):
-                runner.input.queue.sync_q.put((item, self.name))
+                if not isinstance(item, Packet):
+                    item = Packet(payload=item)
+                runner.input.Q.sync_q.put((self.name, item))
 
         class InputProxy:
             """To get port/payload into the running runner..."""
+
             @staticmethod
             def put(item):
-                runner.input.queue.sync_q.put((item, DEFAULT_PORT))
+                if not isinstance(item, Packet):
+                    item = Packet(payload=item)
+                runner.input.Q.sync_q.put((DEFAULT_PORT, item))
 
             @staticmethod
             def __getitem__(name):
@@ -305,15 +299,14 @@ class TestRunner:
 
         return InputProxy()
 
-    async def output_results(self):
-        """get results from the brick execution and add them to the output queues of this runner"""
-        async for packet, port in self.runner.brick.get_results():
-            self.output.put((port, packet.payload))
+    @property
+    def output(self):
+        return self.runner.output
 
     def start(self):
         self.thread.start()
 
     def stop(self):
-        self.runner.input.sync_close()
+        self.runner.input._getter.cancel()
         self.terminate.set()
         self.thread.join()
-- 
GitLab


From 3aff5a5023d4c568b0076e221c32509a4ef154b6 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Thu, 14 Sep 2023 16:20:04 +0200
Subject: [PATCH 03/29] paint it black! (line-length: 100)

---
 titanfe/apps/brick_runner/__main__.py      |  4 +--
 titanfe/apps/brick_runner/adapter.py       | 16 ++++-----
 titanfe/apps/brick_runner/brick.py         | 23 ++++---------
 titanfe/apps/brick_runner/grid_manager.py  |  2 +-
 titanfe/apps/brick_runner/input.py         | 10 ++----
 titanfe/apps/brick_runner/transport.py     | 10 +++---
 titanfe/apps/brick_runner/value_mapping.py |  2 +-
 titanfe/apps/control_peer/__main__.py      |  4 ++-
 titanfe/apps/control_peer/brick.py         | 38 ++++++----------------
 titanfe/apps/control_peer/control_peer.py  |  8 ++---
 titanfe/apps/control_peer/services.py      | 30 ++++++++++-------
 titanfe/apps/control_peer/webapi/state.py  |  1 +
 titanfe/apps/kafka_to_elastic/__main__.py  |  7 ++--
 titanfe/apps/kafka_viewer/__main__.py      |  4 ++-
 titanfe/brick.py                           | 10 +++---
 titanfe/config.py                          | 26 ++++++++-------
 titanfe/connection.py                      | 15 ++++-----
 titanfe/get-pip.py                         |  2 +-
 titanfe/log.py                             | 16 ++++-----
 titanfe/messages.py                        |  1 +
 titanfe/repository.py                      | 17 ++++++----
 titanfe/utils.py                           | 15 +++++----
 22 files changed, 125 insertions(+), 136 deletions(-)

diff --git a/titanfe/apps/brick_runner/__main__.py b/titanfe/apps/brick_runner/__main__.py
index 3333a6a..c3cfe30 100644
--- a/titanfe/apps/brick_runner/__main__.py
+++ b/titanfe/apps/brick_runner/__main__.py
@@ -27,8 +27,7 @@ else:
 
 
 async def run_app(args):
-    """ let's do this
-    """
+    """let's do this"""
 
     configuration.update(pickle.loads(args.configuration))
     logging.initialize("BrickRunner")
@@ -43,7 +42,6 @@ async def run_app(args):
 
 
 def main():
-
     """parse args and run the application"""
     arg_parser = argparse.ArgumentParser()
     arg_parser.add_argument("-id", type=str, help="Brick Runner ID")  # uuid.UUID,
diff --git a/titanfe/apps/brick_runner/adapter.py b/titanfe/apps/brick_runner/adapter.py
index d8e05a5..3996a28 100644
--- a/titanfe/apps/brick_runner/adapter.py
+++ b/titanfe/apps/brick_runner/adapter.py
@@ -22,7 +22,7 @@ TAG_LEN = 16
 
 @dataclass
 class AdapterMeta:
-    """ flow/brick meta data to be made available for access inside a brick """
+    """flow/brick meta data to be made available for access inside a brick"""
 
     brick: MetaData
     flow: MetaData
@@ -69,9 +69,7 @@ class BrickAdapter:  # pylint: disable=too-few-public-methods
             value (Any): Any value
         """
 
-        self.log.debug(
-            "brick emitted new value: %r , port: %s", value, port or self.__default_port
-        )
+        self.log.debug("brick emitted new value: %r , port: %s", value, port or self.__default_port)
         self.__put_packet(value, port or self.__default_port)
 
     def decrypt_parameter(self, parameter):  # pylint: disable=no-self-use
@@ -81,13 +79,14 @@ class BrickAdapter:  # pylint: disable=too-few-public-methods
             parameter(String): hex encoded encryped parameter
         """
 
-        key = generate_key(bytes(configuration.secret_key, 'utf-8'),
-                           bytes(self.meta.flow.uid, 'utf-8'))
+        key = generate_key(
+            bytes(configuration.secret_key, "utf-8"), bytes(self.meta.flow.uid, "utf-8")
+        )
         parameter_bytes = bytes.fromhex(parameter)
         nonce = parameter_bytes[:NONCE_LEN]
         cipher = AES.new(key, AES.MODE_GCM, nonce=nonce)
 
-        encrypted_parameter = parameter_bytes[NONCE_LEN:len(parameter_bytes)-TAG_LEN]
+        encrypted_parameter = parameter_bytes[NONCE_LEN : len(parameter_bytes) - TAG_LEN]
         decrypted_parameter = cipher.decrypt(encrypted_parameter)
         return decrypted_parameter.decode()
 
@@ -113,8 +112,7 @@ class State:
         self.__repository_service.store(self.collection, self.document, value)
 
     def reset(self):
-        """Delete brick state
-        """
+        """Delete brick state"""
         self.log.debug("deleting document %r of collection %r", self.document, self.collection)
         self.__repository_service.delete(self.collection, self.document)
 
diff --git a/titanfe/apps/brick_runner/brick.py b/titanfe/apps/brick_runner/brick.py
index 20befbf..e53cef8 100644
--- a/titanfe/apps/brick_runner/brick.py
+++ b/titanfe/apps/brick_runner/brick.py
@@ -31,6 +31,7 @@ PortMapping = namedtuple("PortMapping", ("rules", "type"))
 
 SENTINEL = object()
 
+
 class Brick:
     """Wraps all the Brick-Handling"""
 
@@ -52,18 +53,14 @@ class Brick:
 
         self.processing_parameters = instance_definition.processing_parameters
 
-        self.default_port = next(
-            iter(instance_definition.connections.output), DEFAULT_PORT
-        )
+        self.default_port = next(iter(instance_definition.connections.output), DEFAULT_PORT)
         self.is_inlet = not instance_definition.connections.input
         self.is_outlet = not instance_definition.connections.output
 
         self.brick_type = instance_definition.base.name
         self.brick_family = instance_definition.base.family
 
-        context = logging.FlowContext(
-            self.flow.uid, self.flow.name, self.uid, self.name
-        )
+        context = logging.FlowContext(self.flow.uid, self.flow.name, self.uid, self.name)
         logging.global_context.update(context.asdict())
 
         self.log = logger.getChild("Brick")
@@ -73,9 +70,7 @@ class Brick:
         self._brick_output = janus.Queue()
 
         self.adapter = BrickAdapter(
-            AdapterMeta(
-               brick=(self.uid, self.name), flow=(self.flow.uid, self.flow.name)
-            ),
+            AdapterMeta(brick=(self.uid, self.name), flow=(self.flow.uid, self.flow.name)),
             self.adapter_output_callback,
             self.log,
             self.default_port,
@@ -89,9 +84,7 @@ class Brick:
         try:
             self.instance = self.module.Brick(self.adapter, self.processing_parameters)
         except AttributeError:
-            self.log.with_context.warning(
-                "Brick class is missing in module: %r", self.module
-            )
+            self.log.with_context.warning("Brick class is missing in module: %r", self.module)
             raise ImportError(f"Brick class is missing in module: {self.module}")
 
     def terminate(self):
@@ -132,9 +125,7 @@ class Brick:
             payload = packet.payload
 
         loop = asyncio.get_event_loop()
-        execution = loop.run_in_executor(
-            None, self.run_instance_processing, payload, packet.port
-        )
+        execution = loop.run_in_executor(None, self.run_instance_processing, payload, packet.port)
 
         if not self.is_outlet:
             await self.process_output(parent_packet=packet)
@@ -191,4 +182,4 @@ class Brick:
         packet.payload = payload
 
         self.log.debug("brick output on port [%s]: %r", port, packet)
-        await self.output.put(packet, port)
\ No newline at end of file
+        await self.output.put(packet, port)
diff --git a/titanfe/apps/brick_runner/grid_manager.py b/titanfe/apps/brick_runner/grid_manager.py
index fe6e5ac..86ab825 100644
--- a/titanfe/apps/brick_runner/grid_manager.py
+++ b/titanfe/apps/brick_runner/grid_manager.py
@@ -17,7 +17,7 @@ from titanfe.config import configuration
 
 
 class GridManager:
-    """GridManager """
+    """GridManager"""
 
     def __init__(self, runner_uid, brick_uid):
         self.runner_uid = runner_uid
diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index 8767f45..23b4f4f 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -31,7 +31,7 @@ class Input:
 
         self._getter = asyncio.Future()
         self._queues = input_queues
-        self._packets = asyncio.Queue()#maxsize=1)
+        self._packets = asyncio.Queue()  # maxsize=1
         self._packets_done = {}
 
     def __aiter__(self):
@@ -58,18 +58,14 @@ class Input:
         self.log.debug("start consumers: %r", self._queues)
         await asyncio.gather(
             *(
-                self.transport.start_consumer(
-                    queue, functools.partial(self._on_new_message, port)
-                )
+                self.transport.start_consumer(queue, functools.partial(self._on_new_message, port))
                 for port, queue in self._queues.items()
             )
         )
 
     async def stop(self):
         self.log.debug("stop consumers: %r", self._queues)
-        await asyncio.gather(
-            self.transport.stop_consumer(queue) for queue in self._queues.values()
-        )
+        await asyncio.gather(
+            *(self.transport.stop_consumer(queue) for queue in self._queues.values())
+        )
         self._getter.cancel()
 
     async def _on_new_message(self, port, packet, done_callback: T.Callable):
diff --git a/titanfe/apps/brick_runner/transport.py b/titanfe/apps/brick_runner/transport.py
index ae30b57..c982eb8 100644
--- a/titanfe/apps/brick_runner/transport.py
+++ b/titanfe/apps/brick_runner/transport.py
@@ -6,12 +6,15 @@ import aioamqp.channel
 
 import titanfe.log
 
+
 class RabbitMQ:
     # TODO: make "robust" - handle reconnect and stuff
     #  maybe find inspiration in aio_pika's RobustConnection/Channel/Queue?
 
     def __init__(self, exchange_name, logger=None):
-        self.log = logger.getChild("transport.RabbitMQ") if logger else titanfe.log.getLogger(__name__)
+        self.log = (
+            logger.getChild("transport.RabbitMQ") if logger else titanfe.log.getLogger(__name__)
+        )
         self.exchange_name = exchange_name
         self._consumers = {}
         self._connection: aioamqp.protocol.AmqpProtocol = None
@@ -33,9 +36,7 @@ class RabbitMQ:
         if not self._connection:
             return
 
-        await asyncio.gather(
-            *(self.stop_consumer(consumer) for consumer in list(self._consumers))
-        )
+        await asyncio.gather(*(self.stop_consumer(consumer) for consumer in list(self._consumers)))
 
         await self._channel.close()
         await self._connection.close()
@@ -51,7 +52,6 @@ class RabbitMQ:
         return self._channel
 
     async def start_consumer(self, queue_name, on_new_message_callback):
-
         async def callback_wrapper(msgchannel, body, envelope, _):
             async def done_callback():
                 await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
diff --git a/titanfe/apps/brick_runner/value_mapping.py b/titanfe/apps/brick_runner/value_mapping.py
index 52afd0a..02e3216 100644
--- a/titanfe/apps/brick_runner/value_mapping.py
+++ b/titanfe/apps/brick_runner/value_mapping.py
@@ -119,7 +119,7 @@ class MappingRules:
         self.rules = [Rule(rule) for rule in rules]
 
     def apply(self, buffer, source, target):
-        """"convert ujo types according to its mapping rules"""
+        """ "convert ujo types according to its mapping rules"""
         for rule in self.rules:
             if rule.is_const:
                 try:
diff --git a/titanfe/apps/control_peer/__main__.py b/titanfe/apps/control_peer/__main__.py
index 856af13..8e825de 100644
--- a/titanfe/apps/control_peer/__main__.py
+++ b/titanfe/apps/control_peer/__main__.py
@@ -45,7 +45,9 @@ def main():
         default="../../../examples/demo_flow.yml",
     )
     arg_parser.add_argument(
-        "-brick_folder", help="Brick folder", default=str(Path.home() / "titanfe/bricks"),
+        "-brick_folder",
+        help="Brick folder",
+        default=str(Path.home() / "titanfe/bricks"),
     )
     arg_parser.add_argument(
         "-config_file",
diff --git a/titanfe/apps/control_peer/brick.py b/titanfe/apps/control_peer/brick.py
index 36b794f..20a2101 100644
--- a/titanfe/apps/control_peer/brick.py
+++ b/titanfe/apps/control_peer/brick.py
@@ -138,24 +138,15 @@ class Connections:
     output: T.Dict["PortName", T.List[Connection]]
 
     def __post_init__(self):
-
         for port_name, connections in self.input.items():
             self.input[port_name] = [
-                (
-                    Connection(**connection)
-                    if isinstance(connection, T.Mapping)
-                    else connection
-                )
+                (Connection(**connection) if isinstance(connection, T.Mapping) else connection)
                 for connection in connections
             ]
 
         for port_name, connections in self.output.items():
             self.output[port_name] = [
-                (
-                    Connection(**connection)
-                    if isinstance(connection, T.Mapping)
-                    else connection
-                )
+                (Connection(**connection) if isinstance(connection, T.Mapping) else connection)
                 for connection in connections
             ]
 
@@ -233,9 +224,7 @@ class BrickBaseDefinition:
         self.exe = environment.exe
 
     def __repr__(self):
-        return (
-            f"Base({self.uid!r}, {self.name!r}, " f"module_path={self.module_path!r})"
-        )
+        return f"Base({self.uid!r}, {self.name!r}, " f"module_path={self.module_path!r})"
 
     async def install_or_update(self, update=True, force_update=False):
         """Get a brick from the package manager and install it"""
@@ -249,9 +238,9 @@ class BrickBaseDefinition:
 
             if not force_update:
                 last_modified_local = destination.stat().st_mtime
-                if datetime.utcfromtimestamp(
-                    last_modified_local
-                ) >= datetime.utcfromtimestamp(self.last_modified):
+                if datetime.utcfromtimestamp(last_modified_local) >= datetime.utcfromtimestamp(
+                    self.last_modified
+                ):
                     return
 
             shutil.rmtree(destination)
@@ -316,9 +305,7 @@ class BrickInstanceDefinition:
 
         logger = logging.TitanPlatformLogger(
             __name__,
-            context=logging.FlowContext(
-                flow.uid, flow.name, instance_uid, instance_name
-            ),
+            context=logging.FlowContext(flow.uid, flow.name, instance_uid, instance_name),
         )
 
         base = BrickBaseDefinition(
@@ -327,13 +314,9 @@ class BrickInstanceDefinition:
             family=config["family"],
             logger=logger,
         )
-        runtime_params = RuntimeParameters(
-            *[config[f] for f in RuntimeParameters._fields]
-        )
+        runtime_params = RuntimeParameters(*[config[f] for f in RuntimeParameters._fields])
         processing_params = config["parameters"]
-        connections = Connections(
-            brick_description["Inbound"], brick_description["Outbound"]
-        )
+        connections = Connections(brick_description["Inbound"], brick_description["Outbound"])
 
         instance = cls(
             instance_uid,
@@ -354,6 +337,5 @@ class BrickInstanceDefinition:
     @property
     def input_queues(self):
         return {
-            port["name"]: truncate(self.uid + ":" + port["name"], 255)
-            for port in self.ports.input
+            port["name"]: truncate(self.uid + ":" + port["name"], 255) for port in self.ports.input
         }
diff --git a/titanfe/apps/control_peer/control_peer.py b/titanfe/apps/control_peer/control_peer.py
index a2796e4..3ec941f 100644
--- a/titanfe/apps/control_peer/control_peer.py
+++ b/titanfe/apps/control_peer/control_peer.py
@@ -24,8 +24,8 @@ log = logging.TitanPlatformLogger(__name__)
 
 class ControlPeer:
     """The control peer application will start runners as required for the flows/bricks
-       as described in the given config file. Once the runners have registered themselves,
-       they will get according assignments.
+    as described in the given config file. Once the runners have registered themselves,
+    they will get according assignments.
     """
 
     def __init__(self):
@@ -42,7 +42,7 @@ class ControlPeer:
 
     @classmethod
     def create(cls):
-        """"Create control peer"""
+        """ "Create control peer"""
         control_peer = cls()
         control_peer.install_signal_handlers()
 
@@ -142,7 +142,7 @@ class ControlPeer:
         flow_runners.discard(runner)
 
     async def stop_runners(self, flow_uid=None):
-        """ stop all runners or all runners for the given flow.uid """
+        """stop all runners or all runners for the given flow.uid"""
         if flow_uid:
             log.debug("Stopping brick runners for Flow: %s", flow_uid)
             runners = self.runners_by_flow.pop(flow_uid, set())
diff --git a/titanfe/apps/control_peer/services.py b/titanfe/apps/control_peer/services.py
index f654ccd..b766c2b 100644
--- a/titanfe/apps/control_peer/services.py
+++ b/titanfe/apps/control_peer/services.py
@@ -12,6 +12,7 @@ from abc import ABC, abstractmethod
 
 from aiohttp.client_exceptions import ClientError
 from aiohttp_requests import Requests  # initiate a new client every time,
+
 # because we don't know how many threads are used and each will have its own asyncio loop
 # there must be a better way, but right now I'm short on time...
 
@@ -27,6 +28,7 @@ class ServiceError(Exception):
 
 class ControlPeerServiceRegistration(ABC):
     """BaseClass to handle control peer registration of various services"""
+
     @property
     @abstractmethod
     def control_peer_endpoint(self):
@@ -37,8 +39,9 @@ class ControlPeerServiceRegistration(ABC):
         while True:
             try:
                 requests = Requests()
-                response = await requests.post(self.control_peer_endpoint,
-                                               json=json.dumps(own_api_address).strip('"'))
+                response = await requests.post(
+                    self.control_peer_endpoint, json=json.dumps(own_api_address).strip('"')
+                )
                 if response.status not in (HTTPStatus.OK, HTTPStatus.CREATED, HTTPStatus.ACCEPTED):
                     raise ServiceError(
                         f"Failed to register own API {own_api_address}"
@@ -46,20 +49,23 @@ class ControlPeerServiceRegistration(ABC):
                         f"{HTTPStatus(response.status)}"  # pylint: disable=no-value-for-parameter
                     )
 
-                log.info("Successfully registered own API <%s> at <%s>", own_api_address,
-                         self.control_peer_endpoint)
+                log.info(
+                    "Successfully registered own API <%s> at <%s>",
+                    own_api_address,
+                    self.control_peer_endpoint,
+                )
                 return
             except ClientError:
-                log.warning("Failed to register at <%s> - Retry",
-                            self.control_peer_endpoint)
+                log.warning("Failed to register at <%s> - Retry", self.control_peer_endpoint)
                 await asyncio.sleep(1)
 
     async def deregister(self, own_api_address):
         """Cancel registration at target_address"""
         try:
             requests = Requests()
-            response = await requests.delete(self.control_peer_endpoint,
-                                             json=json.dumps(own_api_address).strip('"'))
+            response = await requests.delete(
+                self.control_peer_endpoint, json=json.dumps(own_api_address).strip('"')
+            )
         except ClientError:
             log.warning("Removing registration from <%s> - Failed!", self.control_peer_endpoint)
         else:
@@ -74,7 +80,7 @@ class ControlPeerServiceRegistration(ABC):
 
 
 class PackageManager(ControlPeerServiceRegistration):
-    """ handle all requests to the package manager """
+    """handle all requests to the package manager"""
 
     @property
     def address(self):
@@ -114,12 +120,14 @@ class PackageManager(ControlPeerServiceRegistration):
 
     async def get_source_files(self, brick_id):
         """get the source files archive from the package manager"""
-        return await self.get(self.brick_code_endpoint + "/" + brick_id + ".zip",
-                              "Downloading source files")
+        return await self.get(
+            self.brick_code_endpoint + "/" + brick_id + ".zip", "Downloading source files"
+        )
 
 
 class GridManager(ControlPeerServiceRegistration):
     """handle all requests to the grid manager"""
+
     @property
     def address(self):
         return f"{configuration.gridmanager_address}"
diff --git a/titanfe/apps/control_peer/webapi/state.py b/titanfe/apps/control_peer/webapi/state.py
index c42101e..704d21f 100644
--- a/titanfe/apps/control_peer/webapi/state.py
+++ b/titanfe/apps/control_peer/webapi/state.py
@@ -36,6 +36,7 @@ def get_state(control_peer) -> List[Dict]:
     Returns:
         list of brick information
     """
+
     def brick_info(runner):
         brick = runner.brick
         return {
diff --git a/titanfe/apps/kafka_to_elastic/__main__.py b/titanfe/apps/kafka_to_elastic/__main__.py
index c630cfb..a4577b4 100644
--- a/titanfe/apps/kafka_to_elastic/__main__.py
+++ b/titanfe/apps/kafka_to_elastic/__main__.py
@@ -97,8 +97,11 @@ async def main():
 
     print("Reading", topics.name, "From", bootstrap_servers, "To", elastic_host)
 
-    async with KafkaReader(topics.name, bootstrap_servers=bootstrap_servers) as kafka, \
-            ElasticWriter(elastic_host=elastic_host) as elastic:  # pylint: disable= ; noqa
+    async with KafkaReader(
+        topics.name, bootstrap_servers=bootstrap_servers
+    ) as kafka, ElasticWriter(
+        elastic_host=elastic_host
+    ) as elastic:  # pylint: disable= ; noqa
         async for topic, records in kafka.read():
             len_records = f"{len(records)} record{'s' if len(records) > 1 else ''}"
             print(f"processing {len_records} from {topic.topic} of type {topics.type[topic.topic]}")
diff --git a/titanfe/apps/kafka_viewer/__main__.py b/titanfe/apps/kafka_viewer/__main__.py
index e9ad15a..556bad8 100644
--- a/titanfe/apps/kafka_viewer/__main__.py
+++ b/titanfe/apps/kafka_viewer/__main__.py
@@ -34,7 +34,9 @@ def main():
     """parse args and run the application"""
     arg_parser = argparse.ArgumentParser()
     arg_parser.add_argument(
-        "-topics", help="list of topics 'topic1 topic2", default="titanfe.metrics titanfe.logs",
+        "-topics",
+        help="list of topics 'topic1 topic2",
+        default="titanfe.metrics titanfe.logs",
     )
     arg_parser.add_argument(
         "-kafka",
diff --git a/titanfe/brick.py b/titanfe/brick.py
index db5c331..d07699e 100644
--- a/titanfe/brick.py
+++ b/titanfe/brick.py
@@ -23,7 +23,7 @@ class BrickBase(metaclass=ABCMeta):
     """An abstract base class for building Bricks"""
 
     def __init__(self, adapter: BrickAdapter, parameters: Optional[Dict] = None):
-        """ Initialize the Brick
+        """Initialize the Brick
 
         Arguments:
             adapter (BrickAdapter):
@@ -45,17 +45,17 @@ class BrickBase(metaclass=ABCMeta):
         self.teardown()
 
     def setup(self):
-        """ Upon loading the Brick in the BrickRunner the setup-method is run once
+        """Upon loading the Brick in the BrickRunner the setup-method is run once
         and can be used to e.g. open connections that will be held persistent.
         """
 
     def teardown(self):
-        """ When unloading the Brick from the BrickRunner the teardown-method is run once,
+        """When unloading the Brick from the BrickRunner the teardown-method is run once,
         implement it to e.g. close connections opened during `setup`"""
 
     @abstractmethod
     def process(self, input: Type[UjoBase], port: str):  # pylint: disable=redefined-builtin
-        """ Do the input processing.
+        """Do the input processing.
 
         To modify the payload of the current packet simply return a new value.
         Use the adapter's `emit_new_packet` to create a data packet and insert it into the flow.
@@ -75,7 +75,7 @@ class InletBrickBase(BrickBase, metaclass=ABCMeta):
 
     @abstractmethod
     def stop_processing(self):
-        """ The BrickRunner needs a way to properly end continuously running bricks.
+        """The BrickRunner needs a way to properly end continuously running bricks.
         It will call this method upon receiving a termination request
         and expect the processing to be aborted/terminated.
         """
diff --git a/titanfe/config.py b/titanfe/config.py
index 054b72e..7d2c939 100644
--- a/titanfe/config.py
+++ b/titanfe/config.py
@@ -45,28 +45,30 @@ OPTION_ALIASES = {
     "kafka_log_topic": "KafkaLogTopic",
     "brick_folder": "BrickFolder",
     "secret_key": "SecretKey",
-    "endpoint_provider": "EndpointProvider"
+    "endpoint_provider": "EndpointProvider",
 }
+
+
 @dataclass
 class Configuration:
     """Current Configuration"""
 
-    kafka_bootstrap_servers :str = DEFAULT_KAFKA_BOOTSTRAP_SERVER
-    kafka_log_topic :str= DEFAULT_KAFKA_LOG_TOPIC
+    kafka_bootstrap_servers: str = DEFAULT_KAFKA_BOOTSTRAP_SERVER
+    kafka_log_topic: str = DEFAULT_KAFKA_LOG_TOPIC
 
-    no_kafka_today :bool = literal_eval(
+    no_kafka_today: bool = literal_eval(
         os.getenv("TITAN_METRICS_DISABLED") or os.getenv("TITANFE_WITHOUT_KAFKA") or "False"
     )
 
-    gridmanager_address :str = DEFAULT_GRIDMANAGER_ADDRESS
-    flowmanager_address :str = DEFAULT_FLOWMANAGER_ADDRESS
-    packagemanager_address :str = DEFAULT_PACKAGEMANAGER_ADDRESS
-    reposervice_address :str = DEFAULT_REPOSERVICE_ADDRESS
-    secret_key :str = os.getenv("TITAN_SECRET_KEY") or None
-    endpoint_provider :str = DEFAULT_ENDPOINTPROVIDER_ADDRESS
-    IP :str = None
+    gridmanager_address: str = DEFAULT_GRIDMANAGER_ADDRESS
+    flowmanager_address: str = DEFAULT_FLOWMANAGER_ADDRESS
+    packagemanager_address: str = DEFAULT_PACKAGEMANAGER_ADDRESS
+    reposervice_address: str = DEFAULT_REPOSERVICE_ADDRESS
+    secret_key: str = os.getenv("TITAN_SECRET_KEY") or None
+    endpoint_provider: str = DEFAULT_ENDPOINTPROVIDER_ADDRESS
+    IP: str = None
 
-    brick_folder :str = str(Path.home() / "titanfe/bricks")
+    brick_folder: str = str(Path.home() / "titanfe/bricks")
 
     def update(self, config: Union["Configuration", dict]):
         """update config from dict or other config"""
diff --git a/titanfe/connection.py b/titanfe/connection.py
index 07e9ce4..18cbb29 100644
--- a/titanfe/connection.py
+++ b/titanfe/connection.py
@@ -31,8 +31,7 @@ NetworkAddress = namedtuple("NetworkAddress", ("host", "port"))
 
 
 def decode_ujo_message(ujo_bytes):
-    """Decode ujo bytes into a corresponding python object, but keep an existing "Payload" as Ujo.
-    """
+    """Decode ujo bytes into a corresponding python object, but keep an existing "Payload" as Ujo."""
     ujoobj = read_buffer(ujo_bytes)
     _, content = ujoobj[0], ujoobj[1]
 
@@ -60,12 +59,12 @@ def decode_ujo_message(ujo_bytes):
 class Connection:
     """Wrap an asyncio StreamReader/Writer combination into a connection object.
 
-     Arguments:
-         reader (asyncio.StreamReader): the stream reader
-         writer (asyncio.StreamWriter): the stream writer
-         log (logging.logger): a parent logger
-         encoding: "PICKLE" or "UJO"
-     """
+    Arguments:
+        reader (asyncio.StreamReader): the stream reader
+        writer (asyncio.StreamWriter): the stream writer
+        log (logging.logger): a parent logger
+        encoding: "PICKLE" or "UJO"
+    """
 
     def __init__(self, reader, writer, log=None, encoding=ENCODING):
         self.reader = reader
diff --git a/titanfe/get-pip.py b/titanfe/get-pip.py
index dd34881..9e5f9fe 100644
--- a/titanfe/get-pip.py
+++ b/titanfe/get-pip.py
@@ -56,7 +56,7 @@ except ImportError:
         out = []
         packI = struct.Struct("!I").pack
         for i in range(0, len(b), 5):
-            chunk = b[i:i+5]
+            chunk = b[i : i + 5]
             acc = 0
             try:
                 for c in iterbytes(chunk):
diff --git a/titanfe/log.py b/titanfe/log.py
index 41aa2a9..2d61436 100644
--- a/titanfe/log.py
+++ b/titanfe/log.py
@@ -37,7 +37,7 @@ class TitanLogRecord(logging.LogRecord):  # pylint: disable=too-few-public-metho
 
 @dataclass
 class FlowContext:
-    """ The Flow Context"""
+    """The Flow Context"""
 
     flowuid: str = ""
     flowname: str = ""
@@ -108,7 +108,7 @@ global_context = {}  # pylint: disable=invalid-name
 def getLogger(  # pylint: disable=invalid-name ; noqa: N802
     name: str, context: Optional[FlowContext] = None
 ) -> logging.Logger:
-    """ Get a Logger
+    """Get a Logger
     Args:
         name: the logger name
         context: a flow context (if available)
@@ -133,7 +133,7 @@ def getLogger(  # pylint: disable=invalid-name ; noqa: N802
 
 
 def initialize(service=""):
-    """ initialize the titan logging module, e.g. set up a KafkaLogHandler
+    """initialize the titan logging module, e.g. set up a KafkaLogHandler
 
     Args:
         service: name of the current service
@@ -155,7 +155,7 @@ def initialize(service=""):
 
 
 def add_logging_level(level, level_name, method_name=None):
-    """ add a level to the logging module
+    """add a level to the logging module
 
     Args:
         level (int): level number
@@ -180,20 +180,20 @@ def add_logging_level(level, level_name, method_name=None):
 
 
 def flush_kafka_log_handler():
-    """"Flush messages sent to KafkaLogHandler and
+    """ "Flush messages sent to KafkaLogHandler and
     suppress warnings from kafka
     --> called during shutdown of brick runner"""
     for handler in logging.getLogger("titanfe").handlers:
         if isinstance(handler, KafkaLogHandler):
             handler.flush()
-    logging.getLogger('kafka').propagate = False
+    logging.getLogger("kafka").propagate = False
 
 
 class UjoBinFormatter(logging.Formatter):
-    """ Format log records as an UjoBinary"""
+    """Format log records as an UjoBinary"""
 
     def format(self, record):
-        """ Format a log record as an UjoBinary
+        """Format a log record as an UjoBinary
 
         Args:
             record (logging.Record): the log record
diff --git a/titanfe/messages.py b/titanfe/messages.py
index e2327c6..655cbe5 100644
--- a/titanfe/messages.py
+++ b/titanfe/messages.py
@@ -14,6 +14,7 @@ from enum import IntEnum
 
 class MessageType(IntEnum):
     """Types of Messages used within titanfe"""
+
     # pylint: disable=invalid-name
     Packet = 20
     PacketRequest = 21
diff --git a/titanfe/repository.py b/titanfe/repository.py
index 590bfab..fd86133 100644
--- a/titanfe/repository.py
+++ b/titanfe/repository.py
@@ -36,11 +36,11 @@ class RequestData:
 class Request:
     """Request object
 
-     Args:
-       address : str       Target address
-       method : Callable   requests method(get, put, delete..)
-       logger              : the logger instance of the parent
-       content: RequestData request content to be sent
+    Args:
+      address : str       Target address
+      method : Callable   requests method(get, put, delete..)
+      logger              : the logger instance of the parent
+      content: RequestData request content to be sent
     """
 
     address: str
@@ -50,7 +50,7 @@ class Request:
     response: Any = field(default_factory=dict)
 
     def send(self):
-        """send request """
+        """send request"""
 
         if self.content:
             try:
@@ -84,7 +84,10 @@ class RepositoryService:
     def __create_request(self, method, collection, document, value=None, find=None):
         "create and send request object understood by the repo service"
         request_data = RequestData(  # pylint: disable=no-member
-            document=document, collection=collection, value=value, find=find,
+            document=document,
+            collection=collection,
+            value=value,
+            find=find,
         ).to_json()  # pylint: disable=no-member
 
         self.log.debug("Created request data %r :", request_data)
diff --git a/titanfe/utils.py b/titanfe/utils.py
index 3ef8031..0318bc8 100644
--- a/titanfe/utils.py
+++ b/titanfe/utils.py
@@ -111,7 +111,7 @@ def time_delta_in_ms(time_ns):
 
 
 def get_module(location: Union[str, Path]) -> ModuleType:
-    """ Get the Brick content module
+    """Get the Brick content module
 
     If the Brick content module cannot be found, None is returned.
 
@@ -146,7 +146,10 @@ def get_module(location: Union[str, Path]) -> ModuleType:
             module = importlib.import_module(name)
     except Exception:  # pylint: disable=broad-except
         log.error(
-            "loading module failed: name: %r, location: %r", name, location, exc_info=True,
+            "loading module failed: name: %r, location: %r",
+            name,
+            location,
+            exc_info=True,
         )
 
     return module
@@ -158,7 +161,7 @@ def get_ip_address() -> str:
 
 
 class Timer:
-    """ a simple Timer using the performance counters from the "time"-module
+    """a simple Timer using the performance counters from the "time"-module
 
     >>> with Timer() as t:
     >>>    # do_something()
@@ -218,7 +221,7 @@ class DictConvertable(ABC):
 
     def dicts_to_dataclasses(self):
         """Convert all fields of type `dataclass` into an dataclass instance of the
-           fields specified dataclass if the current value is of type dict."""
+        fields specified dataclass if the current value is of type dict."""
         for data_field in fields(self):
             if not is_dataclass(data_field.type):
                 continue
@@ -256,10 +259,10 @@ class Flag(asyncio.Event):
 
 
 def generate_key(secret_key, salt):
-    return hashlib.pbkdf2_hmac('sha1', secret_key, salt, dklen=32, iterations=4096)
+    return hashlib.pbkdf2_hmac("sha1", secret_key, salt, dklen=32, iterations=4096)
 
 
 def truncate(text: str, max_length: int) -> str:
     if len(text) >= max_length:
-        text = text[:max_length-3] + "..."
+        text = text[: max_length - 3] + "..."
     return text
-- 
GitLab
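
Note: the generate_key helper reformatted in the utils.py hunk above derives a
fixed-length key via PBKDF2-HMAC-SHA1 (4096 iterations, 32-byte output). A
minimal standalone sketch of its behavior, assuming bytes inputs as hashlib
requires (the call and parameters are taken from the hunk, the sample inputs
are hypothetical):

    import hashlib

    def generate_key(secret_key: bytes, salt: bytes) -> bytes:
        # stretch the secret with the salt; dklen=32 yields a 32-byte key
        return hashlib.pbkdf2_hmac("sha1", secret_key, salt, dklen=32, iterations=4096)

    key = generate_key(b"my-secret", b"my-salt")
    assert len(key) == 32  # usable e.g. as an AES key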


From 131bda02be91df51723cc78fb8e717bfac0ed4d5 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Mon, 18 Sep 2023 13:50:50 +0200
Subject: [PATCH 04/29] don't stop on max_idle_time until we have found a way
 for the GridManager to handle the lifecycle management

---
 titanfe/apps/brick_runner/input.py     | 36 ++++++++++++++------------
 titanfe/apps/brick_runner/metrics.py   |  5 +++-
 titanfe/apps/brick_runner/runner.py    | 10 ++++---
 titanfe/apps/brick_runner/transport.py | 17 +++++++-----
 titanfe/apps/control_peer/config.yaml  | 10 +++----
 titanfe/config.py                      | 23 ++++++++++++++--
 6 files changed, 67 insertions(+), 34 deletions(-)

diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index 23b4f4f..5dc34ff 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -38,21 +38,25 @@ class Input:
         return self
 
     async def __anext__(self) -> Packet:
-        self._getter = asyncio.create_task(self._packets.get())
-        try:
-            self.log.debug("wait for packet")
-            port, packet = await asyncio.wait_for(self._getter, timeout=self.max_idle_time)
-            self.log.debug("got packet: %r", packet)
-        except asyncio.CancelledError:
-            self.log.debug("packet getter was cancelled")
-            raise StopAsyncIteration
-        except asyncio.TimeoutError:
-            self.log.debug("packet getter timed out")
-            raise StopAsyncIteration
-        else:
-            packet.port = port
-            packet.update_input_exit()
-            return packet
+        while True: # this can go once we don't need the `continue` any longer
+            self._getter = asyncio.create_task(self._packets.get())
+            try:
+                self.log.debug("wait for packet")
+                port, packet = await asyncio.wait_for(self._getter, timeout=self.max_idle_time)
+                self.log.debug("got packet: %r", packet)
+            except asyncio.CancelledError:
+                self.log.debug("packet getter was cancelled")
+                raise StopAsyncIteration
+            except asyncio.TimeoutError:
+                self.log.debug("packet getter timed out")
+                # for now: continue, even when we reach max idle time
+                # until lifecycle management with rabbitmq is solved
+                continue
+                # raise StopAsyncIteration
+            else:
+                packet.port = port
+                packet.update_input_exit()
+                return packet
 
     async def start(self):
         self.log.debug("start consumers: %r", self._queues)
@@ -65,7 +69,7 @@ class Input:
 
     async def stop(self):
         self.log.debug("stop consumers: %r", self._queues)
-        await asyncio.gather(self.transport.stop_consumer(queue) for queue in self._queues.values())
+        await asyncio.gather(*(self.transport.stop_consumer(queue) for queue in self._queues.values()))
         self._getter.cancel()
 
     async def _on_new_message(self, port, packet, done_callback: T.Callable):
diff --git a/titanfe/apps/brick_runner/metrics.py b/titanfe/apps/brick_runner/metrics.py
index a03f5dc..e8a7c0a 100644
--- a/titanfe/apps/brick_runner/metrics.py
+++ b/titanfe/apps/brick_runner/metrics.py
@@ -65,7 +65,10 @@ class MetricEmitter:
         self.log.metric("%s", metrics_dict)
 
         if self.kafka:
-            await self.kafka.send("titanfe.metrics", metrics_dict)
+            try:
+                await self.kafka.send("titanfe.metrics", metrics_dict)
+            except aiokafka.errors.ProducerClosed:
+                pass  # we are most likely shutting down operations
 
     async def emit_queue_metrics(self, queue_name, queue_length):
         queue_metrics = QueueMetrics(
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index f8f3ed6..c81166a 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -51,6 +51,7 @@ class BrickRunner:
         self.idle_since = None
         self.metric_emitter = None
         self.tasks = []
+        self._stop = asyncio.Event()
 
     @classmethod
     async def create(cls, uid, brick_definition: BrickInstanceDefinition):
@@ -95,15 +96,16 @@ class BrickRunner:
         try:
             with self.brick:
                 if self.brick.is_inlet:
-                    return await self.brick.process(Packet())
+                    await self.brick.process(Packet())
+                    await self._stop.wait()
+                    return
 
-                # else:
+                #else:
                 await self.input.start()
                 async for packet in self.input:
                     self.log.debug("process packet: %s", packet)
                     await self.brick.process(packet)
                     self.input.mark_done(packet)
-
         except Exception:  # pylint: disable=broad-except
             self.log.with_context.error("Brick failed", exc_info=True)
         self.log.with_context.info("Exit")
@@ -125,7 +127,7 @@ class BrickRunner:
     async def stop_processing(self):
         """stop processing bricks"""
         self.log.info("Stop Processing")
-        await self.gridmanager.deregister_runner()
+        self._stop.set()
         logging.flush_kafka_log_handler()
         await self.input.stop()
         self.brick.terminate()
diff --git a/titanfe/apps/brick_runner/transport.py b/titanfe/apps/brick_runner/transport.py
index c982eb8..47e1fdf 100644
--- a/titanfe/apps/brick_runner/transport.py
+++ b/titanfe/apps/brick_runner/transport.py
@@ -5,6 +5,7 @@ import aioamqp.protocol
 import aioamqp.channel
 
 import titanfe.log
+from titanfe.config import configuration as config
 
 
 class RabbitMQ:
@@ -22,10 +23,10 @@ class RabbitMQ:
 
     async def connect(self):
         _, self._connection = await aioamqp.connect(
-            host="rabbitmq",
-            port="5672",
-            login="wobe",
-            password="RabbitMQ-4-OpenFBA",
+            host=config.rabbitmq_params.host,
+            port=config.rabbitmq_params.port,
+            login=config.rabbitmq_params.user,
+            password=config.rabbitmq_params.password,
             heartbeat=60,
         )
         self.log.debug("connected to rabbitmq")
@@ -62,10 +63,11 @@ class RabbitMQ:
 
         await channel.queue_declare(queue_name, durable=True)
         consumer_tag = await channel.basic_consume(callback_wrapper, queue_name)
-        self._consumers[queue_name] = consumer_tag
+        self._consumers[queue_name] = consumer_tag["consumer_tag"]
 
     async def stop_consumer(self, queue_name):
         tag = self._consumers.pop(queue_name)
+        print("THE TAG!", repr(tag))
         channel = await self.channel()
         await channel.basic_cancel(tag)
 
@@ -76,4 +78,7 @@ class RabbitMQ:
         self.log.debug("publish to %r: %r", queue_name, message)
 
         channel = await self.channel()
-        await channel.basic_publish(message, self.exchange_name, routing_key=queue_name)
+        try:
+            await channel.basic_publish(message, self.exchange_name, routing_key=queue_name)
+        except aioamqp.exceptions.ChannelClosed:
+            pass  # we are most likely shutting down operations
diff --git a/titanfe/apps/control_peer/config.yaml b/titanfe/apps/control_peer/config.yaml
index c021646..700bcdb 100644
--- a/titanfe/apps/control_peer/config.yaml
+++ b/titanfe/apps/control_peer/config.yaml
@@ -1,10 +1,10 @@
 GridManager : "http://localhost:8080/gridmanager"  # Address of the GridManager
-
 FlowManager: "http://localhost:9002/flowmanager" # Address of the FlowManager
 PackageManager: "http://localhost:8087/packagemanager" # Address of the FlowManager
 RepositoryService: "http://localhost:8085/object" # Address of the Repository service
-#Kafka: "10.14.0.23:9092" #Address of the Kafka bootstrap server"
-#Kafka:" localhost:9092"
-#Kafka: "192.168.69.128:9092"
-IP: "192.168.178.43" #IP address of the host where the CP runs
+RabbitMQUrl: "amqp://guest:guest@localhost:5672"
+IP: "127.0.0.1" # IP address of the host where the CP runs
 EndpointProvider: "tcp://192.168.178.43:9021" # Address of the EndpointProviders ZeroMQ router
+Kafka: "localhost:9092" # Address of the Kafka bootstrap server"
+#Kafka: "10.14.0.23:9092"
+#Kafka: "192.168.69.128:9092"
\ No newline at end of file
diff --git a/titanfe/config.py b/titanfe/config.py
index 7d2c939..3f9eb94 100644
--- a/titanfe/config.py
+++ b/titanfe/config.py
@@ -6,7 +6,7 @@
 #
 
 """ the global configuration """
-
+import urllib.parse
 from dataclasses import dataclass
 import os
 from ast import literal_eval
@@ -20,6 +20,7 @@ from ruamel.yaml import YAMLError
 
 DEFAULT_KAFKA_BOOTSTRAP_SERVER = "10.14.0.23:9092"
 DEFAULT_KAFKA_LOG_TOPIC = "titan.logs"
+DEFAULT_RABBITMQ_URL = "amqp://guest:guest@localhost:5672"
 DEFAULT_GRIDMANAGER_ADDRESS = "http://localhost:8080/gridmanager"
 DEFAULT_FLOWMANAGER_ADDRESS = "http://localhost:9002/flowmanager"
 DEFAULT_PACKAGEMANAGER_ADDRESS = "http://localhost:8087/packagemanager"
@@ -41,6 +42,7 @@ OPTION_ALIASES = {
     "flowmanager_address": "FlowManager",
     "packagemanager_address": "PackageManager",
     "reposervice_address": "RepositoryService",
+    "rabbitmq_url": "RabbitMQUrl",
     "kafka_bootstrap_servers": "Kafka",
     "kafka_log_topic": "KafkaLogTopic",
     "brick_folder": "BrickFolder",
@@ -48,6 +50,13 @@ OPTION_ALIASES = {
     "endpoint_provider": "EndpointProvider",
 }
 
+@dataclass
+class RabbitMQConnectionParams:
+    host: str
+    port: int
+    user: str
+    password: str
+
 
 @dataclass
 class Configuration:
@@ -59,7 +68,7 @@ class Configuration:
     no_kafka_today: bool = literal_eval(
         os.getenv("TITAN_METRICS_DISABLED") or os.getenv("TITANFE_WITHOUT_KAFKA") or "False"
     )
-
+    rabbitmq_url: str = DEFAULT_RABBITMQ_URL
     gridmanager_address: str = DEFAULT_GRIDMANAGER_ADDRESS
     flowmanager_address: str = DEFAULT_FLOWMANAGER_ADDRESS
     packagemanager_address: str = DEFAULT_PACKAGEMANAGER_ADDRESS
@@ -94,5 +103,15 @@ class Configuration:
         except YAMLError as error:
             print("Could not parse config file", file_path, "-", error)
 
+    @property
+    def rabbitmq_params(self):
+        parsed_url = urllib.parse.urlparse(self.rabbitmq_url)
+        return RabbitMQConnectionParams(
+            host=parsed_url.hostname,
+            port=parsed_url.port,
+            user=urllib.parse.unquote(parsed_url.username) if parsed_url.username else None,
+            password=urllib.parse.unquote(parsed_url.password) if parsed_url.password else None,
+        )
+
 
 configuration = Configuration()
-- 
GitLab
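
Note: the new rabbitmq_params property above relies on urllib.parse to split
the configured RabbitMQUrl. A short sketch of what the default URL parses
into; username/password (and hostname/port) come back as None when the URL
omits them, which is why the credentials need a guard before unquoting:

    import urllib.parse

    parsed = urllib.parse.urlparse("amqp://guest:guest@localhost:5672")
    print(parsed.hostname, parsed.port, parsed.username, parsed.password)
    # localhost 5672 guest guest

    parsed = urllib.parse.urlparse("amqp://localhost:5672")
    print(parsed.username, parsed.password)
    # None None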


From f7d45bde2b1ad1c5c275c9a8c4c0e6e584bfac5a Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Wed, 18 Oct 2023 11:58:05 +0200
Subject: [PATCH 05/29] fix packets from inlet all having the same uid

---
 titanfe/apps/brick_runner/brick.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/titanfe/apps/brick_runner/brick.py b/titanfe/apps/brick_runner/brick.py
index e53cef8..aa7856d 100644
--- a/titanfe/apps/brick_runner/brick.py
+++ b/titanfe/apps/brick_runner/brick.py
@@ -178,7 +178,7 @@ class Brick:
         if not isinstance(payload, UjoBase):
             payload = python_to_ujo(payload)
 
-        packet = copy(parent_packet) if parent_packet else Packet(port=port)
+        packet = Packet(port=port) if self.is_inlet else copy(parent_packet)
         packet.payload = payload
 
         self.log.debug("brick output on port [%s]: %r", port, packet)
-- 
GitLab
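
Background for the one-line fix above: an inlet brick is driven with a single
dummy packet, so copying that parent reused one uid for every emitted packet.
A minimal stand-in, not the real Packet (whose uid factory lives in
titanfe.utils), showing the difference:

    import uuid
    from copy import copy
    from dataclasses import dataclass, field

    @dataclass
    class PacketSketch:  # hypothetical stand-in for brick_runner's Packet
        uid: str = field(default_factory=lambda: uuid.uuid4().hex)

    parent = PacketSketch()                  # the inlet's single dummy packet
    assert copy(parent).uid == parent.uid    # old path: every packet shared one uid
    assert PacketSketch().uid != parent.uid  # fixed path: fresh uid per packet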


From 7a4bb3677f9c6cf8ec4568ab3fb69e998767ed3c Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Wed, 18 Oct 2023 11:59:05 +0200
Subject: [PATCH 06/29] make packets hashable by identity

---
 titanfe/apps/brick_runner/packet.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/titanfe/apps/brick_runner/packet.py b/titanfe/apps/brick_runner/packet.py
index 3138d72..958cd32 100644
--- a/titanfe/apps/brick_runner/packet.py
+++ b/titanfe/apps/brick_runner/packet.py
@@ -47,6 +47,14 @@ class Packet(DictConvertable):
     def __repr__(self):
         return f"Packet(uid={self.uid}, payload={self.payload})"
 
+    def __hash__(self):
+        # careful with that axe, eugene!
+        # to allow dict[packet], I'm making it hashable by id
+        # if we copy the object or recreate it somehow e.g. from_dict(to_dict) it will have a different hash
+        # why not use the "uid"?
+        # because if a packet gets split up into multiple packets, they all have the same uid
+        return id(self)
+
     @property
     def traveling_time(self) -> float:
         return time_delta_in_ms(self.started)
-- 
GitLab
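
Note: since Packet is a dataclass with the default eq=True, Python sets its
__hash__ to None, so an explicit __hash__ is needed before packets can be used
as dict keys at all; hashing by id() deliberately ignores the uid because
split packets share it. A minimal stand-in (not the real Packet, only the
hashing semantics) illustrating the effect:

    class ByIdentity:  # hypothetical stand-in
        def __init__(self, uid):
            self.uid = uid

        def __hash__(self):
            return id(self)  # identity, not content

    a, b = ByIdentity("same-uid"), ByIdentity("same-uid")
    callbacks = {a: "ack-a", b: "ack-b"}  # two entries despite equal uids
    assert len(callbacks) == 2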


From 53d3715fd2325041410ba74e884fdd747a8accd0 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Wed, 18 Oct 2023 12:03:30 +0200
Subject: [PATCH 07/29] don't wait in _on_new_message, so that prefetch should
 work...

---
 titanfe/apps/brick_runner/input.py     | 15 ++++++---------
 titanfe/apps/brick_runner/transport.py |  3 +--
 2 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index 5dc34ff..b3d8e9b 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -31,8 +31,8 @@ class Input:
 
         self._getter = asyncio.Future()
         self._queues = input_queues
-        self._packets = asyncio.Queue()  # maxsize=1)
-        self._packets_done = {}
+        self._packets = asyncio.Queue()
+        self._packet_done_callbacks = {}
 
     def __aiter__(self):
         return self
@@ -80,23 +80,20 @@ class Input:
 
         try:
             packet = Packet.from_bytes(packet) if isinstance(packet, bytes) else packet
-        except Exception as error:
+        except Exception:
             self.log.error("Failed to convert message to packet", exc_info=True)
             done_callback()
             return
 
         packet.update_input_entry()
 
-        marked_done = asyncio.Event()
-        self._packets_done[packet.uid] = marked_done
+        self._packet_done_callbacks[packet] = done_callback
 
         self.log.debug("move to input queue: %r", (port, packet))
         await self._packets.put((port, packet))
-        await marked_done.wait()
-        await done_callback()
 
     def mark_done(self, packet):
         self.log.debug("mark done: %r", packet)
-        marked_done = self._packets_done.pop(packet.uid)
-        marked_done.set()
+        done_callback = self._packet_done_callbacks.pop(packet)
+        asyncio.create_task(done_callback())
         self._packets.task_done()
diff --git a/titanfe/apps/brick_runner/transport.py b/titanfe/apps/brick_runner/transport.py
index 47e1fdf..6812e5c 100644
--- a/titanfe/apps/brick_runner/transport.py
+++ b/titanfe/apps/brick_runner/transport.py
@@ -31,7 +31,7 @@ class RabbitMQ:
         )
         self.log.debug("connected to rabbitmq")
         self._channel = await self._connection.channel()
-        await self._channel.basic_qos(prefetch_count=1)
+        await self._channel.basic_qos(prefetch_count=2)
 
     async def disconnect(self):
         if not self._connection:
@@ -67,7 +67,6 @@ class RabbitMQ:
 
     async def stop_consumer(self, queue_name):
         tag = self._consumers.pop(queue_name)
-        print("THE TAG!", repr(tag))
         channel = await self.channel()
         await channel.basic_cancel(tag)
 
-- 
GitLab
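
The pattern behind this patch, condensed: the consumer callback now only parks
the message together with its ack callback and returns immediately, so the
channel (prefetch_count=2) can deliver the next message while the current one
is still being processed; the ack is issued later from mark_done. A shortened
sketch of the two sides, with names abbreviated from input.py above:

    import asyncio

    class InputSketch:
        def __init__(self):
            self._packets = asyncio.Queue()
            self._done_callbacks = {}  # packet -> async ack callback

        async def on_new_message(self, packet, done_callback):
            self._done_callbacks[packet] = done_callback
            await self._packets.put(packet)  # no waiting here: returns at once,
                                             # leaving room for the broker to prefetch

        def mark_done(self, packet):
            ack = self._done_callbacks.pop(packet)
            asyncio.create_task(ack())  # the ack fires after processing finished
            self._packets.task_done()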


From 586a10ad7dc8a26e54ccb46c346ac9b6fd611bd7 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Wed, 18 Oct 2023 15:41:16 +0200
Subject: [PATCH 08/29] use fixed version of pydantic (newer ones require a
 rust-toolchain, which we still have to set up in the pipeline containers)

---
 requirements_prod.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements_prod.txt b/requirements_prod.txt
index 883bc26..8fe9b8e 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -8,7 +8,8 @@ kafka-python == 1.4.6
 elasticsearch == 7.8.*  # strangely 7.9.0 is missing the async parts that were introduced with 7.8.0?
 fastapi == 0.85.1
 starlette ==0.20.4
-uvicorn == 0.9.* # fixed ude to the uvloop later version require
+uvicorn == 0.9.*  # fixed due to the uvloop version required by later versions
+pydantic == 1.10.*  # until we have the required rust-toolchain in place in the pipeline
 aiohttp >= 3.6.2
 aiohttp-requests >= 0.1.3  # as required for elasticsearch async
 dataclasses-json
-- 
GitLab


From 548b6b0cf9c0988ba5bdf0a8bfb1f5fe15025a28 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Wed, 18 Oct 2023 15:58:58 +0200
Subject: [PATCH 09/29] make setup.py and requirements.txt use the same version
 of fastapi/starlette

---
 requirements_prod.txt | 5 ++---
 setup.py              | 3 ++-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/requirements_prod.txt b/requirements_prod.txt
index 8fe9b8e..65a9f6f 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -6,10 +6,9 @@ janus
 aiokafka == 0.5.2 # fixed due to the kafka-python version required by later versions
 kafka-python == 1.4.6
 elasticsearch == 7.8.*  # strangely 7.9.0 is missing the async parts that were introduced with 7.8.0?
-fastapi == 0.85.1
-starlette ==0.20.4
+fastapi == 0.9.*
+starlette == 0.24.*
 uvicorn == 0.9.*  # fixed due to the uvloop version required by later versions
-pydantic == 1.10.*  # until we have the required rust-toolchain in place in the pipeline
 aiohttp >= 3.6.2
 aiohttp-requests >= 0.1.3  # as required for elasticsearch async
 dataclasses-json
diff --git a/setup.py b/setup.py
index 085415b..0499689 100644
--- a/setup.py
+++ b/setup.py
@@ -70,7 +70,8 @@ setup(
         "aiokafka == 0.5.2",  # fixed due to the kafka-python version required by later versions
         "kafka-python == 1.4.6",  # aiokafka 0.5.2 requires this version
         "elasticsearch == 7.8.*",
-        "fastapi",
+        "starlette == 0.24.*",
+        "fastapi == 0.9.*",
         "uvicorn == 0.9.*",  # fixed due to the uvloop later versions require
         'uvloop == 0.13.* ;platform_system=="Linux"',
         "aiohttp >= 3.6.2",
-- 
GitLab


From c0caa0a7d8d1ab74655550a5a451230747d5838d Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Wed, 18 Oct 2023 21:57:12 +0200
Subject: [PATCH 10/29] fix linting issues

---
 test/brick_runner/conftest.py              |  8 ----
 test/brick_runner/test_runner.py           | 32 +++++++++----
 titanfe/apps/brick_runner/adapter.py       |  2 +-
 titanfe/apps/brick_runner/brick.py         |  9 ++--
 titanfe/apps/brick_runner/input.py         | 27 +++++++----
 titanfe/apps/brick_runner/metrics.py       | 27 +++++++----
 titanfe/apps/brick_runner/output.py        |  5 +-
 titanfe/apps/brick_runner/packet.py        |  8 ++--
 titanfe/apps/brick_runner/runner.py        |  1 -
 titanfe/apps/brick_runner/transport.py     | 21 ++++++--
 titanfe/apps/brick_runner/value_mapping.py |  8 ++--
 titanfe/apps/control_peer/brick.py         | 56 +++++++++++++++-------
 titanfe/config.py                          |  5 +-
 titanfe/connection.py                      |  4 +-
 titanfe/testing/testrunner.py              | 25 +++++-----
 15 files changed, 154 insertions(+), 84 deletions(-)

diff --git a/test/brick_runner/conftest.py b/test/brick_runner/conftest.py
index f84b333..3b7eb14 100644
--- a/test/brick_runner/conftest.py
+++ b/test/brick_runner/conftest.py
@@ -8,18 +8,15 @@
 """
 Fixtures for BrickRunner-Tests
 """
-import asyncio
 # pylint: disable=redefined-outer-name
 import sys
 import types
 import logging
-from collections import defaultdict
 from pathlib import Path
 from unittest.mock import MagicMock, patch
 
 import pytest
 
-from apps.brick_runner.transport import RabbitMQ
 from titanfe.apps.brick_runner.metrics import MetricEmitter
 from titanfe.apps.brick_runner.runner import BrickRunner
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
@@ -68,8 +65,6 @@ class GridManagerDummy:
     register_runner = MagicMock()
 
 
-
-
 @pytest.mark.asyncio
 @pytest.fixture()
 async def brick_runner():
@@ -134,10 +129,7 @@ async def brick_runner():
     module = types.ModuleType("BrickDummy")
     module.do_brick_processing = lambda *args, **kwargs: None
 
-    gridmanager = "titanfe.apps.brick_runner.runner.GridManager"
     getmodule = "titanfe.apps.brick_runner.brick.get_module"
-    rabbitmq = "titanfe.apps.brick_runner.transport.RabbitMQ"
-    # with patch(gridmanager, MagicMock(return_value=GridManagerDummy())), patch(
     with patch(
         getmodule, MagicMock(return_value=module)
     ):
diff --git a/test/brick_runner/test_runner.py b/test/brick_runner/test_runner.py
index b409dde..033057f 100644
--- a/test/brick_runner/test_runner.py
+++ b/test/brick_runner/test_runner.py
@@ -31,7 +31,7 @@ class RabbitMQDouble(RabbitMQ):
         self.IN = defaultdict(asyncio.Queue)
         self.OUT = asyncio.Queue()
 
-    async def start_consumer(self, queue_name, callback):
+    async def start_consuming(self, queue_name, callback):
         message = await self.IN[queue_name].get()
 
         async def done_callback():
@@ -39,11 +39,17 @@ class RabbitMQDouble(RabbitMQ):
 
         callback(message, done_callback())
 
-    async def stop_consumer(self, queue_name):
+    async def stop_consuming(self, queue_name):
         pass
 
     async def publish(self, queue_name, message):
-        await self.OUT.put({"message": message, "exchange": self.exchange_name, "routing_key": queue_name})
+        await self.OUT.put(
+            {
+                "message": message,
+                "exchange": self.exchange_name,
+                "routing_key": queue_name,
+            }
+        )
 
 
 @pytest.mark.asyncio
@@ -54,7 +60,9 @@ async def test_basic_packet_processing(brick_runner):  # noqa: F811
     brick_runner.brick.is_inlet = False
 
     class Brick(BrickBase):
-        def process(self, input, port):  # pylint: disable=unused-argument, redefined-builtin
+        def process(
+            self, input, port
+        ):  # pylint: disable=unused-argument, redefined-builtin
             return UjoStringC("NewValue")
 
     brick_runner.brick.module.Brick = Brick
@@ -67,9 +75,15 @@ async def test_basic_packet_processing(brick_runner):  # noqa: F811
 
     payload = UjoStringC("InitialValue")
 
-    input_packet1 = Packet(uid="Test1", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
-    input_packet2 = Packet(uid="Test2", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
-    input_packet3 = Packet(uid="Test3", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
+    input_packet1 = Packet(
+        uid="Test1", payload=payload, buffer=Buffer(), port=DEFAULT_PORT
+    )
+    input_packet2 = Packet(
+        uid="Test2", payload=payload, buffer=Buffer(), port=DEFAULT_PORT
+    )
+    input_packet3 = Packet(
+        uid="Test3", payload=payload, buffer=Buffer(), port=DEFAULT_PORT
+    )
 
     await brick_runner.input._packets.put((DEFAULT_PORT, input_packet1))
     await brick_runner.input._packets.put((DEFAULT_PORT, input_packet2))
@@ -106,7 +120,9 @@ async def test_exit_when_idle(brick_runner):  # noqa: F811
     max_idle_seconds = 0.2
 
     class Brick(BrickBase):
-        def process(self, input, port):  # pylint: disable=unused-argument, redefined-builtin
+        def process(
+            self, input, port
+        ):  # pylint: disable=unused-argument, redefined-builtin
             return UjoStringC("NewValue")
 
     brick_runner.brick.module.Brick = Brick
diff --git a/titanfe/apps/brick_runner/adapter.py b/titanfe/apps/brick_runner/adapter.py
index 3996a28..9956cd6 100644
--- a/titanfe/apps/brick_runner/adapter.py
+++ b/titanfe/apps/brick_runner/adapter.py
@@ -72,7 +72,7 @@ class BrickAdapter:  # pylint: disable=too-few-public-methods
         self.log.debug("brick emitted new value: %r , port: %s", value, port or self.__default_port)
         self.__put_packet(value, port or self.__default_port)
 
-    def decrypt_parameter(self, parameter):  # pylint: disable=no-self-use
+    def decrypt_parameter(self, parameter):
         """Decrypt a secret parameter using AES GCM
 
         Args:
diff --git a/titanfe/apps/brick_runner/brick.py b/titanfe/apps/brick_runner/brick.py
index aa7856d..35fe36c 100644
--- a/titanfe/apps/brick_runner/brick.py
+++ b/titanfe/apps/brick_runner/brick.py
@@ -10,7 +10,6 @@ import asyncio
 import time
 from collections import namedtuple
 from copy import copy
-from functools import partial
 
 import janus
 from ujotypes import UjoBase
@@ -18,10 +17,9 @@ from ujotypes import UjoBase
 from titanfe import log as logging
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
 from titanfe.brick import InletBrickBase
-from titanfe.utils import get_module, time_delta_in_ms, Flag
+from titanfe.utils import get_module, time_delta_in_ms
 from titanfe.ujo_helper import python_to_ujo
 from .adapter import BrickAdapter, AdapterMeta
-from .input import Input
 from .output import Output
 from .packet import Packet
 from ...constants import DEFAULT_PORT
@@ -147,7 +145,8 @@ class Brick:
                 elif len(result) > 2:
                     payload, port = result, self.default_port
                 else:
-                    # this assumption might fail if someone returns a two-item tuple not containing payload and port.
+                    # this assumption might fail if someone returns
+                    # a two-item tuple not containing payload and port.
                     # we should make that more explicit somehow
                     payload, port = result
 
@@ -159,6 +158,7 @@ class Brick:
         self._brick_output.sync_q.put(SENTINEL)
 
     async def process_output(self, parent_packet):
+        """publish the bricks output"""
         while True:
             brick_output = await self._brick_output.async_q.get()
             self.log.debug("process brick output: %r", brick_output)
@@ -172,6 +172,7 @@ class Brick:
                 self._brick_output.async_q.task_done()
 
     async def publish_packet(self, payload, port, parent_packet):
+        """publish the packet"""
         if port is None:
             port = self.default_port
 
diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index b3d8e9b..03303e8 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -8,15 +8,16 @@
 """The INPUT side of a brick (runner)"""
 import asyncio
 import functools
-import titanfe.log
 import typing as T
 
+import titanfe.log
 from titanfe.apps.brick_runner.packet import Packet
 
 Port = T.NewType("Port", str)
 
 
 class Input:
+    """the input side of brick, using rabbitmq as transport medium"""
     def __init__(
         self,
         input_queues: T.Dict[Port, str],
@@ -25,9 +26,13 @@ class Input:
         logger=None,
     ):
         self.transport = transport
-        self.log = logger.getChild("Input") if logger else titanfe.log.getLogger(__name__)
+        self.log = (
+            logger.getChild("Input") if logger else titanfe.log.getLogger(__name__)
+        )
 
-        self.max_idle_time = max((max_idle_time, 0.2))  # give it at least a chance to run
+        self.max_idle_time = max(
+            (max_idle_time, 0.2)
+        )  # give it at least a chance to run
 
         self._getter = asyncio.Future()
         self._queues = input_queues
@@ -38,11 +43,13 @@ class Input:
         return self
 
     async def __anext__(self) -> Packet:
-        while True: # this can go once we don't need the `continue` any longer
+        while True:  # this can go once we don't need the `continue` any longer
             self._getter = asyncio.create_task(self._packets.get())
             try:
                 self.log.debug("wait for packet")
-                port, packet = await asyncio.wait_for(self._getter, timeout=self.max_idle_time)
+                port, packet = await asyncio.wait_for(
+                    self._getter, timeout=self.max_idle_time
+                )
                 self.log.debug("got packet: %r", packet)
             except asyncio.CancelledError:
                 self.log.debug("packet getter was cancelled")
@@ -62,14 +69,18 @@ class Input:
         self.log.debug("start consumers: %r", self._queues)
         await asyncio.gather(
             *(
-                self.transport.start_consumer(queue, functools.partial(self._on_new_message, port))
+                self.transport.start_consuming(
+                    queue, functools.partial(self._on_new_message, port)
+                )
                 for port, queue in self._queues.items()
             )
         )
 
     async def stop(self):
         self.log.debug("stop consumers: %r", self._queues)
-        await asyncio.gather(*(self.transport.stop_consumer(queue) for queue in self._queues.values()))
+        await asyncio.gather(
+            *(self.transport.stop_consuming(queue) for queue in self._queues.values())
+        )
         self._getter.cancel()
 
     async def _on_new_message(self, port, packet, done_callback: T.Callable):
@@ -80,7 +91,7 @@ class Input:
 
         try:
             packet = Packet.from_bytes(packet) if isinstance(packet, bytes) else packet
-        except Exception:
+        except Exception:  # pylint: disable=broad-exception-caught  # who knows what might go wrong
             self.log.error("Failed to convert message to packet", exc_info=True)
             done_callback()
             return
diff --git a/titanfe/apps/brick_runner/metrics.py b/titanfe/apps/brick_runner/metrics.py
index e8a7c0a..aaf3c64 100644
--- a/titanfe/apps/brick_runner/metrics.py
+++ b/titanfe/apps/brick_runner/metrics.py
@@ -29,7 +29,11 @@ class MetricEmitter:
     """
 
     def __init__(self, metrics_metadata, logger):
-        self.log = logger.getChild("MetricEmitter") if logger else titanfe.log.getLogger(__name__)
+        self.log = (
+            logger.getChild("MetricEmitter")
+            if logger
+            else titanfe.log.getLogger(__name__)
+        )
         self.kafka = None
         self.metrics_meta = metrics_metadata
 
@@ -62,6 +66,7 @@ class MetricEmitter:
         self.metrics_meta = MetricsBase.extract_from_runner(runner)
 
     async def emit(self, metrics_dict):
+        """emit the metrics"""
         self.log.metric("%s", metrics_dict)
 
         if self.kafka:
@@ -76,7 +81,9 @@ class MetricEmitter:
         )
         await self.emit(queue_metrics.to_dict())
 
-    async def emit_packet_metrics(self, packet, duration):  # pylint: disable=missing-docstring
+    async def emit_packet_metrics(
+        self, packet, duration
+    ):  # pylint: disable=missing-docstring
         packet_metrics = PacketMetricsAtBrick(
             **self.metrics_meta,
             packet=packet.uid,
@@ -140,14 +147,14 @@ class MetricsBase(DictConvertable, ABC):
     def extract_from_runner(runner):
         """extract the basic information from a brick runner instance"""
         if runner.brick:
-            return dict(
-                runner=runner.uid,
-                brick=runner.brick.name,
-                brick_type=runner.brick.brick_type,
-                brick_family=runner.brick.brick_family,
-                flow=runner.brick.flow.name,
-            )
-        return dict(runner=runner.uid)
+            return {
+                "runner": runner.uid,
+                "brick": runner.brick.name,
+                "brick_type": runner.brick.brick_type,
+                "brick_family": runner.brick.brick_family,
+                "flow": runner.brick.flow.name,
+            }
+        return {"runner": runner.uid}
 
 
 @dataclass
diff --git a/titanfe/apps/brick_runner/output.py b/titanfe/apps/brick_runner/output.py
index 484822d..816a4ef 100644
--- a/titanfe/apps/brick_runner/output.py
+++ b/titanfe/apps/brick_runner/output.py
@@ -13,15 +13,17 @@ from dataclasses import dataclass
 
 from UJOSchema import schema_to_type
 
+from ujotypes import UjoBase, UJO_VARIANT_NONE
+
 import titanfe.log
 from titanfe.apps.brick_runner.packet import Packet
 from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
 from titanfe.apps.control_peer.brick import Connection
-from ujotypes import UjoBase, UJO_VARIANT_NONE
 
 
 @dataclass
 class Consumer:
+    """everything we need to know about a subsequent brick"""
     queue_name: str
     target_type: UjoBase
     mapping_rules: MappingRules
@@ -41,6 +43,7 @@ class Consumer:
         )
 
     def create_packet(self, packet: Packet) -> Packet:
+        """create a new packet for the subsequent brick"""
         packet = copy(packet)
 
         try:
diff --git a/titanfe/apps/brick_runner/packet.py b/titanfe/apps/brick_runner/packet.py
index 958cd32..d649877 100644
--- a/titanfe/apps/brick_runner/packet.py
+++ b/titanfe/apps/brick_runner/packet.py
@@ -12,14 +12,14 @@ import pickle
 import time
 from dataclasses import dataclass, field
 
+from ujotypes import UjoBase, UjoStringUTF8, read_buffer, UjoMap, ujo_to_python
+from ujotypes.variants.none import UJO_VARIANT_NONE
 
 from titanfe.messages import PacketMessage
 from titanfe.ujo_helper import py_to_ujo_bytes
 from titanfe.utils import create_uid, ns_to_ms, time_delta_in_ms, DictConvertable
 from titanfe.apps.brick_runner.value_mapping import Buffer
 
-from ujotypes import UjoBase, UjoStringUTF8, read_buffer, UjoMap, ujo_to_python
-from ujotypes.variants.none import UJO_VARIANT_NONE
 
 # ENCODING = "PICKLE"
 ENCODING = "UJO"
@@ -50,7 +50,7 @@ class Packet(DictConvertable):
     def __hash__(self):
         # careful with that axe, eugene!
         # to allow dict[packet], I'm making it hashable by id
-        # if we copy the object or recreate it somehow e.g. from_dict(to_dict) it will have a different hash
+        # if we copy the object or recreate it somehow, it will have a different hash
         # why not use the "uid"?
         # because if a packet gets split up into multiple packets, they all have the same uid
         return id(self)
@@ -83,6 +83,7 @@ class Packet(DictConvertable):
         return PacketMessage(self.to_dict())
 
     def __bytes__(self):
+        """encode a packet"""
         if ENCODING == "PICKLE":
             return pickle.dumps(self)
 
@@ -91,6 +92,7 @@ class Packet(DictConvertable):
 
     @classmethod
     def from_bytes(cls, bytez: bytes):
+        """decode a packet"""
         if ENCODING == "PICKLE":
             return pickle.loads(bytez)
 
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index c81166a..9be4139 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -100,7 +100,6 @@ class BrickRunner:
                     await self._stop.wait()
                     return
 
-                #else:
                 await self.input.start()
                 async for packet in self.input:
                     self.log.debug("process packet: %s", packet)
diff --git a/titanfe/apps/brick_runner/transport.py b/titanfe/apps/brick_runner/transport.py
index 6812e5c..d1ece89 100644
--- a/titanfe/apps/brick_runner/transport.py
+++ b/titanfe/apps/brick_runner/transport.py
@@ -1,3 +1,11 @@
+#
+# Copyright (c) 2019-present, wobe-systems GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# found in the LICENSE file in the root directory of this source tree.
+#
+"""transport medium for packets"""
+
 import asyncio
 
 import aioamqp
@@ -9,6 +17,8 @@ from titanfe.config import configuration as config
 
 
 class RabbitMQ:
+    """RabbitMQ transport for packets"""
+
     # TODO: make "robust" - handle reconnect and stuff
     #  maybe find inspiration in aio_pikas RobustConnection/Channel/Queue?
 
@@ -22,6 +32,7 @@ class RabbitMQ:
         self._channel: aioamqp.channel.Channel = None
 
     async def connect(self):
+        """create new rabbit mq connection/channel"""
         _, self._connection = await aioamqp.connect(
             host=config.rabbitmq_params.host,
             port=config.rabbitmq_params.port,
@@ -34,10 +45,11 @@ class RabbitMQ:
         await self._channel.basic_qos(prefetch_count=2)
 
     async def disconnect(self):
+        """disconnect"""
         if not self._connection:
             return
 
-        await asyncio.gather(*(self.stop_consumer(consumer) for consumer in list(self._consumers)))
+        await asyncio.gather(*(self.stop_consuming(consumer) for consumer in list(self._consumers)))
 
         await self._channel.close()
         await self._connection.close()
@@ -52,7 +64,8 @@ class RabbitMQ:
         await self.connection()
         return self._channel
 
-    async def start_consumer(self, queue_name, on_new_message_callback):
+    async def start_consuming(self, queue_name, on_new_message_callback):
+        """start consuming the given queue"""
         async def callback_wrapper(msgchannel, body, envelope, _):
             async def done_callback():
                 await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
@@ -65,12 +78,14 @@ class RabbitMQ:
         consumer_tag = await channel.basic_consume(callback_wrapper, queue_name)
         self._consumers[queue_name] = consumer_tag["consumer_tag"]
 
-    async def stop_consumer(self, queue_name):
+    async def stop_consuming(self, queue_name):
+        """stop consuming the queue"""
         tag = self._consumers.pop(queue_name)
         channel = await self.channel()
         await channel.basic_cancel(tag)
 
     async def publish(self, queue_name, message):
+        """publish a message in the queue"""
         if not isinstance(message, bytes):
             message = bytes(message)
 
diff --git a/titanfe/apps/brick_runner/value_mapping.py b/titanfe/apps/brick_runner/value_mapping.py
index 02e3216..9cc9fa3 100644
--- a/titanfe/apps/brick_runner/value_mapping.py
+++ b/titanfe/apps/brick_runner/value_mapping.py
@@ -187,10 +187,10 @@ def ensure_ujo_key(key):
 class Buffer(MutableMapping):
     """A connections buffer of memorized upstream values"""
 
-    def __init__(self, ujoBuffer=None):
-        if ujoBuffer is None:
-            ujoBuffer = UjoMap()
-        self._elements = ujoBuffer
+    def __init__(self, ujo_buffer=None):
+        if ujo_buffer is None:
+            ujo_buffer = UjoMap()
+        self._elements = ujo_buffer
 
     def __repr__(self):
         return f"Buffer({self._elements!r})"
diff --git a/titanfe/apps/control_peer/brick.py b/titanfe/apps/control_peer/brick.py
index 20a2101..8231673 100644
--- a/titanfe/apps/control_peer/brick.py
+++ b/titanfe/apps/control_peer/brick.py
@@ -10,7 +10,7 @@
 import re
 import shutil
 from collections import namedtuple
-from dataclasses import dataclass, field
+from dataclasses import dataclass
 from datetime import datetime
 from io import BytesIO
 from pathlib import Path
@@ -106,16 +106,17 @@ class EnvBuilder(venv.EnvBuilder):
 
 @dataclass
 class PortDescription:
-    id: str
-    typeName: str
+    id: str  # pylint: disable=invalid-name
+    typeName: str  # pylint: disable=invalid-name
     schema: str
 
 
 @dataclass
 class Connection:
-    InstanceID: str
-    sourcePort: PortDescription
-    targetPort: PortDescription
+    """representation of a connection between two bricks"""
+    InstanceID: str  # pylint: disable=invalid-name  # sorry, that's what we get
+    sourcePort: PortDescription  # pylint: disable=invalid-name
+    targetPort: PortDescription  # pylint: disable=invalid-name
     autoscale_queue_level: int
     mapping: T.List["PortMapping"]
     buffer: T.Dict
@@ -126,27 +127,37 @@ class Connection:
 
     def __post_init__(self):
         if isinstance(self.sourcePort, T.Mapping):
-            self.sourcePort = PortDescription(**self.sourcePort)
+            self.sourcePort = PortDescription(**self.sourcePort)  # pylint: disable=not-a-mapping
 
         if isinstance(self.targetPort, T.Mapping):
-            self.targetPort = PortDescription(**self.targetPort)
+            self.targetPort = PortDescription(**self.targetPort)  # pylint: disable=not-a-mapping
 
 
 @dataclass
 class Connections:
+    """a collection of input and output connections"""
+
     input: T.Dict["PortName", T.List[Connection]]
     output: T.Dict["PortName", T.List[Connection]]
 
     def __post_init__(self):
         for port_name, connections in self.input.items():
             self.input[port_name] = [
-                (Connection(**connection) if isinstance(connection, T.Mapping) else connection)
+                (
+                    Connection(**connection)  # pylint: disable=not-a-mapping
+                    if isinstance(connection, T.Mapping)
+                    else connection
+                )
                 for connection in connections
             ]
 
         for port_name, connections in self.output.items():
             self.output[port_name] = [
-                (Connection(**connection) if isinstance(connection, T.Mapping) else connection)
+                (
+                    Connection(**connection)  # pylint: disable=not-a-mapping
+                    if isinstance(connection, T.Mapping)
+                    else connection
+                )
                 for connection in connections
             ]
 
@@ -224,7 +235,9 @@ class BrickBaseDefinition:
         self.exe = environment.exe
 
     def __repr__(self):
-        return f"Base({self.uid!r}, {self.name!r}, " f"module_path={self.module_path!r})"
+        return (
+            f"Base({self.uid!r}, {self.name!r}, " f"module_path={self.module_path!r})"
+        )
 
     async def install_or_update(self, update=True, force_update=False):
         """Get a brick from the package manager and install it"""
@@ -238,9 +251,9 @@ class BrickBaseDefinition:
 
             if not force_update:
                 last_modified_local = destination.stat().st_mtime
-                if datetime.utcfromtimestamp(last_modified_local) >= datetime.utcfromtimestamp(
-                    self.last_modified
-                ):
+                if datetime.utcfromtimestamp(
+                    last_modified_local
+                ) >= datetime.utcfromtimestamp(self.last_modified):
                     return
 
             shutil.rmtree(destination)
@@ -305,7 +318,9 @@ class BrickInstanceDefinition:
 
         logger = logging.TitanPlatformLogger(
             __name__,
-            context=logging.FlowContext(flow.uid, flow.name, instance_uid, instance_name),
+            context=logging.FlowContext(
+                flow.uid, flow.name, instance_uid, instance_name
+            ),
         )
 
         base = BrickBaseDefinition(
@@ -314,9 +329,13 @@ class BrickInstanceDefinition:
             family=config["family"],
             logger=logger,
         )
-        runtime_params = RuntimeParameters(*[config[f] for f in RuntimeParameters._fields])
+        runtime_params = RuntimeParameters(
+            *[config[f] for f in RuntimeParameters._fields]
+        )
         processing_params = config["parameters"]
-        connections = Connections(brick_description["Inbound"], brick_description["Outbound"])
+        connections = Connections(
+            brick_description["Inbound"], brick_description["Outbound"]
+        )
 
         instance = cls(
             instance_uid,
@@ -337,5 +356,6 @@ class BrickInstanceDefinition:
     @property
     def input_queues(self):
         return {
-            port["name"]: truncate(self.uid + ":" + port["name"], 255) for port in self.ports.input
+            port["name"]: truncate(self.uid + ":" + port["name"], 255)
+            for port in self.ports.input
         }
diff --git a/titanfe/config.py b/titanfe/config.py
index 3f9eb94..81e311f 100644
--- a/titanfe/config.py
+++ b/titanfe/config.py
@@ -50,6 +50,7 @@ OPTION_ALIASES = {
     "endpoint_provider": "EndpointProvider",
 }
 
+
 @dataclass
 class RabbitMQConnectionParams:
     host: str
@@ -66,7 +67,9 @@ class Configuration:
     kafka_log_topic: str = DEFAULT_KAFKA_LOG_TOPIC
 
     no_kafka_today: bool = literal_eval(
-        os.getenv("TITAN_METRICS_DISABLED") or os.getenv("TITANFE_WITHOUT_KAFKA") or "False"
+        os.getenv("TITAN_METRICS_DISABLED")
+        or os.getenv("TITANFE_WITHOUT_KAFKA")
+        or "False"
     )
     rabbitmq_url: str = DEFAULT_RABBITMQ_URL
     gridmanager_address: str = DEFAULT_GRIDMANAGER_ADDRESS
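
Side note on the reflowed default above: both environment variables go through
literal_eval, so their values must be python literals, and they are read once,
when titanfe.config is imported. A hedged sketch:

    import os
    # either variable disables kafka; must be set before titanfe.config is imported
    os.environ["TITAN_METRICS_DISABLED"] = "True"   # -> Configuration.no_kafka_today == True
    # with both variables unset, the fallback string "False" parses to False
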
diff --git a/titanfe/connection.py b/titanfe/connection.py
index 18cbb29..6ecccd7 100644
--- a/titanfe/connection.py
+++ b/titanfe/connection.py
@@ -31,7 +31,9 @@ NetworkAddress = namedtuple("NetworkAddress", ("host", "port"))
 
 
 def decode_ujo_message(ujo_bytes):
-    """Decode ujo bytes into a corresponding python object, but keep an existing "Payload" as Ujo."""
+    """Decode ujo bytes into a corresponding python object,
+    but keep an existing "Payload" as Ujo.
+    """
     ujoobj = read_buffer(ujo_bytes)
     _, content = ujoobj[0], ujoobj[1]
 
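A minimal usage sketch of the reflowed helper (hedged: it assumes the decoded
message is a mapping; "meta" is an invented key for illustration):

    msg = decode_ujo_message(ujo_bytes)  # ujo_bytes as received from the wire
    payload = msg["Payload"]             # deliberately kept as an Ujo object
    meta = msg["meta"]                   # hypothetical entry, now a plain python value
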
diff --git a/titanfe/testing/testrunner.py b/titanfe/testing/testrunner.py
index 1bba043..a9ddff9 100644
--- a/titanfe/testing/testrunner.py
+++ b/titanfe/testing/testrunner.py
@@ -13,17 +13,14 @@ Fixtures for BrickRunner-Tests
 import asyncio
 import inspect
 import logging
-import queue
 import sys
 import threading
-from collections import defaultdict
-from datetime import datetime
 from typing import Union
 from unittest.mock import MagicMock
 
 import janus
 
-from apps.brick_runner.input import Input as OriginalInput
+from titanfe.apps.brick_runner.input import Input as OriginalInput
 from titanfe.apps.brick_runner.brick import Brick
 from titanfe.apps.brick_runner.packet import Packet
 from titanfe.apps.brick_runner.runner import BrickRunner
@@ -74,13 +71,13 @@ class Input(OriginalInput):
     """TestRunner: Input replacement"""
 
     def __init__(self):
-        self.Q = janus.Queue()
+        self.queue = janus.Queue()
 
         self.max_idle_time = 1  # give it at least a chance to run
         self.log = LOG
 
         self._getter = asyncio.Future()
-        self._packets = self.Q.async_q
+        self._packets = self.queue.async_q
 
     async def start(self):
         pass
@@ -88,17 +85,18 @@ class Input(OriginalInput):
     async def stop(self):
         self._getter.cancel()
 
-    def mark_done(self, packet):
+    def mark_done(self, _):
         self._packets.task_done()
 
 
 class Output:
+    """Output replacement"""
     def __init__(self):
-        self.Q = janus.Queue()
-        self.get = self.Q.sync_q.get
+        self.queue = janus.Queue()
+        self.get = self.queue.sync_q.get
 
     async def put(self, packet, port):
-        await self.Q.async_q.put((port, packet.payload))
+        await self.queue.async_q.put((port, packet.payload))
 
     async def close(self):
         pass
@@ -276,13 +274,14 @@ class TestRunner:
         runner = self.runner
 
         class PortProxy:
+            """facade for the input ports"""
             def __init__(self, name):
                 self.name = name
 
             def put(self, item):
                 if not isinstance(item, Packet):
                     item = Packet(payload=item)
-                runner.input.Q.sync_q.put((self.name, item))
+                runner.input.queue.sync_q.put((self.name, item))
 
         class InputProxy:
             """To get port/payload into the running runner..."""
@@ -291,7 +290,7 @@ class TestRunner:
             def put(item):
                 if not isinstance(item, Packet):
                     item = Packet(payload=item)
-                runner.input.Q.sync_q.put((DEFAULT_PORT, item))
+                runner.input.queue.sync_q.put((DEFAULT_PORT, item))
 
             @staticmethod
             def __getitem__(name):
@@ -307,6 +306,6 @@ class TestRunner:
         self.thread.start()
 
     def stop(self):
-        self.runner.input._getter.cancel()
+        self.runner.input._getter.cancel()  # pylint: disable=protected-access
         self.terminate.set()
         self.thread.join()
-- 
GitLab


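The Q -> queue rename keeps the janus split intact: tests feed the sync side while
the runner consumes the async side. A hedged usage sketch (it assumes the TestRunner
exposes the proxies above as input/output; "PortB" is an invented port name):

    test_runner.input.put("payload")           # wrapped into a Packet on DEFAULT_PORT
    test_runner.input["PortB"].put("payload")  # per port, via PortProxy
    port, payload = test_runner.output.get()   # sync side of the Output janus queue
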
From 38c6bd8dc822cc8d4afc0da2be3868a5a2b6fd11 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Thu, 19 Oct 2023 16:05:39 +0200
Subject: [PATCH 11/29] go fully single threaded

---
 requirements_prod.txt                  |   5 +-
 setup.py                               |   5 +-
 test/brick_runner/conftest.py          |  69 ++++++++++++--
 test/brick_runner/test_runner.py       |  88 +++++------------
 test/control_peer/test_webapi.py       |  21 ++++-
 titanfe/apps/brick_runner/__main__.py  |   6 +-
 titanfe/apps/brick_runner/brick.py     |  50 +++-------
 titanfe/apps/brick_runner/input.py     | 111 ++++++++++++++++++----
 titanfe/apps/brick_runner/output.py    | 126 ++++++++++++++++---------
 titanfe/apps/brick_runner/runner.py    |  78 +++++++--------
 titanfe/apps/brick_runner/transport.py |  98 -------------------
 titanfe/testing/testrunner.py          |   2 +-
 12 files changed, 341 insertions(+), 318 deletions(-)
 delete mode 100644 titanfe/apps/brick_runner/transport.py

diff --git a/requirements_prod.txt b/requirements_prod.txt
index 65a9f6f..1a7f9cd 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -6,8 +6,8 @@ janus
 aiokafka == 0.5.2 # fixed due to the kafka-python later version require
 kafka-python == 1.4.6
 elasticsearch == 7.8.*  # strangely 7.9.0 is missing the async parts that were introduced with 7.8.0?
-fastapi == 0.9.*
-starlette == 0.24.*
+fastapi == 0.85.1
+starlette == 0.20.4
 uvicorn == 0.9.*  # fixed due to the uvloop later versions require
 aiohttp >= 3.6.2
 aiohttp-requests >= 0.1.3  # as required for elasticsearch async
@@ -16,6 +16,7 @@ requests
 docopt
 pycryptodome
 aioamqp
+pika
 
 # linux only:
 uvloop == 0.13.*;platform_system=="Linux"
diff --git a/setup.py b/setup.py
index 0499689..08d639f 100644
--- a/setup.py
+++ b/setup.py
@@ -70,8 +70,8 @@ setup(
         "aiokafka == 0.5.2",  # fixed due to the kafka-python version required by later versions
         "kafka-python == 1.4.6",  # aiokafka 0.5.2 requires this version
         "elasticsearch == 7.8.*",
-        "starlette == 0.24.*",
-        "fastapi == 0.9.*",
+        "fastapi == 0.85.1",
+        "starlette == 0.20.4",
         "uvicorn == 0.9.*",  # fixed due to the uvloop later versions require
         'uvloop == 0.13.* ;platform_system=="Linux"',
         "aiohttp >= 3.6.2",
@@ -79,6 +79,7 @@ setup(
         "dataclasses-json",
         "requests",
         "docopt",
+        "pika",
         "pycryptodome",
     ],
     ext_modules=[],
diff --git a/test/brick_runner/conftest.py b/test/brick_runner/conftest.py
index 3b7eb14..3efd878 100644
--- a/test/brick_runner/conftest.py
+++ b/test/brick_runner/conftest.py
@@ -8,10 +8,13 @@
 """
 Fixtures for BrickRunner-Tests
 """
+import asyncio
+import queue
 # pylint: disable=redefined-outer-name
 import sys
 import types
 import logging
+from collections import defaultdict
 from pathlib import Path
 from unittest.mock import MagicMock, patch
 
@@ -65,9 +68,59 @@ class GridManagerDummy:
     register_runner = MagicMock()
 
 
+class InputTransportDouble:
+    def __init__(self, *args, **kwargs):
+        self.IN = defaultdict(asyncio.Queue)
+
+    async def start_consuming(self, queue_name, callback):
+        async def _start_consuming():
+            async def done_callback():
+                self.IN[queue_name].task_done()
+
+            while True:
+                try:
+                    q = self.IN[queue_name]
+                    message = await asyncio.wait_for(q.get(), timeout=0.25)
+                except asyncio.TimeoutError:
+                    continue
+
+                await callback(message, done_callback)
+
+        asyncio.create_task(_start_consuming())
+
+    async def stop_consuming(self, queue_name):
+        pass
+
+
+@pytest.fixture()
+def patched_input_transport():
+    input_transport_path = "titanfe.apps.brick_runner.input.Transport"
+    with patch(input_transport_path, InputTransportDouble):
+        yield
+
+
+class OutputTransportDouble:
+    def __init__(self, exchange_name, logger=None):
+        self.exchange_name = exchange_name
+        self.OUT = asyncio.Queue()
+
+    def publish(self, queue_name, message):
+        async def _publish():
+            await self.OUT.put((queue_name, message))
+
+        asyncio.create_task(_publish())
+
+
+@pytest.fixture()
+def patched_output_transport():
+    output_transport_path = "titanfe.apps.brick_runner.output.Transport"
+    with patch(output_transport_path, OutputTransportDouble):
+        yield
+
+
 @pytest.mark.asyncio
 @pytest.fixture()
-async def brick_runner():
+async def brick_runner(patched_input_transport, patched_output_transport):
     """set up a brick runner, with networking patched away"""
     guess_module_path = "titanfe.apps.control_peer.brick.BrickBaseDefinition.guess_module_path"
     with patch(guess_module_path, MagicMock(return_value=Path("n/a"))):
@@ -102,12 +155,12 @@ async def brick_runner():
                             "InstanceID": "Next-Dummy",
                             "autoscale_queue_level": 0,
                             "mapping": [
-                                dict(
-                                    type="port",
-                                    buffer_id="",
-                                    source_fields=["output"],
-                                    target_fields=["test"],
-                                )
+                                {
+                                    "type": "port",
+                                    "buffer_id": "",
+                                    "source_fields": ["output"],
+                                    "target_fields": ["test"],
+                                }
                             ],
                             "buffer": {},
                             "sourcePort": {
@@ -137,5 +190,3 @@ async def brick_runner():
 
     yield brick_runner
 
-    await brick_runner.stop_processing()
-    await brick_runner.shutdown()
diff --git a/test/brick_runner/test_runner.py b/test/brick_runner/test_runner.py
index 033057f..1431cad 100644
--- a/test/brick_runner/test_runner.py
+++ b/test/brick_runner/test_runner.py
@@ -12,11 +12,13 @@ Test the BrickRunner itself
 # pylint: disable=redefined-outer-name
 
 import asyncio
+import queue
 from collections import defaultdict
+from unittest.mock import patch
 
 import pytest
 
-from apps.brick_runner.transport import RabbitMQ
+
 from ujotypes import UjoStringC
 
 from titanfe.apps.brick_runner.value_mapping import Buffer
@@ -25,69 +27,33 @@ from titanfe.brick import BrickBase
 from titanfe.constants import DEFAULT_PORT
 
 
-class RabbitMQDouble(RabbitMQ):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.IN = defaultdict(asyncio.Queue)
-        self.OUT = asyncio.Queue()
-
-    async def start_consuming(self, queue_name, callback):
-        message = await self.IN[queue_name].get()
-
-        async def done_callback():
-            self.IN[queue_name].task_done()
-
-        callback(message, done_callback())
-
-    async def stop_consuming(self, queue_name):
-        pass
-
-    async def publish(self, queue_name, message):
-        await self.OUT.put(
-            {
-                "message": message,
-                "exchange": self.exchange_name,
-                "routing_key": queue_name,
-            }
-        )
 
 
 @pytest.mark.asyncio
-async def test_basic_packet_processing(brick_runner):  # noqa: F811
+async def test_basic_packet_processing(brick_runner
+):  # noqa: F811
     """A Packet is taken from the input, processed within the Brick's module
     and ends up in the output with a new payload"""
 
     brick_runner.brick.is_inlet = False
 
     class Brick(BrickBase):
-        def process(
-            self, input, port
-        ):  # pylint: disable=unused-argument, redefined-builtin
+        def process(self, input, port):  # pylint: disable=unused-argument, redefined-builtin
             return UjoStringC("NewValue")
 
     brick_runner.brick.module.Brick = Brick
 
-    rmq_double = RabbitMQDouble("dummy_exchange")
-    brick_runner.input.transport = rmq_double
-    brick_runner.output.transport = rmq_double
+    brick_runner.input._queues = {DEFAULT_PORT: DEFAULT_PORT}
 
     runner_run = asyncio.create_task(brick_runner.run())
 
     payload = UjoStringC("InitialValue")
+    input_packet1 = Packet(uid="Test1", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
+    input_packet2 = Packet(uid="Test2", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
+    input_packet3 = Packet(uid="Test3", payload=payload, buffer=Buffer(), port=DEFAULT_PORT)
 
-    input_packet1 = Packet(
-        uid="Test1", payload=payload, buffer=Buffer(), port=DEFAULT_PORT
-    )
-    input_packet2 = Packet(
-        uid="Test2", payload=payload, buffer=Buffer(), port=DEFAULT_PORT
-    )
-    input_packet3 = Packet(
-        uid="Test3", payload=payload, buffer=Buffer(), port=DEFAULT_PORT
-    )
-
-    await brick_runner.input._packets.put((DEFAULT_PORT, input_packet1))
-    await brick_runner.input._packets.put((DEFAULT_PORT, input_packet2))
-    await brick_runner.input._packets.put((DEFAULT_PORT, input_packet3))
+    await brick_runner.input.transport.IN[DEFAULT_PORT].put(input_packet1)
+    await brick_runner.input.transport.IN[DEFAULT_PORT].put(input_packet2)
 
     # {
     #     'message': Packet(uid="Test", payload="NewValue"),
@@ -95,48 +61,44 @@ async def test_basic_packet_processing(brick_runner):  # noqa: F811
     #     'routing_key': ('Next-Dummy:Input',)
     # }
 
-    brick_output = await rmq_double.OUT.get()
-    packet = brick_output["message"]
+    queue_name, packet = await brick_runner.output.transport.OUT.get()
     assert packet.payload == UjoStringC("NewValue")
     assert input_packet1.uid == packet.uid
 
-    brick_output = await rmq_double.OUT.get()
-    packet = brick_output["message"]
+    queue_name, packet = await brick_runner.output.transport.OUT.get()
     assert packet.payload == UjoStringC("NewValue")
     assert input_packet2.uid == packet.uid
 
-    brick_output = await rmq_double.OUT.get()
-    packet = brick_output["message"]
+    await brick_runner.input.transport.IN[DEFAULT_PORT].put(input_packet3)
+
+    queue_name, packet = await brick_runner.output.transport.OUT.get()
     assert packet.payload == UjoStringC("NewValue")
     assert input_packet3.uid == packet.uid
 
-    await brick_runner.stop_processing()
+    brick_runner.schedule_shutdown()
     await runner_run
 
 
+@pytest.mark.skip("we have disabled the brick runner life cycle management")
 @pytest.mark.asyncio
-async def test_exit_when_idle(brick_runner):  # noqa: F811
+async def test_exit_when_idle(
+    brick_runner
+):  # noqa: F811
     """The runner should exit after being idle for a specific amount of time"""
     max_idle_seconds = 0.2
 
     class Brick(BrickBase):
-        def process(
-            self, input, port
-        ):  # pylint: disable=unused-argument, redefined-builtin
+        def process(self, input, port):  # pylint: disable=unused-argument, redefined-builtin
             return UjoStringC("NewValue")
 
     brick_runner.brick.module.Brick = Brick
     brick_runner.brick.is_inlet = False
 
-    rmq_double = RabbitMQDouble("dummy_exchange")
-    brick_runner.input.transport = rmq_double
-    brick_runner.output.transport = rmq_double
-
-    await brick_runner.stop_processing()
-
     try:
         await asyncio.wait_for(brick_runner.run(), timeout=max_idle_seconds * 10)
     except TimeoutError:
         assert False
     else:
         assert True
+
+    brick_runner.schedule_shutdown()
\ No newline at end of file
diff --git a/test/control_peer/test_webapi.py b/test/control_peer/test_webapi.py
index 10bf7a5..fb84dec 100644
--- a/test/control_peer/test_webapi.py
+++ b/test/control_peer/test_webapi.py
@@ -61,7 +61,6 @@ def test_stop_flow(client, control_peer):
 
 
 def test_start_flow(client, control_peer):
-
     with mock.patch.object(
         control_peer, "start_new_runner", return_value=MagicMock()
     ) as start_method:
@@ -91,8 +90,26 @@ def test_start_flow(client, control_peer):
                         "output": [
                             {
                                 "InstanceID": "Slacker1-1956b1af-e13e-11ea-9efd-00059a3c7a00",
-                                "Port": "input",
                                 "autoscale_queue_level": 10,
+                                "mapping": [
+                                    {
+                                        "type": "port",
+                                        "buffer_id": "",
+                                        "source_fields": ["output"],
+                                        "target_fields": ["test"],
+                                    }
+                                ],
+                                "buffer": {},
+                                "sourcePort": {
+                                    "id": "Out",
+                                    "typeName": "test",
+                                    "schema": "test = variant;",
+                                },
+                                "targetPort": {
+                                    "id": "Input",
+                                    "typeName": "test",
+                                    "schema": "test = variant;",
+                                },
                             }
                         ]
                     },
diff --git a/titanfe/apps/brick_runner/__main__.py b/titanfe/apps/brick_runner/__main__.py
index c3cfe30..681b829 100644
--- a/titanfe/apps/brick_runner/__main__.py
+++ b/titanfe/apps/brick_runner/__main__.py
@@ -35,10 +35,7 @@ async def run_app(args):
     brick = pickle.loads(args.brick)
 
     runner = await BrickRunner.create(args.id, brick)
-    try:
-        await runner.run()
-    except KeyboardInterrupt:
-        runner.schedule_shutdown()
+    await runner.run()
 
 
 def main():
@@ -56,3 +53,4 @@ def main():
 
 if __name__ == "__main__":
     main()
+    sys.exit(0)
\ No newline at end of file
diff --git a/titanfe/apps/brick_runner/brick.py b/titanfe/apps/brick_runner/brick.py
index 35fe36c..7f4cbf3 100644
--- a/titanfe/apps/brick_runner/brick.py
+++ b/titanfe/apps/brick_runner/brick.py
@@ -6,23 +6,22 @@
 #
 
 """ A Brick within the brick runner """
-import asyncio
 import time
 from collections import namedtuple
 from copy import copy
 
 import janus
-from ujotypes import UjoBase
 
 from titanfe import log as logging
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
 from titanfe.brick import InletBrickBase
-from titanfe.utils import get_module, time_delta_in_ms
+from titanfe.constants import DEFAULT_PORT
 from titanfe.ujo_helper import python_to_ujo
+from titanfe.utils import get_module, time_delta_in_ms
+from ujotypes import UjoBase
 from .adapter import BrickAdapter, AdapterMeta
 from .output import Output
 from .packet import Packet
-from ...constants import DEFAULT_PORT
 
 PortMapping = namedtuple("PortMapping", ("rules", "type"))
 
@@ -69,7 +68,7 @@ class Brick:
 
         self.adapter = BrickAdapter(
             AdapterMeta(brick=(self.uid, self.name), flow=(self.flow.uid, self.flow.name)),
-            self.adapter_output_callback,
+            self.output_packet,
             self.log,
             self.default_port,
         )
@@ -77,6 +76,8 @@ class Brick:
         self.instance = None
         self.last_execution_start = None
 
+        self._current_packet = None
+
     def create_instance(self):
         """create an instance of the actual Brick"""
         try:
@@ -101,12 +102,9 @@ class Brick:
     def execution_time(self):
         return time_delta_in_ms(self.last_execution_start)
 
-    def adapter_output_callback(self, value, port=None):
-        self.log.debug("brick emits on port [%s]: %r", port, value)
-        self._brick_output.sync_q.put((value, port))
-        self._brick_output.sync_q.join()
-
     async def process(self, packet):
+        self._current_packet = packet
+
         """run the brick module for the given packet in a separate thread"""
         self.log.info(
             "(%s) execute Brick: %s(%s) for %r",
@@ -122,13 +120,7 @@ class Brick:
         if not self.is_inlet:
             payload = packet.payload
 
-        loop = asyncio.get_event_loop()
-        execution = loop.run_in_executor(None, self.run_instance_processing, payload, packet.port)
-
-        if not self.is_outlet:
-            await self.process_output(parent_packet=packet)
-
-        await execution
+        self.run_instance_processing(payload, packet.port)
 
         await self.metric_emitter.emit_brick_metrics(self.execution_time)
         if self.is_outlet:
@@ -150,28 +142,12 @@ class Brick:
                     # we should make that more explicit somehow
                     payload, port = result
 
-                self._brick_output.sync_q.put((payload, port))
+                self.output_packet(payload, port)
 
         except Exception as error:  # pylint: disable=broad-except
             self.log.with_context.error("brick execution failed: %r", error, exc_info=True)
 
-        self._brick_output.sync_q.put(SENTINEL)
-
-    async def process_output(self, parent_packet):
-        """publish the bricks output"""
-        while True:
-            brick_output = await self._brick_output.async_q.get()
-            self.log.debug("process brick output: %r", brick_output)
-            try:
-                if brick_output is SENTINEL:
-                    return
-                if brick_output is not None:
-                    await self.publish_packet(*brick_output, parent_packet)
-
-            finally:
-                self._brick_output.async_q.task_done()
-
-    async def publish_packet(self, payload, port, parent_packet):
+    def output_packet(self, payload, port=None):
         """publish the packet"""
         if port is None:
             port = self.default_port
@@ -179,8 +155,8 @@ class Brick:
         if not isinstance(payload, UjoBase):
             payload = python_to_ujo(payload)
 
-        packet = Packet(port=port) if self.is_inlet else copy(parent_packet)
+        packet = Packet(port=port) if self.is_inlet else copy(self._current_packet)
         packet.payload = payload
 
         self.log.debug("brick output on port [%s]: %r", port, packet)
-        await self.output.put(packet, port)
+        self.output.put(packet, port)
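
The essence of "go fully single threaded", condensed from the brick.py hunks above:
the executor thread and the janus hand-off are gone, results are emitted synchronously.

    # before: run in a thread, collect results via a janus queue
    #     execution = loop.run_in_executor(None, self.run_instance_processing, ...)
    #     await self.process_output(parent_packet=packet)
    # after: a plain call on the event-loop thread; run_instance_processing
    # invokes self.output_packet(...), which publishes via self.output.put(...)
    self.run_instance_processing(payload, packet.port)
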
diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index 03303e8..c073a49 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -10,29 +10,31 @@ import asyncio
 import functools
 import typing as T
 
+import aioamqp
+import aioamqp.protocol
+import aioamqp.channel
+from wheel.cli.pack import pack
+
 import titanfe.log
 from titanfe.apps.brick_runner.packet import Packet
+from titanfe.config import configuration as config
 
 Port = T.NewType("Port", str)
 
 
 class Input:
     """the input side of brick, using rabbitmq as transport medium"""
+
     def __init__(
         self,
         input_queues: T.Dict[Port, str],
-        transport: "transport.RabbitMQ",
         max_idle_time=60,
         logger=None,
     ):
-        self.transport = transport
-        self.log = (
-            logger.getChild("Input") if logger else titanfe.log.getLogger(__name__)
-        )
+        self.log = logger.getChild("Input") if logger else titanfe.log.getLogger(__name__)
+        self.transport = Transport(None, self.log)  # input only consumes, no exchange needed
 
-        self.max_idle_time = max(
-            (max_idle_time, 0.2)
-        )  # give it at least a chance to run
+        self.max_idle_time = max((max_idle_time, 0.2))  # give it at least a chance to run
 
         self._getter = asyncio.Future()
         self._queues = input_queues
@@ -47,9 +49,7 @@ class Input:
             self._getter = asyncio.create_task(self._packets.get())
             try:
                 self.log.debug("wait for packet")
-                port, packet = await asyncio.wait_for(
-                    self._getter, timeout=self.max_idle_time
-                )
+                port, packet = await asyncio.wait_for(self._getter, timeout=self.max_idle_time)
                 self.log.debug("got packet: %r", packet)
             except asyncio.CancelledError:
                 self.log.debug("packet getter was cancelled")
@@ -69,9 +69,7 @@ class Input:
         self.log.debug("start consumers: %r", self._queues)
         await asyncio.gather(
             *(
-                self.transport.start_consuming(
-                    queue, functools.partial(self._on_new_message, port)
-                )
+                self.transport.start_consuming(queue, functools.partial(self._on_new_message, port))
                 for port, queue in self._queues.items()
             )
         )
@@ -86,11 +84,9 @@ class Input:
     async def _on_new_message(self, port, packet, done_callback: T.Callable):
         self.log.debug("received on port %r: %r", port, packet)
 
-        if not isinstance(packet, (bytes, Packet)):
-            raise TypeError("only Packets and byte-encoded packets are supported")
-
         try:
-            packet = Packet.from_bytes(packet) if isinstance(packet, bytes) else packet
+            if not isinstance(packet, Packet):
+                packet = Packet.from_bytes(packet)
         except Exception:  # pylint: disable=broad-exception-caught  # who knows what might go wrong
             self.log.error("Failed to convert message to packet", exc_info=True)
             await done_callback()  # the ack callback is a coroutine and must be awaited
@@ -108,3 +104,82 @@ class Input:
         done_callback = self._packet_done_callbacks.pop(packet)
         asyncio.create_task(done_callback())
         self._packets.task_done()
+
+
+class Transport:
+    """RabbitMQ transport for packets"""
+
+    # TODO: make "robust" - handle reconnect and stuff
+    #  maybe find inspiration in aio_pikas RobustConnection/Channel/Queue?
+
+    def __init__(self, exchange_name, logger=None):
+        self.log = (
+            logger.getChild("transport.RabbitMQ") if logger else titanfe.log.getLogger(__name__)
+        )
+        self.exchange_name = exchange_name
+        self._consumers = {}
+        self._connection: aioamqp.protocol.AmqpProtocol = None
+        self._channel: aioamqp.channel.Channel = None
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        for c in (self._channel, self._connection):
+            if not c:
+                continue
+            try:
+                c.close()
+            except Exception:  # pylint: disable=broad-except
+                self.log.warning("failed to close channel/connection", exc_info=True)
+
+    async def connect(self):
+        """create new rabbit mq connection/channel"""
+        _, self._connection = await aioamqp.connect(
+            host=config.rabbitmq_params.host,
+            port=config.rabbitmq_params.port,
+            login=config.rabbitmq_params.user,
+            password=config.rabbitmq_params.password,
+            heartbeat=60,
+        )
+        self.log.debug("connected to rabbitmq")
+        self._channel = await self._connection.channel()
+        await self._channel.basic_qos(prefetch_count=2)
+
+    async def disconnect(self):
+        """disconnect"""
+        if not self._connection:
+            return
+
+        await asyncio.gather(*(self.stop_consuming(consumer) for consumer in list(self._consumers)))
+
+        await self._channel.close()
+        await self._connection.close()
+        self.log.debug("disconnected from rabbitmq")
+
+    async def connection(self):
+        if not self._connection:
+            await self.connect()
+        return self._connection
+
+    async def channel(self) -> aioamqp.channel.Channel:
+        await self.connection()
+        return self._channel
+
+    async def start_consuming(self, queue_name, on_new_message_callback):
+        """start consuming the given queue"""
+
+        async def callback_wrapper(msgchannel, body, envelope, _):
+            async def done_callback():
+                await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
+
+            await on_new_message_callback(body, done_callback)
+
+        channel = await self.channel()
+
+        await channel.queue_declare(queue_name, durable=True)
+        consumer_tag = await channel.basic_consume(callback_wrapper, queue_name)
+        self._consumers[queue_name] = consumer_tag["consumer_tag"]
+
+    async def stop_consuming(self, queue_name):
+        """stop consuming the queue"""
+        tag = self._consumers.pop(queue_name)
+        channel = await self.channel()
+        await channel.basic_cancel(tag)
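
The consume/ack handshake the Transport implements, reduced to the callback contract
(a sketch, not the full implementation):

    async def on_new_message(body, done_callback):
        packet = Packet.from_bytes(body)  # conversion failures are logged, acked, dropped
        ...                               # hand the packet to the processing loop
        await done_callback()             # acks via basic_client_ack / delivery_tag
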
diff --git a/titanfe/apps/brick_runner/output.py b/titanfe/apps/brick_runner/output.py
index 816a4ef..b5921ea 100644
--- a/titanfe/apps/brick_runner/output.py
+++ b/titanfe/apps/brick_runner/output.py
@@ -6,24 +6,74 @@
 #
 
 """The output with its server and ports"""
-import asyncio
 import typing as T
 from copy import deepcopy, copy
 from dataclasses import dataclass
 
+import pika
 from UJOSchema import schema_to_type
-
 from ujotypes import UjoBase, UJO_VARIANT_NONE
 
 import titanfe.log
+from titanfe.config import configuration as config
 from titanfe.apps.brick_runner.packet import Packet
 from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
 from titanfe.apps.control_peer.brick import Connection
 
 
+@dataclass
+class Port:
+    name: str
+    consumers: T.List["Consumer"]
+
+
+class Output:
+    """The output side of a brick runner creates a Server.
+       It will then send packets as requested by the following inputs.
+
+    Arguments:
+        runner (BrickRunner): instance of a parent brick runner
+        name (str): a name for the output destination
+        address (NetworkAddress): the network address of the output server
+    """
+
+    def __init__(
+        self,
+        output_connections: T.Dict["PortName", T.List[Connection]],
+        exchange_name,
+        logger=None,
+    ):
+        self.log = (
+            logger.getChild("Output") if logger else titanfe.log.getLogger(__name__)
+        )
+
+        self.ports: T.Dict[str, Port] = {
+            port_name: Port(
+                port_name,
+                [Consumer.from_connection(connection) for connection in targets],
+            )
+            for port_name, targets in output_connections.items()
+        }
+
+        self.transport = Transport(exchange_name, self.log)
+
+    def __getitem__(self, port) -> Port:
+        return self.ports[port]
+
+    def put(self, packet, port):
+        consumers = self[port].consumers
+        self.log.debug(
+            "publish %r on port %r to consumers: %r", packet, port, consumers
+        )
+        for consumer in consumers:
+            next_packet = consumer.create_packet(packet)
+            self.transport.publish(consumer.queue_name, next_packet)
+
+
 @dataclass
 class Consumer:
     """everything we need to know about a subsequent brick"""
+
     queue_name: str
     target_type: UjoBase
     mapping_rules: MappingRules
@@ -34,7 +84,9 @@ class Consumer:
         return cls(
             queue_name=connection.target_queue,
             target_type=(
-                schema_to_type(connection.targetPort.schema, connection.targetPort.typeName)
+                schema_to_type(
+                    connection.targetPort.schema, connection.targetPort.typeName
+                )
                 if connection.targetPort.schema
                 else UJO_VARIANT_NONE
             ),
@@ -70,48 +122,34 @@ class Consumer:
         return packet
 
 
-@dataclass
-class Port:
-    name: str
-    consumers: T.List[Consumer]
-
-
-class Output:
-    """The output side of a brick runner creates a Server.
-       It will then send packets as requested by the following inputs.
-
-    Arguments:
-        runner (BrickRunner): instance of a parent brick runner
-        name (str): a name for the output destination
-        address (NetworkAddress): the network address of the output server
-    """
-
-    def __init__(
-        self,
-        output_connections: T.Dict["PortName", T.List[Connection]],
-        transport: "transport.RabbitMQ",
-        logger=None,
-    ):
-        self.transport = transport
-        self.log = logger.getChild("Output") if logger else titanfe.log.getLogger(__name__)
-
-        self.ports: T.Dict[str, Port] = {
-            port_name: Port(
-                port_name,
-                [Consumer.from_connection(connection) for connection in targets],
-            )
-            for port_name, targets in output_connections.items()
-        }
+class Transport:
+    def __init__(self, exchange_name, logger):
+        self.log = logger.getChild("Transport")
 
-    def __getitem__(self, port) -> Port:
-        return self.ports[port]
+        self.exchange_name = exchange_name
 
-    async def put(self, packet, port):
-        consumers = self[port].consumers
-        self.log.debug("publish %r on port %r to consumers: %r", packet, port, consumers)
-        await asyncio.gather(
-            *(
-                self.transport.publish(consumer.queue_name, consumer.create_packet(packet))
-                for consumer in consumers
+        self.connection = pika.BlockingConnection(
+            pika.ConnectionParameters(
+                host=config.rabbitmq_params.host,
+                port=config.rabbitmq_params.port,
+                credentials=pika.credentials.PlainCredentials(
+                    config.rabbitmq_params.user, config.rabbitmq_params.password
+                ),
             )
         )
+
+        self.channel = self.connection.channel()
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        for c in (self.channel, self.connection):
+            try:
+                c.close()
+            except Exception:  # pylint: disable=broad-except
+                self.log.warning("failed to close channel/connection", exc_info=True)
+
+    def publish(self, queue_name, packet):
+        self.channel.basic_publish(
+            self.exchange_name,
+            routing_key=queue_name,
+            body=bytes(packet),
+        )
\ No newline at end of file
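
Publishing is now a plain blocking call through pika. A minimal usage sketch
("some_exchange" and the queue name are invented):

    from titanfe.apps.brick_runner.output import Transport
    from titanfe.apps.brick_runner.packet import Packet
    import titanfe.log

    transport = Transport("some_exchange", titanfe.log.getLogger(__name__))
    transport.publish("Next-Brick:Input", Packet())  # body is bytes(packet)
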
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index 9be4139..b1d6bce 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -19,7 +19,10 @@ from .input import Input
 from .metrics import MetricEmitter
 from .output import Output
 from .packet import Packet
-from .transport import RabbitMQ
+
+
+class ForcedShutdown(Exception):
+    """forcefully shutting down"""
 
 
 class BrickRunner:
@@ -41,16 +44,9 @@ class BrickRunner:
         self.input = None
         self.output = None
         self.brick = None
-        self.server = None
-        self.address = (None, None)
-        self.gridmanager = None
-        self.message_transport = None
-
-        self.setup_completed = asyncio.Event()
 
         self.idle_since = None
         self.metric_emitter = None
-        self.tasks = []
         self._stop = asyncio.Event()
 
     @classmethod
@@ -67,16 +63,14 @@ class BrickRunner:
             context=logging.global_context,
         )
 
-        self.message_transport = RabbitMQ(brick_definition.message_exchange)
         self.input = Input(
             brick_definition.input_queues,
             max_idle_time=brick_definition.runtime_parameters.exit_after_idle_seconds,
-            transport=self.message_transport,
             logger=self.log,
         )
         self.output = Output(
             brick_definition.connections.output,
-            transport=self.message_transport,
+            brick_definition.message_exchange,
             logger=self.log,
         )
 
@@ -87,7 +81,6 @@ class BrickRunner:
         self.metric_emitter.set_metadata_from_runner(self)
 
         self.add_signal_handlers()
-        self.setup_completed.set()
 
     async def run(self):
         """process items from the input"""
@@ -97,24 +90,49 @@ class BrickRunner:
             with self.brick:
                 if self.brick.is_inlet:
                     await self.brick.process(Packet())
+                    # in case the inlet terminates by itself, the following line forces
+                    # the brickrunner to wait and keep living until someone stops the flow.
+                    # Otherwise the GM would keep restarting the inlet.
                     await self._stop.wait()
-                    return
-
-                await self.input.start()
-                async for packet in self.input:
-                    self.log.debug("process packet: %s", packet)
-                    await self.brick.process(packet)
-                    self.input.mark_done(packet)
+                else:
+                    await self.input.start()
+                    async for packet in self.input:
+                        self.log.debug("process packet: %s", packet)
+                        try:
+                            await self.brick.process(packet)
+                        except Exception as exception:
+                            self.log.with_context.error(
+                                "processing packet: %s failed: %r",
+                                packet.payload,
+                                exception,
+                                exc_info=True,
+                            )
+                        finally:
+                            self.input.mark_done(packet)
         except Exception:  # pylint: disable=broad-except
             self.log.with_context.error("Brick failed", exc_info=True)
-        self.log.with_context.info("Exit")
 
-    def schedule_shutdown(self, sig, frame):  # pylint: disable=unused-argument
+        await asyncio.sleep(0.1)  # last chance for other tasks to finish
+        self.log.with_context.warning("Exit")
+
+    def schedule_shutdown(self, sig=signal.SIGABRT, frame=None):  # pylint: disable=unused-argument
         self.log.info(
             "Received signal %s - scheduling shutdown",
             signal.Signals(sig).name,  # pylint: disable=no-member
         )  # pylint: disable=no-member
-        asyncio.create_task(self.shutdown())
+
+        self._stop.set()
+        self.brick.terminate()
+
+        async def shutdown():
+            # stop async stuff
+            if not self.brick.is_inlet:
+                await self.input.stop()
+
+            logging.flush_kafka_log_handler()
+            await self.metric_emitter.stop()
+
+        asyncio.create_task(shutdown())
 
     def add_signal_handlers(self):
         signals = (signal.SIGINT, signal.SIGTERM)
@@ -122,19 +140,3 @@ class BrickRunner:
             signals += (signal.SIGBREAK,)  # pylint: disable=no-member
         for sig in signals:
             signal.signal(sig, self.schedule_shutdown)
-
-    async def stop_processing(self):
-        """stop processing bricks"""
-        self.log.info("Stop Processing")
-        self._stop.set()
-        logging.flush_kafka_log_handler()
-        await self.input.stop()
-        self.brick.terminate()
-        await self.message_transport.disconnect()
-        await self.metric_emitter.stop()
-
-    async def shutdown(self):
-        """shuts down the brick runner"""
-        self.log.with_context.info("Initiating Shutdown")
-        await self.stop_processing()
-        await cancel_tasks(self.tasks, wait_cancelled=True)
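
The shutdown path after this change, in one glance (given a BrickRunner instance
"runner"; when called directly, sig defaults to SIGABRT):

    # signal -> schedule_shutdown: set the stop event, terminate the brick, then
    # finish the async parts (input consumers, kafka log handler, metrics) in a task
    signal.signal(signal.SIGTERM, runner.schedule_shutdown)
    runner.schedule_shutdown()  # also callable programmatically, e.g. from tests
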
diff --git a/titanfe/apps/brick_runner/transport.py b/titanfe/apps/brick_runner/transport.py
deleted file mode 100644
index d1ece89..0000000
--- a/titanfe/apps/brick_runner/transport.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#
-# Copyright (c) 2019-present, wobe-systems GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# found in the LICENSE file in the root directory of this source tree.
-#
-"""transport medium for packets"""
-
-import asyncio
-
-import aioamqp
-import aioamqp.protocol
-import aioamqp.channel
-
-import titanfe.log
-from titanfe.config import configuration as config
-
-
-class RabbitMQ:
-    """RabbitMQ transport for packets"""
-
-    # TODO: make "robust" - handle reconnect and stuff
-    #  maybe find inspiration in aio_pikas RobustConnection/Channel/Queue?
-
-    def __init__(self, exchange_name, logger=None):
-        self.log = (
-            logger.getChild("transport.RabbitMQ") if logger else titanfe.log.getLogger(__name__)
-        )
-        self.exchange_name = exchange_name
-        self._consumers = {}
-        self._connection: aioamqp.protocol.AmqpProtocol = None
-        self._channel: aioamqp.channel.Channel = None
-
-    async def connect(self):
-        """create new rabbit mq connection/channel"""
-        _, self._connection = await aioamqp.connect(
-            host=config.rabbitmq_params.host,
-            port=config.rabbitmq_params.port,
-            login=config.rabbitmq_params.user,
-            password=config.rabbitmq_params.password,
-            heartbeat=60,
-        )
-        self.log.debug("connected to rabbitmq")
-        self._channel = await self._connection.channel()
-        await self._channel.basic_qos(prefetch_count=2)
-
-    async def disconnect(self):
-        """disconnect"""
-        if not self._connection:
-            return
-
-        await asyncio.gather(*(self.stop_consuming(consumer) for consumer in list(self._consumers)))
-
-        await self._channel.close()
-        await self._connection.close()
-        self.log.debug("disconnected from rabbitmq")
-
-    async def connection(self):
-        if not self._connection:
-            await self.connect()
-        return self._connection
-
-    async def channel(self) -> aioamqp.channel.Channel:
-        await self.connection()
-        return self._channel
-
-    async def start_consuming(self, queue_name, on_new_message_callback):
-        """start consuming the given queue"""
-        async def callback_wrapper(msgchannel, body, envelope, _):
-            async def done_callback():
-                await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
-
-            await on_new_message_callback(body, done_callback)
-
-        channel = await self.channel()
-
-        await channel.queue_declare(queue_name, durable=True)
-        consumer_tag = await channel.basic_consume(callback_wrapper, queue_name)
-        self._consumers[queue_name] = consumer_tag["consumer_tag"]
-
-    async def stop_consuming(self, queue_name):
-        """stop consuming the queue"""
-        tag = self._consumers.pop(queue_name)
-        channel = await self.channel()
-        await channel.basic_cancel(tag)
-
-    async def publish(self, queue_name, message):
-        """publish a message in the queue"""
-        if not isinstance(message, bytes):
-            message = bytes(message)
-
-        self.log.debug("publish to %r: %r", queue_name, message)
-
-        channel = await self.channel()
-        try:
-            await channel.basic_publish(message, self.exchange_name, routing_key=queue_name)
-        except aioamqp.exceptions.ChannelClosed:
-            pass  # we are most likely shutting down operations
diff --git a/titanfe/testing/testrunner.py b/titanfe/testing/testrunner.py
index a9ddff9..cd3d269 100644
--- a/titanfe/testing/testrunner.py
+++ b/titanfe/testing/testrunner.py
@@ -248,7 +248,7 @@ class TestRunner:
 
         runner.brick.is_inlet = issubclass(runner.brick.module.Brick, InletBrickBase)
 
-        runner.tasks.append(asyncio.create_task(self.check_terminate(runner)))
+        runner._tasks.append(asyncio.create_task(self.check_terminate(runner)))
 
         runner.setup_completed.set()
         self.setup_completed.set()
-- 
GitLab


From ddbb67f6079c332b722b19215835bbcd3b35d41e Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Mon, 23 Oct 2023 16:06:20 +0200
Subject: [PATCH 12/29] fix linting issues

---
 .flake8                               | 18 ++++++++++++------
 test/brick_runner/conftest.py         | 23 +++++++++++++----------
 test/brick_runner/test_runner.py      | 24 +++++-------------------
 titanfe/apps/brick_runner/__main__.py |  2 +-
 titanfe/apps/brick_runner/brick.py    |  6 ++++--
 titanfe/apps/brick_runner/input.py    |  7 +++----
 titanfe/apps/brick_runner/output.py   | 19 +++++++------------
 titanfe/apps/brick_runner/runner.py   |  5 +++--
 titanfe/testing/testrunner.py         |  4 +---
 9 files changed, 49 insertions(+), 59 deletions(-)

diff --git a/.flake8 b/.flake8
index 5f9064f..e9632b0 100644
--- a/.flake8
+++ b/.flake8
@@ -1,9 +1,15 @@
 [flake8]
-exclude = doc,
-          dockerfiles,
-          site-packages,
-          experiments,
-          build,
-          setup.py
+exclude =
+    doc,
+    dockerfiles,
+    site-packages,
+    experiments,
+    build,
+    setup.py,
+    get-pip.py,
+
+extend-ignore =
+  E203,  # whitespace before ':' (black occasionally enforces this)
+  F821,  # undefined name (unfortunately it trips over forward declarations in type hints)
 
 max-line-length = 100
diff --git a/test/brick_runner/conftest.py b/test/brick_runner/conftest.py
index 3efd878..d840c86 100644
--- a/test/brick_runner/conftest.py
+++ b/test/brick_runner/conftest.py
@@ -9,8 +9,6 @@
 Fixtures for BrickRunner-Tests
 """
 import asyncio
-import queue
-# pylint: disable=redefined-outer-name
 import sys
 import types
 import logging
@@ -25,6 +23,8 @@ from titanfe.apps.brick_runner.runner import BrickRunner
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
 from titanfe.config import configuration
 
+# pylint: disable=redefined-outer-name
+
 logging.basicConfig(
     stream=sys.stdout, format="%(asctime)s %(levelname)s %(name)s: %(message)s", level=logging.ERROR
 )
@@ -69,18 +69,20 @@ class GridManagerDummy:
 
 
 class InputTransportDouble:
-    def __init__(self, *args, **kwargs):
-        self.IN = defaultdict(asyncio.Queue)
+    """replace rabbit mq"""
+    def __init__(self, *_, **__):
+        self.IN = defaultdict(asyncio.Queue)  # pylint: disable=invalid-name
 
     async def start_consuming(self, queue_name, callback):
+        """begin consuming the queue"""
         async def _start_consuming():
             async def done_callback():
                 self.IN[queue_name].task_done()
 
             while True:
                 try:
-                    q = self.IN[queue_name]
-                    message = await asyncio.wait_for(q.get(), timeout=0.25)
+                    _queue = self.IN[queue_name]
+                    message = await asyncio.wait_for(_queue.get(), timeout=0.25)
                 except asyncio.TimeoutError:
                     continue
 
@@ -99,10 +101,11 @@ def patched_input_transport():
         yield
 
 
-class OutputTransportDouble:
-    def __init__(self, exchange_name, logger=None):
+class OutputTransportDouble:  # pylint: disable=too-few-public-methods
+    """replace rabbit mq"""
+    def __init__(self, exchange_name, _=None):
         self.exchange_name = exchange_name
-        self.OUT = asyncio.Queue()
+        self.OUT = asyncio.Queue()  # pylint: disable=invalid-name
 
     def publish(self, queue_name, message):
         async def _publish():
@@ -122,6 +125,7 @@ def patched_output_transport():
 @pytest.fixture()
 async def brick_runner(patched_input_transport, patched_output_transport):
     """set up a brick runner, with networking patched away"""
+    # pylint: disable=unused-argument
     guess_module_path = "titanfe.apps.control_peer.brick.BrickBaseDefinition.guess_module_path"
     with patch(guess_module_path, MagicMock(return_value=Path("n/a"))):
         brick_definition = BrickInstanceDefinition.from_gridmanager(
@@ -189,4 +193,3 @@ async def brick_runner(patched_input_transport, patched_output_transport):
         brick_runner = await BrickRunner.create("R-Test", brick_definition)
 
     yield brick_runner
-
diff --git a/test/brick_runner/test_runner.py b/test/brick_runner/test_runner.py
index 1431cad..bb7305a 100644
--- a/test/brick_runner/test_runner.py
+++ b/test/brick_runner/test_runner.py
@@ -12,13 +12,9 @@ Test the BrickRunner itself
 # pylint: disable=redefined-outer-name
 
 import asyncio
-import queue
-from collections import defaultdict
-from unittest.mock import patch
 
 import pytest
 
-
 from ujotypes import UjoStringC
 
 from titanfe.apps.brick_runner.value_mapping import Buffer
@@ -27,11 +23,8 @@ from titanfe.brick import BrickBase
 from titanfe.constants import DEFAULT_PORT
 
 
-
-
 @pytest.mark.asyncio
-async def test_basic_packet_processing(brick_runner
-):  # noqa: F811
+async def test_basic_packet_processing(brick_runner):  # noqa: F811
     """A Packet is taken from the input, processed within the Brick's module
     and ends up in the output with a new payload"""
 
@@ -43,7 +36,7 @@ async def test_basic_packet_processing(brick_runner
 
     brick_runner.brick.module.Brick = Brick
 
-    brick_runner.input._queues = {DEFAULT_PORT: DEFAULT_PORT}
+    brick_runner.input._queues = {DEFAULT_PORT: DEFAULT_PORT}  # pylint: disable=protected-access
 
     runner_run = asyncio.create_task(brick_runner.run())
 
@@ -55,12 +48,7 @@ async def test_basic_packet_processing(brick_runner
     await brick_runner.input.transport.IN[DEFAULT_PORT].put(input_packet1)
     await brick_runner.input.transport.IN[DEFAULT_PORT].put(input_packet2)
 
-    # {
-    #     'message': Packet(uid="Test", payload="NewValue"),
-    #     'exchange': 'dummy_exchange',
-    #     'routing_key': ('Next-Dummy:Input',)
-    # }
-
+    # pylint: disable=unused-variable
     queue_name, packet = await brick_runner.output.transport.OUT.get()
     assert packet.payload == UjoStringC("NewValue")
     assert input_packet1.uid == packet.uid
@@ -81,9 +69,7 @@ async def test_basic_packet_processing(brick_runner
 
 @pytest.mark.skip("we have disabled the brick runner life cycle management")
 @pytest.mark.asyncio
-async def test_exit_when_idle(
-    brick_runner
-):  # noqa: F811
+async def test_exit_when_idle(brick_runner):  # noqa: F811
     """The runner should exit after being idle for a specific amount of time"""
     max_idle_seconds = 0.2
 
@@ -101,4 +87,4 @@ async def test_exit_when_idle(
     else:
         assert True
 
-    brick_runner.schedule_shutdown()
\ No newline at end of file
+    brick_runner.schedule_shutdown()
diff --git a/titanfe/apps/brick_runner/__main__.py b/titanfe/apps/brick_runner/__main__.py
index 681b829..8097967 100644
--- a/titanfe/apps/brick_runner/__main__.py
+++ b/titanfe/apps/brick_runner/__main__.py
@@ -53,4 +53,4 @@ def main():
 
 if __name__ == "__main__":
     main()
-    sys.exit(0)
\ No newline at end of file
+    sys.exit(0)
diff --git a/titanfe/apps/brick_runner/brick.py b/titanfe/apps/brick_runner/brick.py
index 7f4cbf3..caad931 100644
--- a/titanfe/apps/brick_runner/brick.py
+++ b/titanfe/apps/brick_runner/brick.py
@@ -12,13 +12,15 @@ from copy import copy
 
 import janus
 
+from ujotypes import UjoBase
+
 from titanfe import log as logging
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
 from titanfe.brick import InletBrickBase
 from titanfe.constants import DEFAULT_PORT
 from titanfe.ujo_helper import python_to_ujo
 from titanfe.utils import get_module, time_delta_in_ms
-from ujotypes import UjoBase
+
 from .adapter import BrickAdapter, AdapterMeta
 from .output import Output
 from .packet import Packet
@@ -103,9 +105,9 @@ class Brick:
         return time_delta_in_ms(self.last_execution_start)
 
     async def process(self, packet):
+        """run the brick module for the given packet"""
         self._current_packet = packet
 
-        """run the brick module for the given packet in a separate thread"""
         self.log.info(
             "(%s) execute Brick: %s(%s) for %r",
             self.flow.name,
diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index c073a49..98db5f1 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -13,7 +13,6 @@ import typing as T
 import aioamqp
 import aioamqp.protocol
 import aioamqp.channel
-from wheel.cli.pack import pack
 
 import titanfe.log
 from titanfe.apps.brick_runner.packet import Packet
@@ -122,11 +121,11 @@ class Transport:
         self._channel: aioamqp.channel.Channel = None
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        for c in (self._channel, self._connection):
-            if not c:
+        for conn in (self._channel, self._connection):
+            if not conn:
                 continue
             try:
-                c.close()
+                conn.close()
             except Exception:  # pylint: disable=broad-except
                 self.log.warning("failed to close channel/connection", exc_info=True)
 
diff --git a/titanfe/apps/brick_runner/output.py b/titanfe/apps/brick_runner/output.py
index b5921ea..815fa7f 100644
--- a/titanfe/apps/brick_runner/output.py
+++ b/titanfe/apps/brick_runner/output.py
@@ -43,9 +43,7 @@ class Output:
         exchange_name,
         logger=None,
     ):
-        self.log = (
-            logger.getChild("Output") if logger else titanfe.log.getLogger(__name__)
-        )
+        self.log = logger.getChild("Output") if logger else titanfe.log.getLogger(__name__)
 
         self.ports: T.Dict[str, Port] = {
             port_name: Port(
@@ -62,9 +60,7 @@ class Output:
 
     def put(self, packet, port):
         consumers = self[port].consumers
-        self.log.debug(
-            "publish %r on port %r to consumers: %r", packet, port, consumers
-        )
+        self.log.debug("publish %r on port %r to consumers: %r", packet, port, consumers)
         for consumer in consumers:
             next_packet = consumer.create_packet(packet)
             self.transport.publish(consumer.queue_name, next_packet)
@@ -84,9 +80,7 @@ class Consumer:
         return cls(
             queue_name=connection.target_queue,
             target_type=(
-                schema_to_type(
-                    connection.targetPort.schema, connection.targetPort.typeName
-                )
+                schema_to_type(connection.targetPort.schema, connection.targetPort.typeName)
                 if connection.targetPort.schema
                 else UJO_VARIANT_NONE
             ),
@@ -123,6 +117,7 @@ class Consumer:
 
 
 class Transport:
+    """RabbitMQ transport for publishing"""
     def __init__(self, exchange_name, logger):
         self.log = logger.getChild("Transport")
 
@@ -141,9 +136,9 @@ class Transport:
         self.channel = self.connection.channel()
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        for c in (self.channel, self.connection):
+        for conn in (self.channel, self.connection):
             try:
-                c.close()
+                conn.close()
             except Exception:  # pylint: disable=broad-except
                 self.log.warning("failed to close channel/connection", exc_info=True)
 
@@ -152,4 +147,4 @@ class Transport:
             self.exchange_name,
             routing_key=queue_name,
             body=bytes(packet),
-        )
\ No newline at end of file
+        )
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index b1d6bce..721b662 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -13,7 +13,7 @@ import signal
 
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
 from titanfe import log as logging
-from titanfe.utils import cancel_tasks
+
 from .brick import Brick
 from .input import Input
 from .metrics import MetricEmitter
@@ -100,7 +100,7 @@ class BrickRunner:
                         self.log.debug("process packet: %s", packet)
                         try:
                             await self.brick.process(packet)
-                        except Exception as exception:
+                        except Exception as exception:  # pylint: disable=broad-exception-caught
                             self.log.with_context.error(
                                 "processing packet: %s failed: %r",
                                 packet.payload,
@@ -116,6 +116,7 @@ class BrickRunner:
         self.log.with_context.warning("Exit")
 
     def schedule_shutdown(self, sig=signal.SIGABRT, frame=None):  # pylint: disable=unused-argument
+        """schedule the shutdown of the brick runner"""
         self.log.info(
             "Received signal %s - scheduling shutdown",
             signal.Signals(sig).name,  # pylint: disable=no-member
diff --git a/titanfe/testing/testrunner.py b/titanfe/testing/testrunner.py
index cd3d269..d338d2e 100644
--- a/titanfe/testing/testrunner.py
+++ b/titanfe/testing/testrunner.py
@@ -71,6 +71,7 @@ class Input(OriginalInput):
     """TestRunner: Input replacement"""
 
     def __init__(self):
+        # pylint: disable=super-init-not-called
         self.queue = janus.Queue()
 
         self.max_idle_time = 1  # give it at least a chance to run
@@ -248,9 +249,6 @@ class TestRunner:
 
         runner.brick.is_inlet = issubclass(runner.brick.module.Brick, InletBrickBase)
 
-        runner._tasks.append(asyncio.create_task(self.check_terminate(runner)))
-
-        runner.setup_completed.set()
         self.setup_completed.set()
 
         try:
-- 
GitLab
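
For reference, the defensive close pattern shared by both Transport.__exit__
implementations above can be sketched standalone (illustrative names only, not
the titanfe API): close the channel before the connection and log individual
failures instead of raising, so one broken handle cannot keep the other open.

    import logging

    log = logging.getLogger("transport")

    def close_all(*closables):
        """Close channel/connection handles in order; log failures instead of raising."""
        for closable in closables:
            if not closable:
                continue
            try:
                closable.close()
            except Exception:  # broad on purpose, mirroring Transport.__exit__
                log.warning("failed to close %r", closable, exc_info=True)

    # usage, matching the order in Transport.__exit__: channel first, then connection
    # close_all(self._channel, self._connection)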


From 207b526ec898c094088465f0709052552f06c8f8 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Mon, 23 Oct 2023 16:24:03 +0200
Subject: [PATCH 13/29] pin pytest/pylint/flake8 to fixed versions, so that we
 all always use the same versions (like the pipeline runners)

---
 requirements_dev.txt                | 9 ++++-----
 titanfe/apps/brick_runner/input.py  | 2 +-
 titanfe/apps/brick_runner/runner.py | 2 +-
 3 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/requirements_dev.txt b/requirements_dev.txt
index 63b4c88..94ec7a9 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -2,11 +2,10 @@
 -r requirements_prod.txt
 
 # for development:
-flake8
-pylint
-pytest
-pytest-asyncio==0.18.3
+flake8 == 4.0.1
+pylint == 2.11.1
+pytest == 6.2.5
+pytest-asyncio == 0.18.3
 pytest-aiohttp
 pytest-localserver
 requests
-
diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index 98db5f1..dd57c0a 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -86,7 +86,7 @@ class Input:
         try:
             if not isinstance(packet, Packet):
                 packet = Packet.from_bytes(packet)
-        except Exception:  # pylint: disable=broad-exception-caught  # who knows what might go wrong
+        except Exception:  # pylint: disable=broad-except  # who knows what might go wrong
             self.log.error("Failed to convert message to packet", exc_info=True)
             done_callback()
             return
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index 721b662..04a2319 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -100,7 +100,7 @@ class BrickRunner:
                         self.log.debug("process packet: %s", packet)
                         try:
                             await self.brick.process(packet)
-                        except Exception as exception:  # pylint: disable=broad-exception-caught
+                        except Exception as exception:  # pylint: disable=broad-except
                             self.log.with_context.error(
                                 "processing packet: %s failed: %r",
                                 packet.payload,
-- 
GitLab
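
A quick way to check a local environment against these pins is a sketch along
these lines (the pin values are from requirements_dev.txt above; the helper
itself is illustrative, not part of the repository):

    from importlib.metadata import PackageNotFoundError, version

    # pins as introduced in this patch
    PINS = {"flake8": "4.0.1", "pylint": "2.11.1", "pytest": "6.2.5",
            "pytest-asyncio": "0.18.3"}

    for name, expected in PINS.items():
        try:
            installed = version(name)
        except PackageNotFoundError:
            print(f"{name}: not installed (expected {expected})")
            continue
        marker = "ok" if installed == expected else f"differs from pin {expected}"
        print(f"{name} {installed}: {marker}")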


From 59fae6a6cfc8b2018411075b3481f413cb0d186f Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Mon, 23 Oct 2023 16:26:10 +0200
Subject: [PATCH 14/29] fix import `from apps...` and reformat the file

---
 test/brick_runner/test_portMapping.py | 48 +++++++++++++++------------
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/test/brick_runner/test_portMapping.py b/test/brick_runner/test_portMapping.py
index 59b6ee4..6f10e4b 100644
--- a/test/brick_runner/test_portMapping.py
+++ b/test/brick_runner/test_portMapping.py
@@ -5,14 +5,14 @@
 # found in the LICENSE file in the root directory of this source tree.
 #
 # pylint: skip-file
-from apps.brick_runner.value_mapping import BufferDescription, MappingRules
+
+from ujotypes import UjoStringUTF8, UjoInt64, UjoFloat64, UjoBool, UjoMap
 
 from titanfe.brick import BrickBase
 from titanfe.testing import TestRunner
 from titanfe.ujo_helper import python_to_ujo
-from ujotypes import UjoStringUTF8, UjoInt64, UjoFloat64, UjoBool, UjoMap
 from titanfe.apps.brick_runner.packet import Buffer
-
+from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
 
 TestRunner.__test__ = False  # prevent PytestCollectionWarning "cannot collect test class"
 
@@ -24,7 +24,6 @@ class Brick(BrickBase):
 
 
 def test_update_buffer():
-
     buffer_description = BufferDescription({"someID": {"source": ["author", "name"]}})
     buffer = Buffer()
     result = python_to_ujo({"name": "test"})
@@ -35,7 +34,6 @@ def test_update_buffer():
 
 
 def test_update_buffer_flat():
-
     buffer_description = BufferDescription({"someID": {"source": ["Output"]}})
     buffer = Buffer()
     result = UjoStringUTF8("test")
@@ -46,28 +44,34 @@ def test_update_buffer_flat():
 
 
 def test_combining_buffer_and_result():
-    buffer_description = BufferDescription({
-        "this": {
-            "source": ["author", "name"],  # not in the buffer yet, get from result
-        },
-        "that": {
-            "source": [],  # pre-existing in the buffer, should be kept
-        },
-    })
-
-    buffer = Buffer.from_dict({
-        "that": "already exists",
-        "drop": "me now",  # no longer in the description, should be dropped from buffer
-    })
+    buffer_description = BufferDescription(
+        {
+            "this": {
+                "source": ["author", "name"],  # not in the buffer yet, get from result
+            },
+            "that": {
+                "source": [],  # pre-existing in the buffer, should be kept
+            },
+        }
+    )
+
+    buffer = Buffer.from_dict(
+        {
+            "that": "already exists",
+            "drop": "me now",  # no longer in the description, should be dropped from buffer
+        }
+    )
 
     result = python_to_ujo({"name": "test"})
 
     buffer = buffer.new_buffer_from_result(result, buffer_description)
 
-    expected_buffer = Buffer.from_dict({
-        "this": "test",
-        "that": "already exists",
-    })
+    expected_buffer = Buffer.from_dict(
+        {
+            "this": "test",
+            "that": "already exists",
+        }
+    )
     assert buffer == expected_buffer
 
 
-- 
GitLab


From e33fb24181dd1379d8f111d670a1c057a246256e Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Mon, 23 Oct 2023 16:42:49 +0200
Subject: [PATCH 15/29] use fixed versions of aioamqp/pika

---
 requirements_prod.txt | 4 ++--
 setup.py              | 3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/requirements_prod.txt b/requirements_prod.txt
index 1a7f9cd..face9e9 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -15,8 +15,8 @@ dataclasses-json
 requests
 docopt
 pycryptodome
-aioamqp
-pika
+aioamqp == 0.15.0
+pika == 1.3.2
 
 # linux only:
 uvloop == 0.13.*;platform_system=="Linux"
diff --git a/setup.py b/setup.py
index 08d639f..9a418b0 100644
--- a/setup.py
+++ b/setup.py
@@ -79,7 +79,8 @@ setup(
         "dataclasses-json",
         "requests",
         "docopt",
-        "pika",
+        "aioamqp == 0.15.0",
+        "pika == 1.3.2",
         "pycryptodome",
     ],
     ext_modules=[],
-- 
GitLab


From feab1a0bb466fb1c148b0dfc0f2b5ac1f48334c7 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Tue, 24 Oct 2023 09:08:46 +0200
Subject: [PATCH 16/29] fix the TestRunner

---
 titanfe/testing/testrunner.py | 24 ++++++++++--------------
 1 file changed, 10 insertions(+), 14 deletions(-)

diff --git a/titanfe/testing/testrunner.py b/titanfe/testing/testrunner.py
index d338d2e..0e2f7ee 100644
--- a/titanfe/testing/testrunner.py
+++ b/titanfe/testing/testrunner.py
@@ -13,6 +13,7 @@ Fixtures for BrickRunner-Tests
 import asyncio
 import inspect
 import logging
+import queue
 import sys
 import threading
 from typing import Union
@@ -93,11 +94,11 @@ class Input(OriginalInput):
 class Output:
     """Output replacement"""
     def __init__(self):
-        self.queue = janus.Queue()
-        self.get = self.queue.sync_q.get
+        self.queue = queue.Queue()
+        self.get = self.queue.get
 
-    async def put(self, packet, port):
-        await self.queue.async_q.put((port, packet.payload))
+    def put(self, packet, port):
+        self.queue.put((port, packet.payload))
 
     async def close(self):
         pass
@@ -213,9 +214,8 @@ class TestRunner:
 
     async def _create_and_run_runner(self, brick_class_or_path_to_module, parameters):
         runner = self.runner
-        runner.gridmanager = GridManagerDummy()
+
         runner.metric_emitter = MetricEmitterDummy()
-        runner.server = MagicMock()
 
         is_brick = (
             inspect.isclass(brick_class_or_path_to_module)
@@ -224,13 +224,9 @@ class TestRunner:
 
         instance_definition = BrickInstanceDefinition.from_gridmanager(self.definition)
 
-        runner.message_transport = MagicMock()
-
         runner.input = Input()
         runner.output = Output()
 
-        runner.metric_emitter.set_metadata_from_runner(runner)
-
         if is_brick:
             instance_definition.base.module_path = "sys"
         else:
@@ -253,10 +249,10 @@ class TestRunner:
 
         try:
             await runner.run()
-        except TypeError:
-            # TODO: figure out why `self.input` is occasionally None during brickrunner.shutdown()
-            #       of course then `await self.input.close()` fails :/
-            pass
+        except Exception as exception:
+            logging.error("Brick runner execution failed", exc_info=True)
+            raise RuntimeError("Brick runner execution failed") from exception
+
         while not self.terminate.is_set():
             await asyncio.sleep(0.1)
 
-- 
GitLab
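
The Output replacement above works because a plain queue.Queue is thread-safe:
the brick runner's asyncio loop (running in the TestRunner's worker thread) can
call the synchronous put(), while the test thread blocks on get(). A minimal
sketch of that bridge, with illustrative names:

    import asyncio
    import queue
    import threading

    out = queue.Queue()  # filled from the loop thread, drained from the test thread

    async def produce():
        for i in range(3):
            out.put(("OUT", f"packet-{i}"))  # queue.Queue.put is thread-safe
            await asyncio.sleep(0)

    worker = threading.Thread(target=lambda: asyncio.run(produce()))
    worker.start()
    for _ in range(3):
        print(out.get(timeout=1))  # blocking get, like the TestRunner's output.get
    worker.join()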


From 1898cc08246abfa77d9a12217c853788b2e0cd68 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Tue, 24 Oct 2023 09:53:51 +0200
Subject: [PATCH 17/29] pin versions of all requirements to ensure same results
 everywhere

---
 requirements_dev.txt  |  5 ++---
 requirements_prod.txt | 12 ++++++------
 setup.py              | 12 ++++++------
 3 files changed, 14 insertions(+), 15 deletions(-)

diff --git a/requirements_dev.txt b/requirements_dev.txt
index 94ec7a9..15e3d6d 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -5,7 +5,6 @@
 flake8 == 4.0.1
 pylint == 2.11.1
 pytest == 6.2.5
+pytest-aiohttp == 1.0.4
 pytest-asyncio == 0.18.3
-pytest-aiohttp
-pytest-localserver
-requests
+pytest-localserver == 0.8.0
diff --git a/requirements_prod.txt b/requirements_prod.txt
index face9e9..6ab4083 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -1,8 +1,9 @@
 # production
 ujotypes >= 0.1.2
 ujoschema >= 0.3.66
-ruamel.yaml
-janus
+dataclasses-json==0.5.14
+ruamel.yaml == 0.17.*
+janus == 1.0.0
 aiokafka == 0.5.2 # fixed due to the kafka-python later version require
 kafka-python == 1.4.6
 elasticsearch == 7.8.*  # strangely 7.9.0 is missing the async parts that were introduced with 7.8.0?
@@ -11,10 +12,9 @@ starlette ==0.20.4
 uvicorn == 0.9.*  # fixed due to the uvloop later versions require
 aiohttp >= 3.6.2
 aiohttp-requests >= 0.1.3  # as required for elasticsearch async
-dataclasses-json
-requests
-docopt
-pycryptodome
+requests == 2.31.0
+docopt == 0.6.2
+pycryptodome == 3.19.0
 aioamqp == 0.15.0
 pika == 1.3.2
 
diff --git a/setup.py b/setup.py
index 9a418b0..bb59cc9 100644
--- a/setup.py
+++ b/setup.py
@@ -65,8 +65,8 @@ setup(
     install_requires=[
         "ujotypes >=0.1.2",
         "ujoschema >=0.3.66",
-        "ruamel.yaml",
-        "janus",
+        "ruamel.yaml == 0.17.*",
+        "janus == 1.0.0",
         "aiokafka == 0.5.2",  # fixed due to the kafka-python version required by later versions
         "kafka-python == 1.4.6",  # aiokafka 0.5.2 requires this version
         "elasticsearch == 7.8.*",
@@ -76,12 +76,12 @@ setup(
         'uvloop == 0.13.* ;platform_system=="Linux"',
         "aiohttp >= 3.6.2",
         "aiohttp-requests >= 0.1.3",
-        "dataclasses-json",
-        "requests",
-        "docopt",
+        "dataclasses-json == 0.5.14",
+        "requests == 2.31.0",
+        "docopt == 0.6.2",
         "aioamqp == 0.15.0",
         "pika == 1.3.2",
-        "pycryptodome",
+        "pycryptodome == 3.19.0",
     ],
     ext_modules=[],
 )
-- 
GitLab


From 20e6133f01a12baa8d9b5a6ef7118309b7f32e32 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Tue, 24 Oct 2023 10:48:57 +0200
Subject: [PATCH 18/29] fix TestRunner input occasionally not stopping (because
 _getter.cancel() can hit after the _getter was just awaited and before it is
 recreated)

---
 titanfe/apps/brick_runner/input.py |  6 +++++-
 titanfe/testing/testrunner.py      | 21 ++++++++++-----------
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index dd57c0a..145b050 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -36,6 +36,7 @@ class Input:
         self.max_idle_time = max((max_idle_time, 0.2))  # give it at least a chance to run
 
         self._getter = asyncio.Future()
+        self._stop = False
         self._queues = input_queues
         self._packets = asyncio.Queue()
         self._packet_done_callbacks = {}
@@ -44,7 +45,7 @@ class Input:
         return self
 
     async def __anext__(self) -> Packet:
-        while True:  # this can go once we don't need the `continue` any longer
+        while not self._stop:  # this can go once we don't need the `continue` any longer
             self._getter = asyncio.create_task(self._packets.get())
             try:
                 self.log.debug("wait for packet")
@@ -63,6 +64,7 @@ class Input:
                 packet.port = port
                 packet.update_input_exit()
                 return packet
+        raise StopAsyncIteration
 
     async def start(self):
         self.log.debug("start consumers: %r", self._queues)
@@ -74,10 +76,12 @@ class Input:
         )
 
     async def stop(self):
+        """stop consuming queues"""
         self.log.debug("stop consumers: %r", self._queues)
         await asyncio.gather(
             *(self.transport.stop_consuming(queue) for queue in self._queues.values())
         )
+        self._stop = True
         self._getter.cancel()
 
     async def _on_new_message(self, port, packet, done_callback: T.Callable):
diff --git a/titanfe/testing/testrunner.py b/titanfe/testing/testrunner.py
index 0e2f7ee..a63b600 100644
--- a/titanfe/testing/testrunner.py
+++ b/titanfe/testing/testrunner.py
@@ -75,10 +75,11 @@ class Input(OriginalInput):
         # pylint: disable=super-init-not-called
         self.queue = janus.Queue()
 
-        self.max_idle_time = 1  # give it at least a chance to run
+        self.max_idle_time = 0.25  # give it at least a chance to run
         self.log = LOG
 
         self._getter = asyncio.Future()
+        self._stop = False
         self._packets = self.queue.async_q
 
     async def start(self):
@@ -86,6 +87,11 @@ class Input(OriginalInput):
 
     async def stop(self):
         self._getter.cancel()
+        self._stop = True
+
+    def stop_sync(self):
+        self._getter.cancel()
+        self._stop = True
 
     def mark_done(self, _):
         self._packets.task_done()
@@ -202,11 +208,6 @@ class TestRunner:
     def __exit__(self, exc_type, exc_val, exc_tb):
         self.stop()
 
-    async def check_terminate(self, runner):
-        while not self.terminate.is_set():
-            await asyncio.sleep(0.2)
-        runner.brick.terminate()
-
     def run_async_create_and_run(self):
         asyncio.run(
             self._create_and_run_runner(self.brick_class_or_path_to_module, self.parameters)
@@ -253,9 +254,6 @@ class TestRunner:
             logging.error("Brick runner execution failed", exc_info=True)
             raise RuntimeError("Brick runner execution failed") from exception
 
-        while not self.terminate.is_set():
-            await asyncio.sleep(0.1)
-
     @property
     def input(self):
         """add input on the default port by calling `input.put()`,
@@ -300,6 +298,7 @@ class TestRunner:
         self.thread.start()
 
     def stop(self):
-        self.runner.input._getter.cancel()  # pylint: disable=protected-access
-        self.terminate.set()
+        if hasattr(self.runner.brick, "terminate"):
+            self.runner.brick.terminate()
+        self.runner.input.stop_sync()  # pylint: disable=protected-access
         self.thread.join()
-- 
GitLab
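
The race fixed here: stop() may cancel a getter task that has just completed
and is about to be replaced, so the cancellation is lost and the loop keeps
waiting. The _stop flag closes that window. A standalone reproduction of the
pattern (illustrative class, not the titanfe Input):

    import asyncio

    class Stream:
        """Async iterator that stops reliably even if cancel() misses the getter."""

        def __init__(self):
            self._items = asyncio.Queue()
            self._getter = asyncio.Future()
            self._stop = False

        def __aiter__(self):
            return self

        async def __anext__(self):
            while not self._stop:  # re-checked after every cancellation
                self._getter = asyncio.create_task(self._items.get())
                try:
                    return await self._getter
                except asyncio.CancelledError:
                    continue  # check _stop instead of dying on a stale cancel
            raise StopAsyncIteration

        async def put(self, item):
            await self._items.put(item)

        def stop(self):
            self._stop = True
            self._getter.cancel()

    async def main():
        stream = Stream()
        await stream.put("one")
        asyncio.get_running_loop().call_later(0.1, stream.stop)
        async for item in stream:
            print(item)

    asyncio.run(main())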


From 9ed8d6527686d083fff2dcc3c1b0f50612e3c77b Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Wed, 1 Nov 2023 15:25:03 +0100
Subject: [PATCH 19/29] use RMQ instead of Kafka for transporting log messages

---
 test/test_logging.py                |  30 +++----
 titanfe/apps/brick_runner/input.py  |  80 +----------------
 titanfe/apps/brick_runner/output.py |  42 ++-------
 titanfe/apps/brick_runner/runner.py |  11 ++-
 titanfe/apps/control_peer/brick.py  |   2 +-
 titanfe/log.py                      |  59 ++++--------
 titanfe/rabbitmq.py                 | 133 ++++++++++++++++++++++++++++
 7 files changed, 179 insertions(+), 178 deletions(-)
 create mode 100644 titanfe/rabbitmq.py

diff --git a/test/test_logging.py b/test/test_logging.py
index e1a42b4..16565ca 100644
--- a/test/test_logging.py
+++ b/test/test_logging.py
@@ -9,10 +9,12 @@
 
 import logging
 import platform
+from queue import Queue
 from unittest.mock import patch
 
 from kafka import KafkaProducer
 
+from log import RMQ_LOG_EXCHANGE
 from ujotypes import read_buffer
 
 from titanfe import log
@@ -35,15 +37,15 @@ def test_log_record_is_titan_specific():
 def test_logging_sends_contextual_bin_ujo_to_kafka():
     """ assure the log record is sent to kafka as a binary ujo map"""
 
-    kafka_location = "titanfe.log.KafkaProducer"
+    rmq_location = "titanfe.log.RMQ"
 
-    topic = None
-    message = None
 
-    class KafkaDummy(KafkaProducer):
+    class RMQDummy():
         """ a Kafka Dummy that sets the nonlocal variables
             topic and message to the according values on send """
 
+        published = Queue()
+
         # pylint: disable=super-init-not-called, arguments-differ
         def __init__(self, *args, **kwargs):
             pass
@@ -51,21 +53,12 @@ def test_logging_sends_contextual_bin_ujo_to_kafka():
         def close(self, *args, **kwargs):
             pass
 
-        def flush(self):
-            pass
-
-        def send(self, *args, **kwargs):
-            nonlocal topic, message
-            topic = args[0] or kwargs.get("topic")
-            message = args[1] or kwargs.get("value")
+        def publish(self, exchange_name, queue_name, message):
+            self.published.put((exchange_name, queue_name, message))
 
-    with patch(kafka_location, KafkaDummy):
-        configuration.kafka_bootstrap_servers = "127.0.0.1"
-        configuration.no_kafka_today = False
+    with patch(rmq_location, RMQDummy):
         log.initialize("titanfe-test")
 
-    configuration.no_kafka_today = True
-
     logger = log.getLogger("titanfe.test", context=log.FlowContext("FUID", "FLOW", "BUID", "BRICK"))
     logger.error("Test")
 
@@ -82,5 +75,8 @@ def test_logging_sends_contextual_bin_ujo_to_kafka():
         'Timestamp': Ignored()
     }
 
-    assert topic == "titan.logs"
+    exchange, queue, message = RMQDummy.published.get(timeout=0.1)
+
+    assert exchange == RMQ_LOG_EXCHANGE
+    assert queue == "titanfe.test"
     assert read_buffer(message).as_pyobject() == expected
diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index 145b050..b875cc2 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -27,11 +27,12 @@ class Input:
     def __init__(
         self,
         input_queues: T.Dict[Port, str],
+        transport,
         max_idle_time=60,
         logger=None,
     ):
         self.log = logger.getChild("Input") if logger else titanfe.log.getLogger(__name__)
-        self.transport = Transport(self.log.getChild("Transport"))
+        self.transport = transport
 
         self.max_idle_time = max((max_idle_time, 0.2))  # give it at least a chance to run
 
@@ -109,80 +110,3 @@ class Input:
         self._packets.task_done()
 
 
-class Transport:
-    """RabbitMQ transport for packets"""
-
-    # TODO: make "robust" - handle reconnect and stuff
-    #  maybe find inspiration in aio_pikas RobustConnection/Channel/Queue?
-
-    def __init__(self, exchange_name, logger=None):
-        self.log = (
-            logger.getChild("transport.RabbitMQ") if logger else titanfe.log.getLogger(__name__)
-        )
-        self.exchange_name = exchange_name
-        self._consumers = {}
-        self._connection: aioamqp.protocol.AmqpProtocol = None
-        self._channel: aioamqp.channel.Channel = None
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        for conn in (self._channel, self._connection):
-            if not conn:
-                continue
-            try:
-                conn.close()
-            except Exception:  # pylint: disable=broad-except
-                self.log.warning("failed to close channel/connection", exc_info=True)
-
-    async def connect(self):
-        """create new rabbit mq connection/channel"""
-        _, self._connection = await aioamqp.connect(
-            host=config.rabbitmq_params.host,
-            port=config.rabbitmq_params.port,
-            login=config.rabbitmq_params.user,
-            password=config.rabbitmq_params.password,
-            heartbeat=60,
-        )
-        self.log.debug("connected to rabbitmq")
-        self._channel = await self._connection.channel()
-        await self._channel.basic_qos(prefetch_count=2)
-
-    async def disconnect(self):
-        """disconnect"""
-        if not self._connection:
-            return
-
-        await asyncio.gather(*(self.stop_consuming(consumer) for consumer in list(self._consumers)))
-
-        await self._channel.close()
-        await self._connection.close()
-        self.log.debug("disconnected from rabbitmq")
-
-    async def connection(self):
-        if not self._connection:
-            await self.connect()
-        return self._connection
-
-    async def channel(self) -> aioamqp.channel.Channel:
-        await self.connection()
-        return self._channel
-
-    async def start_consuming(self, queue_name, on_new_message_callback):
-        """start consuming the given queue"""
-
-        async def callback_wrapper(msgchannel, body, envelope, _):
-            async def done_callback():
-                await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
-
-            await on_new_message_callback(body, done_callback)
-
-        channel = await self.channel()
-
-        await channel.queue_declare(queue_name, durable=True)
-        consumer_tag = await channel.basic_consume(callback_wrapper, queue_name)
-        self._consumers[queue_name] = consumer_tag["consumer_tag"]
-
-    async def stop_consuming(self, queue_name):
-        """stop consuming the queue"""
-        tag = self._consumers.pop(queue_name)
-        channel = await self.channel()
-        await channel.basic_cancel(tag)
diff --git a/titanfe/apps/brick_runner/output.py b/titanfe/apps/brick_runner/output.py
index 815fa7f..5ab0ffa 100644
--- a/titanfe/apps/brick_runner/output.py
+++ b/titanfe/apps/brick_runner/output.py
@@ -40,6 +40,7 @@ class Output:
     def __init__(
         self,
         output_connections: T.Dict["PortName", T.List[Connection]],
+        transport,
         exchange_name,
         logger=None,
     ):
@@ -53,17 +54,18 @@ class Output:
             for port_name, targets in output_connections.items()
         }
 
-        self.transport = Transport(exchange_name, self.log)
+        self.transport = transport
+        self.exchange_name = exchange_name
 
     def __getitem__(self, port) -> Port:
         return self.ports[port]
 
     def put(self, packet, port):
         consumers = self[port].consumers
-        self.log.debug("publish %r on port %r to consumers: %r", packet, port, consumers)
         for consumer in consumers:
             next_packet = consumer.create_packet(packet)
-            self.transport.publish(consumer.queue_name, next_packet)
+            self.log.debug("publish %r on port %r to consumer: %r", next_packet, port, consumers)
+            self.transport.publish(self.exchange_name, consumer.queue_name, bytes(next_packet))
 
 
 @dataclass
@@ -114,37 +116,3 @@ class Consumer:
 
         packet.payload = new_payload
         return packet
-
-
-class Transport:
-    """RabbitMQ transport for publishing"""
-    def __init__(self, exchange_name, logger):
-        self.log = logger.getChild("Transport")
-
-        self.exchange_name = exchange_name
-
-        self.connection = pika.BlockingConnection(
-            pika.ConnectionParameters(
-                host=config.rabbitmq_params.host,
-                port=config.rabbitmq_params.port,
-                credentials=pika.credentials.PlainCredentials(
-                    config.rabbitmq_params.user, config.rabbitmq_params.password
-                ),
-            )
-        )
-
-        self.channel = self.connection.channel()
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        for conn in (self.channel, self.connection):
-            try:
-                conn.close()
-            except Exception:  # pylint: disable=broad-except
-                self.log.warning("failed to close channel/connection", exc_info=True)
-
-    def publish(self, queue_name, packet):
-        self.channel.basic_publish(
-            self.exchange_name,
-            routing_key=queue_name,
-            body=bytes(packet),
-        )
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index 04a2319..4eb4634 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -11,8 +11,10 @@ import asyncio
 import os
 import signal
 
+
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
 from titanfe import log as logging
+from titanfe.rabbitmq import RMQ, AsyncRMQ
 
 from .brick import Brick
 from .input import Input
@@ -64,13 +66,15 @@ class BrickRunner:
         )
 
         self.input = Input(
-            brick_definition.input_queues,
+            input_queues=brick_definition.input_queues,
+            transport=AsyncRMQ(),
             max_idle_time=brick_definition.runtime_parameters.exit_after_idle_seconds,
             logger=self.log,
         )
         self.output = Output(
-            brick_definition.connections.output,
-            brick_definition.message_exchange,
+            output_connections=brick_definition.connections.output,
+            transport=RMQ(),
+            exchange_name=brick_definition.message_exchange,
             logger=self.log,
         )
 
@@ -130,7 +134,6 @@ class BrickRunner:
             if not self.brick.is_inlet:
                 await self.input.stop()
 
-            logging.flush_kafka_log_handler()
             await self.metric_emitter.stop()
 
         asyncio.create_task(shutdown())
diff --git a/titanfe/apps/control_peer/brick.py b/titanfe/apps/control_peer/brick.py
index 8231673..f458bc1 100644
--- a/titanfe/apps/control_peer/brick.py
+++ b/titanfe/apps/control_peer/brick.py
@@ -351,7 +351,7 @@ class BrickInstanceDefinition:
 
     @property
     def message_exchange(self):
-        return truncate(self.flow.uid + ":" + self.flow.name, 255)
+        return truncate("openfba." + self.flow.uid + ":" + self.flow.name, 255)
 
     @property
     def input_queues(self):
diff --git a/titanfe/log.py b/titanfe/log.py
index 2d61436..e458433 100644
--- a/titanfe/log.py
+++ b/titanfe/log.py
@@ -18,11 +18,12 @@ import logging.config
 from datetime import datetime
 
 import ruamel.yaml
-from kafka import KafkaProducer
 
-from titanfe.config import configuration
+from titanfe.rabbitmq import RMQ
 from titanfe.ujo_helper import py_to_ujo_bytes
 
+RMQ_LOG_EXCHANGE = "openfba.logging"
+
 
 class TitanLogRecord(logging.LogRecord):  # pylint: disable=too-few-public-methods
     """A log record - Titan style"""
@@ -145,13 +146,12 @@ def initialize(service=""):
         log_config = ruamel.yaml.safe_load(cfile)
         logging.config.dictConfig(log_config)
 
-    if configuration.kafka_bootstrap_servers and not configuration.no_kafka_today:
-        kafka_handler = KafkaLogHandler(
-            bootstrap_server=configuration.kafka_bootstrap_servers,
-            topic=configuration.kafka_log_topic,
-        )
-        root = logging.getLogger("titanfe")
-        root.addHandler(kafka_handler)
+    rmq_publisher = RMQ()
+    rmq_publisher.channel.exchange_declare(RMQ_LOG_EXCHANGE, exchange_type="topic", durable=True)
+
+    rmq_handler = RabbitMQLogHandler(rmq_publisher, exchange_name=RMQ_LOG_EXCHANGE)
+    root = logging.getLogger()
+    root.addHandler(rmq_handler)
 
 
 def add_logging_level(level, level_name, method_name=None):
@@ -179,16 +179,6 @@ def add_logging_level(level, level_name, method_name=None):
     setattr(logging, method_name, log_to_root)
 
 
-def flush_kafka_log_handler():
-    """ "Flush messages sent to KafkaLogHandler and
-    suppress warnings from kafka
-    --> called during shutdown of brick runner"""
-    for handler in logging.getLogger("titanfe").handlers:
-        if isinstance(handler, KafkaLogHandler):
-            handler.flush()
-    logging.getLogger("kafka").propagate = False
-
-
 class UjoBinFormatter(logging.Formatter):
     """Format log records as an UjoBinary"""
 
@@ -232,42 +222,29 @@ class UjoBinFormatter(logging.Formatter):
         return ujo_bin_map
 
 
-class KafkaLogHandler(logging.Handler):
-    """Stream LogRecords to Kafka
+class RabbitMQLogHandler(logging.Handler):
+    """Stream LogRecords via RabbitMQ"""
 
-    Arguments:
-        bootstrap_server (str): 'Host:Port' of a kafka bootstrap server
-        topic (str): the kafka topic to produce into
-    """
-
-    def __init__(self, bootstrap_server, topic):
+    def __init__(self, publisher: RMQ, exchange_name: str):
         logging.Handler.__init__(self)
         self.formatter = UjoBinFormatter()
-        self.topic = topic
-        self.producer = KafkaProducer(bootstrap_servers=bootstrap_server)
+        self.exchange_name = exchange_name
+        self.producer = publisher
 
     def emit(self, record):
         """emits the record"""
-        if record.name.startswith("kafka"):
-            # drop kafka logging to avoid infinite recursion
-            return
+        # if record.name.startswith("???"):
+        #     # drop kafka logging to avoid infinite recursion
+        #     return
 
         try:
             log_message = self.format(record)
-            self.producer.send(self.topic, log_message)
+            self.producer.publish(self.exchange_name, record.name, log_message)
         except Exception:  # pylint: disable=broad-except
             exc_info = sys.exc_info()
             traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, sys.stderr)
             del exc_info
 
-    def flush(self):
-        self.producer.flush()
-
-    def close(self):
-        self.producer.flush()
-        self.producer.close()
-        logging.Handler.close(self)
-
 
 logging.setLogRecordFactory(TitanLogRecord)
 
diff --git a/titanfe/rabbitmq.py b/titanfe/rabbitmq.py
new file mode 100644
index 0000000..f491dfa
--- /dev/null
+++ b/titanfe/rabbitmq.py
@@ -0,0 +1,133 @@
+import asyncio
+
+import aioamqp
+import aioamqp.channel
+
+import pika
+from pika.adapters.blocking_connection import BlockingConnection, BlockingChannel
+
+from titanfe.config import configuration as config
+
+
+# TODO: Handle Reconnect?
+
+class RMQ:
+    """RabbitMQ transport for publishing"""
+    _instance = None  # singleton
+
+    def __new__(cls, *args, **kwargs):
+        if cls._instance is None:
+            cls._instance = super().__new__(cls, *args, **kwargs)
+        return cls._instance
+
+    def __init__(self):
+        self.connection = pika.BlockingConnection(
+            pika.ConnectionParameters(
+                host=config.rabbitmq_params.host,
+                port=config.rabbitmq_params.port,
+                credentials=pika.credentials.PlainCredentials(
+                    config.rabbitmq_params.user, config.rabbitmq_params.password
+                ),
+            )
+        )
+
+        self.channel = self.connection.channel()
+
+    def close(self):
+        for conn in (self.channel, self.connection):
+            try:
+                conn.close()
+            except Exception:  # pylint: disable=broad-except
+                pass
+        self._instance = None
+
+    def publish(self, exchange_name, queue_name, message):
+        self.channel.basic_publish(
+            exchange=exchange_name,
+            routing_key=queue_name,
+            body=message,
+        )
+
+
+class AsyncRMQ:
+    """RabbitMQ transport for packets"""
+
+    # TODO: make "robust" - handle reconnect and stuff
+    #  maybe find inspiration in aio_pikas RobustConnection/Channel/Queue?
+
+    _instance = None  # singleton
+
+    def __new__(cls, *args, **kwargs):
+        if cls._instance is None:
+            cls._instance = super().__new__(cls, *args, **kwargs)
+        return cls._instance
+
+    def __init__(self):
+        self._connection: aioamqp.protocol.AmqpProtocol = None
+        self._channel: aioamqp.channel.Channel = None
+        self._consumers = {}
+
+    def close(self):
+        for conn in (self._channel, self._connection):
+            if not conn:
+                continue
+            try:
+                conn.close()
+            except Exception:  # pylint: disable=broad-except
+                pass
+        self._instance = None
+
+    async def connect(self):
+        """create new rabbit mq connection/channel"""
+        _, self._connection = await aioamqp.connect(
+            host=config.rabbitmq_params.host,
+            port=config.rabbitmq_params.port,
+            login=config.rabbitmq_params.user,
+            password=config.rabbitmq_params.password,
+            heartbeat=60,
+        )
+
+        self._channel = await self._connection.channel()
+        await self._channel.basic_qos(prefetch_count=2)
+
+    async def disconnect(self):
+        """disconnect"""
+        if not self._connection:
+            return
+
+        await asyncio.gather(*(self.stop_consuming(consumer) for consumer in list(self._consumers)))
+
+        await self._channel.close()
+        await self._connection.close()
+
+    async def connection(self):
+        if not self._connection:
+            await self.connect()
+        return self._connection
+
+    async def channel(self) -> aioamqp.channel.Channel:
+        await self.connection()
+        return self._channel
+
+    async def start_consuming(self, queue_name, on_new_message_callback):
+        """start consuming the given queue"""
+
+        async def callback_wrapper(msgchannel, body, envelope, _):
+            async def done_callback():
+                await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
+
+            await on_new_message_callback(body, done_callback)
+
+        channel = await self.channel()
+
+        await channel.queue_declare(queue_name, durable=True)
+        consumer_tag = await channel.basic_consume(callback_wrapper, queue_name)
+        self._consumers[queue_name] = consumer_tag["consumer_tag"]
+
+    async def stop_consuming(self, queue_name):
+        """stop consuming the queue"""
+        tag = self._consumers.pop(queue_name)
+        channel = await self.channel()
+        await channel.basic_cancel(tag)
+
+
-- 
GitLab
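
With log records now routed through the "openfba.logging" topic exchange
(routing key = logger name, payload = UjoBin map, as in test_logging.py), a
throwaway log viewer can be sketched like this - assuming a local RabbitMQ
with default credentials; only the exchange name and payload format are taken
from the patch, the rest is illustrative:

    import pika
    from ujotypes import read_buffer  # used the same way in test_logging.py

    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host="localhost", port=5672)
    )
    channel = connection.channel()

    channel.exchange_declare("openfba.logging", exchange_type="topic", durable=True)
    result = channel.queue_declare("", exclusive=True)  # throwaway queue for this viewer
    channel.queue_bind(result.method.queue, "openfba.logging", routing_key="titanfe.#")

    def on_log(ch, method, properties, body):
        # the RabbitMQLogHandler publishes UjoBin maps keyed by logger name
        print(method.routing_key, read_buffer(body).as_pyobject())

    channel.basic_consume(result.method.queue, on_log, auto_ack=True)
    channel.start_consuming()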


From b9b726250067da22a5ba0a286de9445f1d2c37d5 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Thu, 2 Nov 2023 08:35:27 +0100
Subject: [PATCH 20/29] use rmq instead of kafka for streaming metrics

---
 test/brick_runner/conftest.py         |  36 ++------
 test/brick_runner/test_metrics.py     |  55 +++++-------
 test/brick_runner/test_runner.py      |  15 +++-
 test/test_logging.py                  |  17 ++--
 titanfe/apps/brick_runner/brick.py    |   4 +-
 titanfe/apps/brick_runner/input.py    |   6 --
 titanfe/apps/brick_runner/metrics.py  | 122 +++++---------------------
 titanfe/apps/brick_runner/output.py   |   2 -
 titanfe/apps/brick_runner/runner.py   |   5 +-
 titanfe/apps/control_peer/config.yaml |   3 -
 titanfe/config.py                     |  14 +--
 titanfe/log.py                        |   8 +-
 titanfe/rabbitmq.py                   |  20 +++--
 13 files changed, 89 insertions(+), 218 deletions(-)

diff --git a/test/brick_runner/conftest.py b/test/brick_runner/conftest.py
index d840c86..ee1d1f6 100644
--- a/test/brick_runner/conftest.py
+++ b/test/brick_runner/conftest.py
@@ -21,7 +21,6 @@ import pytest
 from titanfe.apps.brick_runner.metrics import MetricEmitter
 from titanfe.apps.brick_runner.runner import BrickRunner
 from titanfe.apps.control_peer.brick import BrickInstanceDefinition
-from titanfe.config import configuration
 
 # pylint: disable=redefined-outer-name
 
@@ -29,16 +28,6 @@ logging.basicConfig(
     stream=sys.stdout, format="%(asctime)s %(levelname)s %(name)s: %(message)s", level=logging.ERROR
 )
 
-configuration.no_kafka_today = True
-
-
-async def async_magic():
-    pass
-
-
-# extend the mock object to be awaitable
-MagicMock.__await__ = lambda *args, **kwargs: async_magic().__await__()
-
 
 @pytest.fixture()
 def metric_emitter():
@@ -51,23 +40,12 @@ def metric_emitter():
         "runner": "R-Test",
     }
 
-    emitter = MetricEmitter(metrics_meta, logger=None)
+    emitter = MetricEmitter(metrics_meta, transport=MagicMock(), logger=None)
     emitter.log = MagicMock()
-    emitter.kafka = MagicMock()
 
     return emitter
 
 
-class GridManagerDummy:
-    """Mocks the Gridmanager connection"""
-
-    # pylint: disable=too-few-public-methods
-
-    request_scaling = MagicMock()
-    deregister_runner = MagicMock()
-    register_runner = MagicMock()
-
-
 class InputTransportDouble:
     """replace rabbit mq"""
     def __init__(self, *_, **__):
@@ -96,27 +74,27 @@ class InputTransportDouble:
 
 @pytest.fixture()
 def patched_input_transport():
-    input_transport_path = "titanfe.apps.brick_runner.input.Transport"
+    input_transport_path = "titanfe.apps.brick_runner.runner.AsyncRMQ"
     with patch(input_transport_path, InputTransportDouble):
         yield
 
 
 class OutputTransportDouble:  # pylint: disable=too-few-public-methods
     """replace rabbit mq"""
-    def __init__(self, exchange_name, _=None):
-        self.exchange_name = exchange_name
+    def __init__(self):
         self.OUT = asyncio.Queue()  # pylint: disable=invalid-name
+        self.channel = MagicMock()
 
-    def publish(self, queue_name, message):
+    def publish(self, exchange_name, queue_name, message):
         async def _publish():
-            await self.OUT.put((queue_name, message))
+            await self.OUT.put((exchange_name, queue_name, message))
 
         asyncio.create_task(_publish())
 
 
 @pytest.fixture()
 def patched_output_transport():
-    input_transport_path = "titanfe.apps.brick_runner.output.Transport"
+    input_transport_path = "titanfe.apps.brick_runner.runner.RMQ"
     with patch(input_transport_path, OutputTransportDouble):
         yield
 
diff --git a/test/brick_runner/test_metrics.py b/test/brick_runner/test_metrics.py
index 17cc3d1..2a66301 100644
--- a/test/brick_runner/test_metrics.py
+++ b/test/brick_runner/test_metrics.py
@@ -8,13 +8,11 @@
 """
 Tests around the emitting of metric data
 """
-
+import json
 # pylint: disable=redefined-outer-name
 
 import platform
 
-import pytest
-
 from titanfe.apps.brick_runner.packet import Packet
 
 
@@ -23,13 +21,14 @@ class Ignored:   # pylint: disable=too-few-public-methods
         return True
 
 
-@pytest.mark.asyncio
 async def test_packet_metrics(metric_emitter):  # noqa: F811
     """assure the metrics for packets are complete"""
     packet = Packet(uid="P-Test")
 
-    topic = "titanfe.metrics"
-    metrics = {
+    expected_exchange = "openfba.metrics"
+    expected_queue = "openfba.metrics.packets"
+
+    expected_metrics = {
         "flow": "Test",
         "brick": "BrickDummy",
         "brick_type": "DummyType",
@@ -47,36 +46,20 @@ async def test_packet_metrics(metric_emitter):  # noqa: F811
         "at_outlet": False,
     }
 
-    await metric_emitter.emit_packet_metrics(packet, duration=1)
-    metric_emitter.kafka.send.assert_called_once_with(topic, metrics)
+    metric_emitter.emit_packet_metrics(packet, duration=1)
+    exchange, queue, metrics = metric_emitter.transport.publish.call_args[0]
+    metrics = json.loads(metrics.decode("utf-8"))
+    assert expected_exchange == exchange
+    assert expected_queue == queue
+    assert expected_metrics == metrics
 
 
-@pytest.mark.asyncio
-async def test_queue_metrics(metric_emitter):  # noqa: F811
-    """assure the metrics for queues are complete"""
-    topic = "titanfe.metrics"
-    metrics = {
-        "flow": "Test",
-        "brick": "BrickDummy",
-        "brick_type": "DummyType",
-        "brick_family": "DummyFamily",
-        "runner": "R-Test",
-        "host": platform.node(),
-        "timestamp": Ignored(),
-        "content_type": "titan-queue-metrics",
-        "queue_name": "Test",
-        "queue_length": 1,
-    }
-
-    await metric_emitter.emit_queue_metrics(queue_name="Test", queue_length=1)
-    metric_emitter.kafka.send.assert_called_once_with(topic, metrics)
-
-
-@pytest.mark.asyncio
 async def test_brick_metrics(metric_emitter):  # noqa: F811
     """assure the metrics for brick execution times are complete"""
-    topic = "titanfe.metrics"
-    metrics = {
+    expected_exchange = "openfba.metrics"
+    expected_queue = "openfba.metrics.bricks"
+
+    expected_metrics = {
         "flow": "Test",
         "brick": "BrickDummy",
         "brick_type": "DummyType",
@@ -88,5 +71,9 @@ async def test_brick_metrics(metric_emitter):  # noqa: F811
         "execution_time": 1.0
     }
 
-    await metric_emitter.emit_brick_metrics(execution_time=1.0)
-    metric_emitter.kafka.send.assert_called_once_with(topic, metrics)
+    metric_emitter.emit_brick_metrics(execution_time=1.0)
+    exchange, queue, metrics = metric_emitter.transport.publish.call_args[0]
+    metrics = json.loads(metrics.decode("utf-8"))
+    assert expected_exchange == exchange
+    assert expected_queue == queue
+    assert expected_metrics == metrics
diff --git a/test/brick_runner/test_runner.py b/test/brick_runner/test_runner.py
index bb7305a..cb372ca 100644
--- a/test/brick_runner/test_runner.py
+++ b/test/brick_runner/test_runner.py
@@ -49,17 +49,26 @@ async def test_basic_packet_processing(brick_runner):  # noqa: F811
     await brick_runner.input.transport.IN[DEFAULT_PORT].put(input_packet2)
 
     # pylint: disable=unused-variable
-    queue_name, packet = await brick_runner.output.transport.OUT.get()
+    _, queue_name, packet = await asyncio.wait_for(
+        brick_runner.output.transport.OUT.get(), timeout=1
+    )
+    packet = Packet.from_bytes(packet)
     assert packet.payload == UjoStringC("NewValue")
     assert input_packet1.uid == packet.uid
 
-    queue_name, packet = await brick_runner.output.transport.OUT.get()
+    _, queue_name, packet = await asyncio.wait_for(
+        brick_runner.output.transport.OUT.get(), timeout=1
+    )
+    packet = Packet.from_bytes(packet)
     assert packet.payload == UjoStringC("NewValue")
     assert input_packet2.uid == packet.uid
 
     await brick_runner.input.transport.IN[DEFAULT_PORT].put(input_packet3)
 
-    queue_name, packet = await brick_runner.output.transport.OUT.get()
+    _, queue_name, packet = await asyncio.wait_for(
+        brick_runner.output.transport.OUT.get(), timeout=1
+    )
+    packet = Packet.from_bytes(packet)
     assert packet.payload == UjoStringC("NewValue")
     assert input_packet3.uid == packet.uid
 
diff --git a/test/test_logging.py b/test/test_logging.py
index 16565ca..9b841f8 100644
--- a/test/test_logging.py
+++ b/test/test_logging.py
@@ -10,15 +10,11 @@
 import logging
 import platform
 from queue import Queue
-from unittest.mock import patch
+from unittest.mock import patch, MagicMock
 
-from kafka import KafkaProducer
-
-from log import RMQ_LOG_EXCHANGE
 from ujotypes import read_buffer
 
 from titanfe import log
-from titanfe.config import configuration
 
 
 class Ignored:  # pylint: disable=too-few-public-methods
@@ -34,17 +30,16 @@ def test_log_record_is_titan_specific():
     assert all(hasattr(record, field) for field in titan_log_fields)
 
 
-def test_logging_sends_contextual_bin_ujo_to_kafka():
-    """ assure the log record is sent to kafka as a binary ujo map"""
+def test_logging_sends_contextual_bin_ujo_via_rmq():
+    """ assure the log record is sent to rabbitmq as a binary ujo map"""
 
     rmq_location = "titanfe.log.RMQ"
 
-
     class RMQDummy():
-        """ a Kafka Dummy that sets the nonlocal variables
-            topic and message to the according values on send """
+        """a RMQ Dummy"""
 
         published = Queue()
+        channel = MagicMock()
 
         # pylint: disable=super-init-not-called, arguments-differ
         def __init__(self, *args, **kwargs):
@@ -77,6 +72,6 @@ def test_logging_sends_contextual_bin_ujo_to_kafka():
 
     exchange, queue, message = RMQDummy.published.get(timeout=0.1)
 
-    assert exchange == RMQ_LOG_EXCHANGE
+    assert exchange == log.RMQ_LOG_EXCHANGE
     assert queue == "titanfe.test"
     assert read_buffer(message).as_pyobject() == expected
diff --git a/titanfe/apps/brick_runner/brick.py b/titanfe/apps/brick_runner/brick.py
index caad931..9500e37 100644
--- a/titanfe/apps/brick_runner/brick.py
+++ b/titanfe/apps/brick_runner/brick.py
@@ -124,9 +124,9 @@ class Brick:
 
         self.run_instance_processing(payload, packet.port)
 
-        await self.metric_emitter.emit_brick_metrics(self.execution_time)
+        self.metric_emitter.emit_brick_metrics(self.execution_time)
         if self.is_outlet:
-            await self.metric_emitter.emit_packet_metrics(packet, self.execution_time)
+            self.metric_emitter.emit_packet_metrics(packet, self.execution_time)
 
     def run_instance_processing(self, payload, port):
         """do the actual execution of the brick module and return its result"""
diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index b875cc2..abf76ec 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -10,13 +10,9 @@ import asyncio
 import functools
 import typing as T
 
-import aioamqp
-import aioamqp.protocol
-import aioamqp.channel
 
 import titanfe.log
 from titanfe.apps.brick_runner.packet import Packet
-from titanfe.config import configuration as config
 
 Port = T.NewType("Port", str)
 
@@ -108,5 +104,3 @@ class Input:
         done_callback = self._packet_done_callbacks.pop(packet)
         asyncio.create_task(done_callback())
         self._packets.task_done()
-
-
diff --git a/titanfe/apps/brick_runner/metrics.py b/titanfe/apps/brick_runner/metrics.py
index aaf3c64..df2eaeb 100644
--- a/titanfe/apps/brick_runner/metrics.py
+++ b/titanfe/apps/brick_runner/metrics.py
@@ -5,85 +5,47 @@
 # found in the LICENSE file in the root directory of this source tree.
 #
 
-"""Handle creation of metric data and streaming it to Kafka"""
-
-import asyncio
-import pickle
+"""Handle creation of metric data and streaming via rabbitmq"""
+import json
 import platform
 from abc import ABC
 from dataclasses import dataclass, field
 
-import aiokafka
-
 import titanfe.log
-from titanfe.config import configuration
-from titanfe.utils import DictConvertable, iso_utc_time_string, cancel_tasks
+from titanfe.utils import DictConvertable, iso_utc_time_string
+
+
+RMQ_METRIC_EXCHANGE = "openfba.metrics"
+RMQ_BRICK_METRICS_QUEUE = "openfba.metrics.bricks"
+RMQ_PACKET_METRICS_QUEUE = "openfba.metrics.packets"
 
 
 class MetricEmitter:
-    """The MetricEmitter encapsulates creation of metric data and sending them to a Kafka instance
+    """The MetricEmitter encapsulates creation of metric data and sending them tovia rabbitmq
 
     Arguments:
-        metrics_metadata (dict): base meta data of metrics emitted
+        metrics_metadata (dict): base metadata of metrics emitted
         logger (logging.logger): the parent's logger instance
     """
 
-    def __init__(self, metrics_metadata, logger):
-        self.log = (
-            logger.getChild("MetricEmitter")
-            if logger
-            else titanfe.log.getLogger(__name__)
-        )
-        self.kafka = None
+    def __init__(self, metrics_metadata, transport, logger):
+        self.log = logger.getChild("MetricEmitter") if logger else titanfe.log.getLogger(__name__)
         self.metrics_meta = metrics_metadata
-
-    @classmethod
-    async def create_from_brick_runner(cls, runner) -> "MetricEmitter":
-        """Creates, starts and returns a MetricEmitter instance"""
-        metrics_meta = MetricsBase.extract_from_runner(runner)
-        emitter = cls(metrics_meta, runner.log)
-        await emitter.start()
-        return emitter
-
-    async def start(self):
-        """creates and starts the internal Kafka producer"""
-        if configuration.no_kafka_today or not configuration.kafka_bootstrap_servers:
-            self.log.info("Kafka is disabled or no bootstrap servers were given")
-            return
-
-        self.log.info("Starting Kafka producer")
-        self.kafka = aiokafka.AIOKafkaProducer(
-            loop=asyncio.get_event_loop(),
-            bootstrap_servers=configuration.kafka_bootstrap_servers,
-            # key_serializer=str.encode,
-            # value_serializer=str.encode
-            value_serializer=pickle.dumps,
+        self.transport = transport
+        self.transport.channel.exchange_declare(
+            RMQ_METRIC_EXCHANGE, exchange_type="topic", durable=True
         )
-        await self.kafka.start()
 
     def set_metadata_from_runner(self, runner):
         """assigns flowname and brickname after brickrunner has gotten his assignment"""
         self.metrics_meta = MetricsBase.extract_from_runner(runner)
 
-    async def emit(self, metrics_dict):
+    def emit(self, queue, metrics_dict):
         """emit the metrics"""
         self.log.metric("%s", metrics_dict)
+        self.transport.publish(RMQ_METRIC_EXCHANGE, queue, bytes(json.dumps(metrics_dict), "utf-8"))
 
-        if self.kafka:
-            try:
-                await self.kafka.send("titanfe.metrics", metrics_dict)
-            except aiokafka.errors.ProducerClosed:
-                pass  # we are most likely shutting down operations
-
-    async def emit_queue_metrics(self, queue_name, queue_length):
-        queue_metrics = QueueMetrics(
-            **self.metrics_meta, queue_name=queue_name, queue_length=queue_length
-        )
-        await self.emit(queue_metrics.to_dict())
-
-    async def emit_packet_metrics(
-        self, packet, duration
-    ):  # pylint: disable=missing-docstring
+    def emit_packet_metrics(self, packet, duration):  # pylint: disable=missing-docstring
         packet_metrics = PacketMetricsAtBrick(
             **self.metrics_meta,
             packet=packet.uid,
@@ -91,44 +53,11 @@ class MetricEmitter:
             traveling_time=packet.traveling_time,
             **packet.queue_times,
         )
-        await self.emit(packet_metrics.to_dict())
+        self.emit(RMQ_PACKET_METRICS_QUEUE, packet_metrics.to_dict())
 
-    async def emit_brick_metrics(self, execution_time):
+    def emit_brick_metrics(self, execution_time):
         brick_metrics = BrickMetrics(**self.metrics_meta, execution_time=execution_time)
-        await self.emit(brick_metrics.to_dict())
-
-    async def stop(self):
-        if self.kafka is not None:
-            await self.kafka.flush()
-            await self.kafka.stop()
-
-
-class QueueWithMetrics(asyncio.Queue):
-    """an ayncio.Queue that emits metrics (queue length)"""
-
-    def __init__(self, emitter, name, interval=0.1, maxsize=0):
-        super().__init__(maxsize)
-
-        self.name = name
-        self.metrics = asyncio.create_task(self.emit_metrics(emitter, interval))
-
-    async def emit_metrics(self, emitter, interval=0.1):
-        """automatically scheduled as task"""
-        while True:
-            await asyncio.sleep(interval)
-            queue_length = self.qsize()
-            if queue_length:
-                await emitter.emit_queue_metrics(self.name, queue_length)
-
-    async def put(self, item):
-        await super().put(item)
-
-    async def close(self):
-        await cancel_tasks((self.metrics,), wait_cancelled=True)
-
-    @property
-    def unfinished_tasks(self):
-        return self._unfinished_tasks
+        self.emit(RMQ_BRICK_METRICS_QUEUE, brick_metrics.to_dict())
 
 
 @dataclass
@@ -171,15 +100,6 @@ class PacketMetricsAtBrick(MetricsBase):
     at_outlet: bool = False  # TODO
 
 
-@dataclass
-class QueueMetrics(MetricsBase):
-    """Metric data for Input/Output-queues"""
-
-    content_type: str = "titan-queue-metrics"
-    queue_name: str = "QueueName?"
-    queue_length: int = 0
-
-
 @dataclass
 class BrickMetrics(MetricsBase):
     """Metric data for brick executions"""
diff --git a/titanfe/apps/brick_runner/output.py b/titanfe/apps/brick_runner/output.py
index 5ab0ffa..5ca544f 100644
--- a/titanfe/apps/brick_runner/output.py
+++ b/titanfe/apps/brick_runner/output.py
@@ -10,12 +10,10 @@ import typing as T
 from copy import deepcopy, copy
 from dataclasses import dataclass
 
-import pika
 from UJOSchema import schema_to_type
 from ujotypes import UjoBase, UJO_VARIANT_NONE
 
 import titanfe.log
-from titanfe.config import configuration as config
 from titanfe.apps.brick_runner.packet import Packet
 from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
 from titanfe.apps.control_peer.brick import Connection
diff --git a/titanfe/apps/brick_runner/runner.py b/titanfe/apps/brick_runner/runner.py
index 4eb4634..1c920e2 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/titanfe/apps/brick_runner/runner.py
@@ -78,7 +78,7 @@ class BrickRunner:
             logger=self.log,
         )
 
-        self.metric_emitter = await MetricEmitter.create_from_brick_runner(self)
+        self.metric_emitter = MetricEmitter(metrics_metadata={}, transport=RMQ(), logger=self.log)
 
         self.brick = Brick(brick_definition, self.metric_emitter, self.log, self.output)
 
@@ -133,8 +133,7 @@ class BrickRunner:
             # stop async stuff
             if not self.brick.is_inlet:
                 await self.input.stop()
-
-            await self.metric_emitter.stop()
+            RMQ().close()
 
         asyncio.create_task(shutdown())
 
diff --git a/titanfe/apps/control_peer/config.yaml b/titanfe/apps/control_peer/config.yaml
index 700bcdb..e29eb3f 100644
--- a/titanfe/apps/control_peer/config.yaml
+++ b/titanfe/apps/control_peer/config.yaml
@@ -5,6 +5,3 @@ RepositoryService: "http://localhost:8085/object" # Address of the Repository se
 RabbitMQUrl: "amqp://guest:guest@localhost:5672"
 IP: "127.0.0.1" # IP address of the host where the CP runs
 EndpointProvider: "tcp://192.168.178.43:9021" # Address of the EndpointProviders ZeroMQ router
-Kafka: "localhost:9092" # Address of the Kafka bootstrap server"
-#Kafka: "10.14.0.23:9092"
-#Kafka: "192.168.69.128:9092"
\ No newline at end of file
diff --git a/titanfe/config.py b/titanfe/config.py
index 81e311f..cdf64a4 100644
--- a/titanfe/config.py
+++ b/titanfe/config.py
@@ -9,7 +9,6 @@
 import urllib.parse
 from dataclasses import dataclass
 import os
-from ast import literal_eval
 
 # pylint: disable=invalid-name
 from pathlib import Path
@@ -18,8 +17,7 @@ from typing import Union
 from ruamel import yaml
 from ruamel.yaml import YAMLError
 
-DEFAULT_KAFKA_BOOTSTRAP_SERVER = "10.14.0.23:9092"
-DEFAULT_KAFKA_LOG_TOPIC = "titan.logs"
+
 DEFAULT_RABBITMQ_URL = "amqp://guest:guest@localhost:5672"
 DEFAULT_GRIDMANAGER_ADDRESS = "http://localhost:8080/gridmanager"
 DEFAULT_FLOWMANAGER_ADDRESS = "http://localhost:9002/flowmanager"
@@ -43,8 +41,6 @@ OPTION_ALIASES = {
     "packagemanager_address": "PackageManager",
     "reposervice_address": "RepositoryService",
     "rabbitmq_url": "RabbitMQUrl",
-    "kafka_bootstrap_servers": "Kafka",
-    "kafka_log_topic": "KafkaLogTopic",
     "brick_folder": "BrickFolder",
     "secret_key": "SecretKey",
     "endpoint_provider": "EndpointProvider",
@@ -63,14 +59,6 @@ class RabbitMQConnectionParams:
 class Configuration:
     """Current Configuration"""
 
-    kafka_bootstrap_servers: str = DEFAULT_KAFKA_BOOTSTRAP_SERVER
-    kafka_log_topic: str = DEFAULT_KAFKA_LOG_TOPIC
-
-    no_kafka_today: bool = literal_eval(
-        os.getenv("TITAN_METRICS_DISABLED")
-        or os.getenv("TITANFE_WITHOUT_KAFKA")
-        or "False"
-    )
     rabbitmq_url: str = DEFAULT_RABBITMQ_URL
     gridmanager_address: str = DEFAULT_GRIDMANAGER_ADDRESS
     flowmanager_address: str = DEFAULT_FLOWMANAGER_ADDRESS
diff --git a/titanfe/log.py b/titanfe/log.py
index e458433..b30e050 100644
--- a/titanfe/log.py
+++ b/titanfe/log.py
@@ -134,7 +134,7 @@ def getLogger(  # pylint: disable=invalid-name ; noqa: N802
 
 
 def initialize(service=""):
-    """initialize the titan logging module, e.g. set up a KafkaLogHandler
+    """initialize the openfba logging module
 
     Args:
         service: name of the current service
@@ -233,12 +233,10 @@ class RabbitMQLogHandler(logging.Handler):
 
     def emit(self, record):
         """emits the record"""
-        # if record.name.startswith("???"):
-        #     # drop kafka logging to avoid infinite recursion
-        #     return
-
         try:
             log_message = self.format(record)
+            if not isinstance(log_message, bytes):
+                log_message = bytes(log_message, "utf-8")
             self.producer.publish(self.exchange_name, record.name, log_message)
         except Exception:  # pylint: disable=broad-except
             exc_info = sys.exc_info()
diff --git a/titanfe/rabbitmq.py b/titanfe/rabbitmq.py
index f491dfa..97e29d9 100644
--- a/titanfe/rabbitmq.py
+++ b/titanfe/rabbitmq.py
@@ -1,10 +1,17 @@
+#
+# Copyright (c) 2019-present, wobe-systems GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# found in the LICENSE file in the root directory of this source tree.
+#
+
+"""thin rabbitmq wrapper"""
 import asyncio
 
 import aioamqp
 import aioamqp.channel
 
 import pika
-from pika.adapters.blocking_connection import BlockingConnection, BlockingChannel
 
 from titanfe.config import configuration as config
 
@@ -12,7 +19,7 @@ from titanfe.config import configuration as config
 # TODO: Handle Reconnect?
 
 class RMQ:
-    """RabbitMQ transport for publishing"""
+    """synchronous RabbitMQ transport for publishing"""
     _instance = None  # singleton
 
     def __new__(cls, *args, **kwargs):
@@ -34,6 +41,7 @@ class RMQ:
         self.channel = self.connection.channel()
 
     def close(self):
+        """close the connection"""
         for conn in (self.channel, self.connection):
             try:
                 conn.close()
@@ -41,7 +49,8 @@ class RMQ:
                 pass
         self._instance = None
 
-    def publish(self, exchange_name, queue_name, message):
+    def publish(self, exchange_name: str, queue_name: str, message: bytes):
+        """publish a message on the given exchange with the given routing key"""
         self.channel.basic_publish(
             exchange=exchange_name,
             routing_key=queue_name,
@@ -50,7 +59,7 @@ class RMQ:
 
 
 class AsyncRMQ:
-    """RabbitMQ transport for packets"""
+    """asynchronous RabbitMQ transport for consuming"""
 
     # TODO: make "robust" - handle reconnect and stuff
     #  maybe find inspiration in aio_pikas RobustConnection/Channel/Queue?
@@ -68,6 +77,7 @@ class AsyncRMQ:
         self._consumers = {}
 
     def close(self):
+        """close the connection"""
         for conn in (self._channel, self._connection):
             if not conn:
                 continue
@@ -129,5 +139,3 @@ class AsyncRMQ:
         tag = self._consumers.pop(queue_name)
         channel = await self.channel()
         await channel.basic_cancel(tag)
-
-
-- 
GitLab
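For context, the `RMQ` transport this patch finishes documenting is a process-wide singleton around a blocking pika connection. A minimal usage sketch, assuming the broker from `DEFAULT_RABBITMQ_URL` and the module's pre-rename location `titanfe.rabbitmq`; the exchange and routing-key names are made up for illustration:

    from titanfe.rabbitmq import RMQ

    rmq = RMQ()  # singleton: repeated RMQ() calls return the same connection/channel
    rmq.channel.exchange_declare("demo.exchange", exchange_type="topic", durable=True)
    rmq.publish("demo.exchange", "demo.key", b"payload bytes")
    rmq.close()  # resets the singleton; the next RMQ() reconnects
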


From ccd5545be179b5a1c472cf52aafafc296fe70d30 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Thu, 2 Nov 2023 12:56:46 +0100
Subject: [PATCH 21/29] use fanout exchanges for logging and metrics

---
 titanfe/apps/brick_runner/metrics.py | 2 +-
 titanfe/log.py                       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/titanfe/apps/brick_runner/metrics.py b/titanfe/apps/brick_runner/metrics.py
index df2eaeb..842767a 100644
--- a/titanfe/apps/brick_runner/metrics.py
+++ b/titanfe/apps/brick_runner/metrics.py
@@ -33,7 +33,7 @@ class MetricEmitter:
         self.metrics_meta = metrics_metadata
         self.transport = transport
         self.transport.channel.exchange_declare(
-            RMQ_METRIC_EXCHANGE, exchange_type="topic", durable=True
+            RMQ_METRIC_EXCHANGE, exchange_type="fanout", durable=True
         )
 
     def set_metadata_from_runner(self, runner):
diff --git a/titanfe/log.py b/titanfe/log.py
index b30e050..f8d367a 100644
--- a/titanfe/log.py
+++ b/titanfe/log.py
@@ -147,7 +147,7 @@ def initialize(service=""):
         logging.config.dictConfig(log_config)
 
     rmq_publisher = RMQ()
-    rmq_publisher.channel.exchange_declare(RMQ_LOG_EXCHANGE, exchange_type="topic", durable=True)
+    rmq_publisher.channel.exchange_declare(RMQ_LOG_EXCHANGE, exchange_type="fanout", durable=True)
 
     rmq_handler = RabbitMQLogHandler(rmq_publisher, exchange_name=RMQ_LOG_EXCHANGE)
     root = logging.getLogger()
-- 
GitLab
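The switch from `topic` to `fanout` changes delivery semantics: a fanout exchange ignores the routing key and copies each message to every bound queue, so any number of log or metrics consumers can attach independently. A minimal pika sketch, assuming a local broker; the queue is server-named and the payload is illustrative:

    import pika

    connection = pika.BlockingConnection(
        pika.URLParameters("amqp://guest:guest@localhost:5672")
    )
    channel = connection.channel()
    channel.exchange_declare("openfba.metrics", exchange_type="fanout", durable=True)

    # every queue bound to a fanout exchange receives every message
    queue = channel.queue_declare("", exclusive=True).method.queue
    channel.queue_bind(queue=queue, exchange="openfba.metrics")
    channel.basic_publish(exchange="openfba.metrics", routing_key="ignored", body=b"metric")
    connection.close()
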


From fc8e070d11eb60f0fdc5bd8a33c482b177e08a4e Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Thu, 2 Nov 2023 14:31:24 +0100
Subject: [PATCH 22/29] fix missing await for the done_callback

---
 titanfe/apps/brick_runner/input.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/titanfe/apps/brick_runner/input.py b/titanfe/apps/brick_runner/input.py
index abf76ec..9f72c1d 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/titanfe/apps/brick_runner/input.py
@@ -89,7 +89,7 @@ class Input:
                 packet = Packet.from_bytes(packet)
         except Exception:  # pylint: disable=broad-except  # who knows what might go wrong
             self.log.error("Failed to convert message to packet", exc_info=True)
-            done_callback()
+            await done_callback()
             return
 
         packet.update_input_entry()
-- 
GitLab
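Why the missing `await` mattered: calling an async `done_callback` without awaiting it only creates a coroutine object, so on the error path the message would never actually be acknowledged. A minimal sketch of the difference, with a stand-in callback:

    import asyncio

    async def done_callback():
        print("message acknowledged")

    async def on_error(broken: bool):
        if broken:
            done_callback()        # creates a coroutine object; never runs
        else:
            await done_callback()  # actually executes the acknowledgement

    asyncio.run(on_error(broken=False))  # prints "message acknowledged"
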


From 9b80019ce366fb17c07253840f12bc0fa9ce1f51 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Mon, 6 Nov 2023 13:11:42 +0100
Subject: [PATCH 23/29] make fit for Python 3.12 :)

---
 requirements_dev.txt                      |  12 +-
 requirements_prod.txt                     |  36 ++--
 setup.py                                  |  35 ++--
 titanfe/apps/brick_runner/grid_manager.py |  50 -----
 titanfe/apps/brick_runner/packet.py       |   4 +-
 titanfe/apps/control_peer/services.py     |  77 +++++++-
 titanfe/apps/rmq_to_elastic/__init__.py   |   6 +
 titanfe/apps/rmq_to_elastic/__main__.py   | 223 ++++++++++++++++++++++
 titanfe/config.py                         |   6 +-
 titanfe/log.py                            |   2 +-
 titanfe/rabbitmq.py                       |   2 +
 11 files changed, 351 insertions(+), 102 deletions(-)
 delete mode 100644 titanfe/apps/brick_runner/grid_manager.py
 create mode 100644 titanfe/apps/rmq_to_elastic/__init__.py
 create mode 100644 titanfe/apps/rmq_to_elastic/__main__.py

diff --git a/requirements_dev.txt b/requirements_dev.txt
index 15e3d6d..023cfcc 100644
--- a/requirements_dev.txt
+++ b/requirements_dev.txt
@@ -2,9 +2,9 @@
 -r requirements_prod.txt
 
 # for development:
-flake8 == 4.0.1
-pylint == 2.11.1
-pytest == 6.2.5
-pytest-aiohttp == 1.0.4
-pytest-asyncio == 0.18.3
-pytest-localserver == 0.8.0
+flake8
+pylint
+pytest
+pytest-aiohttp
+pytest-asyncio
+pytest-localserver
diff --git a/requirements_prod.txt b/requirements_prod.txt
index 6ab4083..847964f 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -1,22 +1,20 @@
 # production
-ujotypes >= 0.1.2
-ujoschema >= 0.3.66
-dataclasses-json==0.5.14
-ruamel.yaml == 0.17.*
-janus == 1.0.0
-aiokafka == 0.5.2 # fixed due to the kafka-python later version require
-kafka-python == 1.4.6
-elasticsearch == 7.8.*  # strangely 7.9.0 is missing the async parts that were introduced with 7.8.0?
-fastapi == 0.85.1
-starlette ==0.20.4
-uvicorn == 0.9.*  # fixed due to the uvloop later versions require
-aiohttp >= 3.6.2
-aiohttp-requests >= 0.1.3  # as required for elasticsearch async
-requests == 2.31.0
-docopt == 0.6.2
-pycryptodome == 3.19.0
-aioamqp == 0.15.0
-pika == 1.3.2
+ujotypes
+ujoschema
+
+dataclasses-json
+ruamel.yaml
+janus
+elasticsearch
+fastapi
+uvicorn
+aiohttp == 3.9.0b
+# aiohttp-requests
+requests
+docopt
+pycryptodome
+aioamqp
+pika
 
 # linux only:
-uvloop == 0.13.*;platform_system=="Linux"
+uvloop;platform_system=="Linux"
diff --git a/setup.py b/setup.py
index bb59cc9..9bc1478 100644
--- a/setup.py
+++ b/setup.py
@@ -63,25 +63,22 @@ setup(
     package_data={"titanfe": ["log_config.yml",
                               "apps/control_peer/config.yaml"]},
     install_requires=[
-        "ujotypes >=0.1.2",
-        "ujoschema >=0.3.66",
-        "ruamel.yaml == 0.17.*",
-        "janus == 1.0.0",
-        "aiokafka == 0.5.2",  # fixed due to the kafka-python version required by later versions
-        "kafka-python == 1.4.6",  # aiokafka 0.5.2 requires this version
-        "elasticsearch == 7.8.*",
-        "fastapi == 0.85.1",
-        "starlette == 0.20.4",
-        "uvicorn == 0.9.*",  # fixed due to the uvloop later versions require
-        'uvloop == 0.13.* ;platform_system=="Linux"',
-        "aiohttp >= 3.6.2",
-        "aiohttp-requests >= 0.1.3",
-        "dataclasses-json == 0.5.14",
-        "requests == 2.31.0",
-        "docopt == 0.6.2",
-        "aioamqp == 0.15.0",
-        "pika == 1.3.2",
-        "pycryptodome == 3.19.0",
+        "ujotypes",
+        "ujoschema",
+        "ruamel.yaml",
+        "janus",
+        "elasticsearch",
+        "fastapi",
+        "uvicorn",
+        'uvloop ;platform_system=="Linux"',
+        "aiohttp==3.9.0b",
+        #"aiohttp-requests",
+        "dataclasses-json",
+        "requests",
+        "docopt",
+        "aioamqp",
+        "pika",
+        "pycryptodome",
     ],
     ext_modules=[],
 )
diff --git a/titanfe/apps/brick_runner/grid_manager.py b/titanfe/apps/brick_runner/grid_manager.py
deleted file mode 100644
index 86ab825..0000000
--- a/titanfe/apps/brick_runner/grid_manager.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# Copyright (c) 2019-present, wobe-systems GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# found in the LICENSE file in the root directory of this source tree.
-#
-"""GridManager communication"""
-
-import json
-from http import HTTPStatus
-
-from aiohttp.client_exceptions import ClientError
-from aiohttp_requests import requests
-
-from titanfe import log as logging
-from titanfe.config import configuration
-
-
-class GridManager:
-    """GridManager"""
-
-    def __init__(self, runner_uid, brick_uid):
-        self.runner_uid = runner_uid
-        self.brick_uid = brick_uid
-        self.log = logging.TitanPlatformLogger(__name__)
-
-    @property
-    def address(self):
-        return configuration.gridmanager_address
-
-    async def register_runner(self, runner_address):
-        """register brick runner at grid manager"""
-        payload = {
-            "runnerID": self.runner_uid,
-            "address": "%s:%s" % runner_address,  # pylint: disable=consider-using-f-string
-            "brickId": self.brick_uid,
-        }
-        response = await requests.post(f"{self.address}/brickrunners/", data=json.dumps(payload))
-        if response.status != HTTPStatus.OK:
-            error = ClientError(f"Failed to register at GridManager: {response!r}")
-            self.log.with_context.error(error)
-            raise error
-
-        return await response.json()
-
-    async def deregister_runner(self):
-        """deregister brick runner at grid manager"""
-        payload = {"runnerId": self.runner_uid, "brickId": self.brick_uid}
-        await requests.post(f"{self.address}/brickrunners/deregister", data=json.dumps(payload))
-        self.log.debug("Deregister: %r", payload)
diff --git a/titanfe/apps/brick_runner/packet.py b/titanfe/apps/brick_runner/packet.py
index d649877..6f2433c 100644
--- a/titanfe/apps/brick_runner/packet.py
+++ b/titanfe/apps/brick_runner/packet.py
@@ -13,7 +13,7 @@ import time
 from dataclasses import dataclass, field
 
 from ujotypes import UjoBase, UjoStringUTF8, read_buffer, UjoMap, ujo_to_python
-from ujotypes.variants.none import UJO_VARIANT_NONE
+from ujotypes.variants.none import UjoNone
 
 from titanfe.messages import PacketMessage
 from titanfe.ujo_helper import py_to_ujo_bytes
@@ -35,7 +35,7 @@ class Packet(DictConvertable):
     uid: str = field(default_factory=functools.partial(create_uid, "P-"))
     started: float = field(default_factory=time.time_ns)
     port: str = ""
-    payload: UjoBase = UJO_VARIANT_NONE
+    payload: UjoBase = field(default_factory=UjoNone)
     buffer: Buffer = field(default_factory=Buffer)
 
     # ancestors: list = field(default_factory=list)
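Background for the `payload` default change above: a module-level instance like `UJO_VARIANT_NONE` used as a dataclass default is shared across all packets, and newer Python versions reject such defaults outright when the instance's type is unhashable ("mutable default ... use default_factory"). A minimal sketch of the pattern with a stand-in class (the real `UjoNone` comes from `ujotypes`):

    from dataclasses import dataclass, field

    class PayloadStandIn:
        # defining __eq__ without __hash__ makes instances unhashable,
        # which is what trips the dataclass mutable-default check
        def __eq__(self, other):
            return isinstance(other, type(self))

    @dataclass
    class Packet:
        # payload: PayloadStandIn = PayloadStandIn()  # ValueError on newer Python
        payload: PayloadStandIn = field(default_factory=PayloadStandIn)

    assert Packet().payload is not Packet().payload  # fresh instance per packet
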
diff --git a/titanfe/apps/control_peer/services.py b/titanfe/apps/control_peer/services.py
index b766c2b..9ad8428 100644
--- a/titanfe/apps/control_peer/services.py
+++ b/titanfe/apps/control_peer/services.py
@@ -11,7 +11,7 @@ from http import HTTPStatus
 from abc import ABC, abstractmethod
 
 from aiohttp.client_exceptions import ClientError
-from aiohttp_requests import Requests  # initiate a new client every time,
+# from aiohttp_requests import Requests  # initiate a new client every time,
 
 # because we don't know how many threads are used and each will have its own asyncio loop
 # there must be a better way, but right now I'm short on time...
@@ -137,5 +137,80 @@ class GridManager(ControlPeerServiceRegistration):
         return f"{self.address}/controlpeers"
 
 
+import aiohttp
+import functools
+
+# aiohttp_requests is currently not available with python3.12
+# we didn't really need much from it anyway, so here it is:
+
+# Patch ClientResponse.read to release the connection immediately after reading,
+# so callers don't have to worry about it or use a context manager
+_read_only = aiohttp.client_reqrep.ClientResponse.read
+async def _read_and_release(self):  # noqa
+    try:
+        data = await _read_only(self)
+    finally:
+        self.close()
+
+    return data
+aiohttp.client_reqrep.ClientResponse.read = _read_and_release
+
+
+class Requests:
+    """ Thin wrapper for aiohttp.ClientSession with Requests simplicity """
+    def __init__(self, *args, **kwargs):
+        self._session_args = (args, kwargs)
+        self._session = None
+
+    @property
+    def session(self):
+        """ An instance of aiohttp.ClientSession """
+        if not self._session or self._session.closed or self._session.loop.is_closed():
+            self._session = aiohttp.ClientSession(*self._session_args[0], **self._session_args[1])
+        return self._session
+
+    def __getattr__(self, attr):
+        if attr.upper() in aiohttp.hdrs.METH_ALL:
+            @functools.wraps(self.session._request)
+            def session_request(*args, **kwargs):
+                """
+                This ensures `self.session` is always called where it can check the session/loop state.
+                We can't use functools.partial, as monkeypatching seems to do something weird where
+                __getattr__ is only called once for each attribute after the patch is undone.
+                """
+                return self.session._request(attr.upper(), *args, **kwargs)
+
+            return session_request
+        else:
+            return super().__getattribute__(attr)
+
+    def close(self):
+        """
+        Close aiohttp.ClientSession.
+
+        This is useful to call manually in tests when each test uses a new loop. After close,
+        new requests will automatically create a new session.
+
+        Note: We need a sync version for `__del__` and `aiohttp.ClientSession.close()` is async even though it doesn't
+        have to be.
+        """
+        if self._session:
+            if not self._session.closed:
+                # Older aiohttp does not have _connector_owner
+                if not hasattr(self._session, '_connector_owner') or self._session._connector_owner:
+                    try:
+                        self._session._connector._close()  # New version returns a coroutine in close() as warning
+                    except Exception:
+                        self._session._connector.close()
+                self._session._connector = None
+            self._session = None
+
+    def __del__(self):
+        self.close()
+
+
+requests = Requests()
+
 package_manager = PackageManager()  # pylint: disable=invalid-name
 grid_manager = GridManager()  # pylint: disable=invalid-name
+
+
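A short usage sketch for the inlined `Requests` wrapper above, assuming a reachable service at an illustrative URL and the module's pre-rename import path; with the patched `ClientResponse.read`, the response is released right after reading:

    import asyncio
    from titanfe.apps.control_peer.services import requests

    async def fetch():
        # __getattr__ maps .get/.post/... onto a lazily (re)created ClientSession
        response = await requests.get("http://localhost:8080/gridmanager/state")
        return await response.read()  # read() now also releases the response

    print(asyncio.run(fetch()))
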
diff --git a/titanfe/apps/rmq_to_elastic/__init__.py b/titanfe/apps/rmq_to_elastic/__init__.py
new file mode 100644
index 0000000..331d18e
--- /dev/null
+++ b/titanfe/apps/rmq_to_elastic/__init__.py
@@ -0,0 +1,6 @@
+#
+# Copyright (c) 2019-present, wobe-systems GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# found in the LICENSE file in the root directory of this source tree.
+#
diff --git a/titanfe/apps/rmq_to_elastic/__main__.py b/titanfe/apps/rmq_to_elastic/__main__.py
new file mode 100644
index 0000000..01aa577
--- /dev/null
+++ b/titanfe/apps/rmq_to_elastic/__main__.py
@@ -0,0 +1,223 @@
+"""
+rmq_to_elastic
+
+Usage:
+  rmq_to_elastic [-r <rabbitmq_host>] [-e <elastic_host>] [-l <log_topics>] [-m <metric_topics>]
+  rmq_to_elastic (-h | --help)
+
+Example:
+  rmq_to_elastic --rmq 127.0.0.1:9092 --elastic 127.0.0.1 -m a_topic another_topic
+
+Options:
+  -h, --help     Show this screen.
+
+  -r <rabbitmq_host>, --rmq=<rabbitmq_host>
+      the host to consume from as `<host>:<port>` [default: 10.14.0.23:9092]
+
+  -e <elastic_host>, --elastic=<elastic_host>
+      the elastic host `<hostname_or_ip>` [default: 10.14.0.21]
+
+  -l <log_topics>, --logs=<log_topics>
+      topics of the titan go service logs `<one or more topics>` [default: openfba.logging]
+
+  -m <metric_topics>, --metrics=<metric_topics>
+      routing keys of the flowengine metrics `<one or more topics>` [default: openfba.metrics.*]
+"""
+
+# pylint: disable=broad-except, missing-docstring
+# missing-function-docstring, missing-class-docstring
+import argparse
+import os
+import asyncio
+import pickle
+import json
+import signal
+from contextlib import suppress
+from datetime import datetime
+from collections import namedtuple
+from aiokafka import AIOKafkaConsumer, ConsumerStoppedError
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.helpers import async_bulk
+
+KafkaTopics = namedtuple("KafkaTopics", ("name", "type"))
+SERVICE_TOPIC_TYPE = "service"
+FLOWENGINE_TOPIC_TYPE = "flowengine"
+
+
+async def main():
+    arg_parser = argparse.ArgumentParser()
+    arg_parser.add_argument(
+        "-e",
+        "--elastic",
+        type=str,
+        default="10.14.0.21",
+        help=" the elastic host `<hostname_or_ip>`",
+    )
+    arg_parser.add_argument(
+        "-r",
+        "--rmq",
+        type=str,
+        default="10.14.0.23:9092",
+        help="the host to consume from as `<host>:<port>`",
+    )
+    arg_parser.add_argument(
+        "-l",
+        "--logs",
+        nargs="+",
+        default=["openfba.logging"],
+        help="topics of the titan go service logs `<one or more topics>` [default: openfba.logging]",
+    )
+    arg_parser.add_argument(
+        "-m",
+        "--metrics",
+        nargs="+",
+        default=["openfba.metrics.*"],
+        help="routing keys of the flowengine metrics `<one or more topics>` [default: openfba.metrics.*]",
+    )
+    args = arg_parser.parse_args()
+
+    signals = signal.SIGINT, signal.SIGTERM
+
+    if os.name != "nt":  # not available on windows
+        signals += (signal.SIGHUP,)  # pylint: disable=no-member
+
+    for sign in signals:
+        signal.signal(sign, schedule_shutdown)
+
+    bootstrap_servers = args.rmq
+    elastic_host = args.elastic
+    topics = KafkaTopics(
+        name=args.metrics + args.logs,
+        type={
+            **{topic: FLOWENGINE_TOPIC_TYPE for topic in args.metrics},
+            **{topic: SERVICE_TOPIC_TYPE for topic in args.logs},
+        },
+    )
+
+    print("Reading", topics.name, "From", bootstrap_servers, "To", elastic_host)
+
+    async with KafkaReader(
+        topics.name, bootstrap_servers=bootstrap_servers
+    ) as kafka, ElasticWriter(
+        elastic_host=elastic_host
+    ) as elastic:  # noqa
+        async for topic, records in kafka.read():
+            len_records = f"{len(records)} record{'s' if len(records) > 1 else ''}"
+            print(f"processing {len_records} from {topic.topic} of type {topics.type[topic.topic]}")
+            msgs = list(transform_kafka_to_elastic(records, topics.type[topic.topic]))
+            await elastic.bulk_insert(msgs)
+
+
+def schedule_shutdown(sign, _):
+    print(f"Received {signal.Signals(sign).name} ...")  # pylint: disable=no-member
+
+    async def shutdown():
+        tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
+        for task in tasks:
+            task.cancel()
+
+        print(f"Cancelling outstanding tasks ({len(tasks)})")
+        await asyncio.gather(*tasks, return_exceptions=True)  # swallow CancelledError from the cancelled tasks
+
+    asyncio.create_task(shutdown())
+
+
+class KafkaReader:
+    def __init__(self, topics, bootstrap_servers):
+        self.consumer = AIOKafkaConsumer(
+            *topics,
+            loop=asyncio.get_event_loop(),
+            bootstrap_servers=bootstrap_servers,
+            # auto_offset_reset='earliest',
+        )
+
+    async def start(self):
+        await self.consumer.start()
+
+    async def stop(self):
+        await self.consumer.stop()
+
+    async def __aenter__(self):
+        await self.start()
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self.stop()
+
+    def __aiter__(self):
+        return self
+
+    async def __anext__(self):
+        while True:
+            try:
+                batch = await self.consumer.getmany(timeout_ms=1000)
+            except (asyncio.CancelledError, ConsumerStoppedError):
+                raise StopAsyncIteration
+
+            if not batch:
+                print(".", end="", flush=True)
+                continue
+
+            return batch
+
+    async def read(self):
+        async for batch in self:
+            for topic, records in batch.items():
+                yield topic, records
+
+
+class ElasticWriter:
+    def __init__(self, elastic_host):
+        self.elastic = AsyncElasticsearch(hosts=[{"host": elastic_host}])
+
+    async def __aenter__(self):
+        await self.elastic.__aenter__()
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        await self.elastic.close()
+
+    async def bulk_insert(self, document_generator):
+        await async_bulk(self.elastic, document_generator)
+
+
+def transform_kafka_to_elastic(batch, topic_type):
+    def transform(message):
+        content = pickle.loads(message.value)
+        content["@timestamp"] = content.pop("timestamp")
+
+        doc_type = content["content_type"]
+        index = f"{doc_type}-{datetime.now():%Y-%m-%d}"
+
+        return {"_op_type": "index", "_index": index, "_type": doc_type, "_source": content}
+
+    def transform_service_log(message):
+        content = json.loads(message.value)
+        content["@timestamp"] = content.pop("time")
+        package = content["package"].split("/")[0]
+        index = f"{package.lower()}-{datetime.now():%Y-%m-%d}"
+
+        return {"_op_type": "index", "_index": index, "_type": "service", "_source": content}
+
+    for message in batch:
+        try:
+            if topic_type == SERVICE_TOPIC_TYPE:
+                yield transform_service_log(message)
+            else:
+                yield transform(message)
+
+        except Exception as error:
+            print("Failed to transform ", message, error)
+
+
+if __name__ == "__main__":
+
+    async def run_main():
+        try:
+            with suppress(asyncio.CancelledError):
+                await main()
+        except Exception as error:
+            print("Error:", repr(error))
+
+    asyncio.run(run_main())
diff --git a/titanfe/config.py b/titanfe/config.py
index cdf64a4..f5cf1ff 100644
--- a/titanfe/config.py
+++ b/titanfe/config.py
@@ -14,9 +14,7 @@ import os
 from pathlib import Path
 from typing import Union
 
-from ruamel import yaml
-from ruamel.yaml import YAMLError
-
+from ruamel.yaml import YAMLError, YAML
 
 DEFAULT_RABBITMQ_URL = "amqp://guest:guest@localhost:5672"
 DEFAULT_GRIDMANAGER_ADDRESS = "http://localhost:8080/gridmanager"
@@ -87,7 +85,7 @@ class Configuration:
         """Read and update the configuration from a yaml file"""
         try:
             with open(file_path) as f:  # pylint: disable=unspecified-encoding
-                config = yaml.safe_load(f)
+                config = YAML(typ='safe', pure=True).load(f)
             self.update(config)
         except OSError as error:
             print("Could not read config file", file_path, "-", error)
diff --git a/titanfe/log.py b/titanfe/log.py
index f8d367a..423a327 100644
--- a/titanfe/log.py
+++ b/titanfe/log.py
@@ -143,7 +143,7 @@ def initialize(service=""):
 
     log_config_file = pathlib.Path(__file__).parent / "log_config.yml"
     with open(log_config_file) as cfile:  # pylint: disable=unspecified-encoding
-        log_config = ruamel.yaml.safe_load(cfile)
+        log_config = ruamel.yaml.YAML(typ='safe', pure=True).load(cfile)
         logging.config.dictConfig(log_config)
 
     rmq_publisher = RMQ()
diff --git a/titanfe/rabbitmq.py b/titanfe/rabbitmq.py
index 97e29d9..a5a7332 100644
--- a/titanfe/rabbitmq.py
+++ b/titanfe/rabbitmq.py
@@ -139,3 +139,5 @@ class AsyncRMQ:
         tag = self._consumers.pop(queue_name)
         channel = await self.channel()
         await channel.basic_cancel(tag)
+
+#class RMQStream:
\ No newline at end of file
-- 
GitLab
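The `yaml.safe_load` replacement in this patch tracks ruamel.yaml's API: the PyYAML-style module functions were deprecated and later removed in favour of the `YAML` object interface. A minimal sketch of the new loading style, assuming ruamel.yaml 0.17 or later and an illustrative file name:

    from ruamel.yaml import YAML

    yaml = YAML(typ="safe", pure=True)  # pure-Python safe loader

    with open("config.yaml", encoding="utf-8") as f:
        config = yaml.load(f)  # equivalent of the old yaml.safe_load(f)
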


From 369829abf5b43b45b3a6b8c4c5fcc4a2825ce26f Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Mon, 6 Nov 2023 15:25:24 +0100
Subject: [PATCH 24/29] rename titanfe to openfba

---
 .gitlab-ci.yml                                |  12 +-
 examples/bricks/benchmark/display.py          |   4 +-
 examples/bricks/benchmark/generator.py        |   4 +-
 examples/bricks/decrementer/decrementer.py    |   6 +-
 examples/bricks/display/display.py            |   6 +-
 examples/bricks/duplicator/duplicator.py      |   6 +-
 examples/bricks/generator/generator.py        |   6 +-
 examples/bricks/incrementer/incrementer.py    |   6 +-
 examples/bricks/incrementer/test_increment.py |   4 +-
 examples/bricks/kafkawriter/kafkawriter.py    |   2 +-
 examples/bricks/modulebrick/__init__.py       |   4 +-
 .../bricks/rest_display/src/rest_display.py   |   6 +-
 examples/bricks/selector/selector.py          |   6 +-
 .../rmq_to_elastic => openfba}/__init__.py    |   2 +-
 {titanfe => openfba/apps}/__init__.py         |   0
 .../apps/brick_runner}/__init__.py            |   0
 .../apps/brick_runner/__main__.py             |   9 +-
 .../apps/brick_runner/adapter.py              |   4 +-
 .../apps/brick_runner/brick.py                |  12 +-
 .../apps/brick_runner/input.py                |   6 +-
 .../apps/brick_runner/metrics.py              |   6 +-
 .../apps/brick_runner/output.py               |  10 +-
 .../apps/brick_runner/packet.py               |   8 +-
 .../apps/brick_runner/runner.py               |   6 +-
 .../apps/brick_runner/value_mapping.py        |   2 +-
 .../apps/control_peer}/__init__.py            |   0
 .../apps/control_peer/__main__.py             |  14 +-
 .../apps/control_peer/brick.py                |   6 +-
 .../apps/control_peer/config.yaml             |   0
 .../apps/control_peer/control_peer.py         |   4 +-
 .../apps/control_peer/runner.py               |   8 +-
 .../apps/control_peer/services.py             |   4 +-
 .../apps/control_peer/webapi/__init__.py      |   0
 .../apps/control_peer/webapi/app.py           |   2 +-
 .../apps/control_peer/webapi/bricks.py        |   4 +-
 .../apps/control_peer/webapi/flows.py         |   0
 .../apps/control_peer/webapi/state.py         |   0
 .../apps/kafka_to_elastic}/__init__.py        |   0
 .../apps/kafka_to_elastic/__main__.py         |   4 +-
 .../apps/kafka_viewer}/__init__.py            |   0
 .../apps/kafka_viewer/__main__.py             |   2 +-
 .../apps/rmq_to_elastic}/__init__.py          |   0
 .../apps/rmq_to_elastic/__main__.py           |   0
 {titanfe => openfba}/brick.py                 |   2 +-
 {titanfe => openfba}/config.py                | 212 +++++++++---------
 {titanfe => openfba}/connection.py            |  10 +-
 {titanfe => openfba}/constants.py             |   0
 {titanfe => openfba}/get-pip.py               |   0
 {titanfe => openfba}/log.py                   |  12 +-
 {titanfe => openfba}/log_config.yml           |   4 +-
 {titanfe => openfba}/messages.py              |   0
 {titanfe => openfba}/rabbitmq.py              |   2 +-
 {titanfe => openfba}/repository.py            |   4 +-
 {titanfe => openfba}/testing/__init__.py      |   0
 {titanfe => openfba}/testing/testrunner.py    |  18 +-
 {titanfe => openfba}/ujo_helper.py            |   0
 {titanfe => openfba}/utils.py                 |   2 +-
 setup.py                                      |  33 +--
 test/brick_runner/conftest.py                 |  14 +-
 test/brick_runner/test_adapter.py             |   4 +-
 test/brick_runner/test_metrics.py             |   6 +-
 test/brick_runner/test_portMapping.py         |  10 +-
 test/brick_runner/test_runner.py              |   8 +-
 test/control_peer/test_install_brick.py       |   6 +-
 test/control_peer/test_repository.py          |   6 +-
 test/control_peer/test_webapi.py              |   4 +-
 test/test_logging.py                          |  14 +-
 test/test_testrunner.py                       |   6 +-
 titanfe/__init__.pyc                          | Bin 106 -> 0 bytes
 titanfe/apps/__init__.pyc                     | Bin 111 -> 0 bytes
 titanfe/apps/control_peer/__init__.pyc        | Bin 124 -> 0 bytes
 71 files changed, 288 insertions(+), 274 deletions(-)
 rename {titanfe/apps/rmq_to_elastic => openfba}/__init__.py (98%)
 rename {titanfe => openfba/apps}/__init__.py (100%)
 rename {titanfe/apps => openfba/apps/brick_runner}/__init__.py (100%)
 rename {titanfe => openfba}/apps/brick_runner/__main__.py (86%)
 rename {titanfe => openfba}/apps/brick_runner/adapter.py (97%)
 rename {titanfe => openfba}/apps/brick_runner/brick.py (94%)
 rename {titanfe => openfba}/apps/brick_runner/input.py (95%)
 rename {titanfe => openfba}/apps/brick_runner/metrics.py (96%)
 rename {titanfe => openfba}/apps/brick_runner/output.py (92%)
 rename {titanfe => openfba}/apps/brick_runner/packet.py (93%)
 rename {titanfe => openfba}/apps/brick_runner/runner.py (97%)
 rename {titanfe => openfba}/apps/brick_runner/value_mapping.py (99%)
 rename {titanfe/apps/brick_runner => openfba/apps/control_peer}/__init__.py (100%)
 rename {titanfe => openfba}/apps/control_peer/__main__.py (88%)
 rename {titanfe => openfba}/apps/control_peer/brick.py (99%)
 rename {titanfe => openfba}/apps/control_peer/config.yaml (100%)
 rename {titanfe => openfba}/apps/control_peer/control_peer.py (98%)
 rename {titanfe => openfba}/apps/control_peer/runner.py (95%)
 rename {titanfe => openfba}/apps/control_peer/services.py (98%)
 rename {titanfe => openfba}/apps/control_peer/webapi/__init__.py (100%)
 rename {titanfe => openfba}/apps/control_peer/webapi/app.py (98%)
 rename {titanfe => openfba}/apps/control_peer/webapi/bricks.py (95%)
 rename {titanfe => openfba}/apps/control_peer/webapi/flows.py (100%)
 rename {titanfe => openfba}/apps/control_peer/webapi/state.py (100%)
 rename {titanfe/apps/control_peer => openfba/apps/kafka_to_elastic}/__init__.py (100%)
 rename {titanfe => openfba}/apps/kafka_to_elastic/__main__.py (99%)
 rename {titanfe/apps/kafka_to_elastic => openfba/apps/kafka_viewer}/__init__.py (100%)
 rename {titanfe => openfba}/apps/kafka_viewer/__main__.py (96%)
 rename {titanfe/apps/kafka_viewer => openfba/apps/rmq_to_elastic}/__init__.py (100%)
 rename {titanfe => openfba}/apps/rmq_to_elastic/__main__.py (100%)
 rename {titanfe => openfba}/brick.py (97%)
 rename {titanfe => openfba}/config.py (95%)
 rename {titanfe => openfba}/connection.py (95%)
 rename {titanfe => openfba}/constants.py (100%)
 rename {titanfe => openfba}/get-pip.py (100%)
 rename {titanfe => openfba}/log.py (96%)
 rename {titanfe => openfba}/log_config.yml (94%)
 rename {titanfe => openfba}/messages.py (100%)
 rename {titanfe => openfba}/rabbitmq.py (98%)
 rename {titanfe => openfba}/repository.py (97%)
 rename {titanfe => openfba}/testing/__init__.py (100%)
 rename {titanfe => openfba}/testing/testrunner.py (94%)
 rename {titanfe => openfba}/ujo_helper.py (100%)
 rename {titanfe => openfba}/utils.py (99%)
 delete mode 100644 titanfe/__init__.pyc
 delete mode 100644 titanfe/apps/__init__.pyc
 delete mode 100644 titanfe/apps/control_peer/__init__.pyc

diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 1d905ba..6c938be 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -8,7 +8,7 @@ stages:
 ##############################################
 # Build Stage
 
-build_titanfe:
+build_openfba:
   stage: build
   tags:
     - dockerex
@@ -18,9 +18,9 @@ build_titanfe:
     # execute python site-package build
     - /opt/python/cp37-cp37m/bin/pip wheel . -w ./site-packages
     # validate the sitepackage
-    - auditwheel show $(find ./site-packages/titanfe-*.whl)
+    - auditwheel show $(find ./site-packages/openfba-*.whl)
     # move only required packages to wheelhouse
-    - cp -v $(find ./site-packages/titanfe-*.whl) ./wheelhouse
+    - cp -v $(find ./site-packages/openfba-*.whl) ./wheelhouse
   artifacts:
     expire_in: 6 mos
     paths:
@@ -36,7 +36,7 @@ test_br_cp:
   image: industrialdevops/flowengine-py-testcontainer:latest
   before_script:
     # install python pip package
-    - pip3 install ./wheelhouse/titanfe*.whl
+    - pip3 install ./wheelhouse/openfba*.whl
     - pip3 install -r requirements_dev.txt
   script:
     - python3.7 -m pytest
@@ -49,10 +49,10 @@ pylint_br_cp:
   allow_failure: false
   before_script:
     # install python pip package
-    - pip3 install ./wheelhouse/titanfe*.whl
+    - pip3 install ./wheelhouse/openfba*.whl
   script:
     # run pylint for python module
-    - pylint --rcfile=.pylintrc ./titanfe
+    - pylint --rcfile=.pylintrc ./openfba
     - pylint --rcfile=.pylintrc --disable=duplicate-code ./test
 
 flake8:
diff --git a/examples/bricks/benchmark/display.py b/examples/bricks/benchmark/display.py
index 83131f1..46aa598 100644
--- a/examples/bricks/benchmark/display.py
+++ b/examples/bricks/benchmark/display.py
@@ -2,8 +2,8 @@
 # pylint: skip-file
 import time
 
-import titanfe.log
-log = titanfe.log.getLogger(__name__)
+import openfba.log
+log = openfba.log.getLogger(__name__)
 
 
 def do_brick_processing(brick, params, input):
diff --git a/examples/bricks/benchmark/generator.py b/examples/bricks/benchmark/generator.py
index 8dda038..9b17da2 100644
--- a/examples/bricks/benchmark/generator.py
+++ b/examples/bricks/benchmark/generator.py
@@ -5,9 +5,9 @@ import time
 
 from ujotypes import UjoInt64, UjoStringC
 
-import titanfe.log
+import openfba.log
 
-log = titanfe.log.getLogger(__name__)
+log = openfba.log.getLogger(__name__)
 
 
 def do_brick_processing(brick, parameters, input):
diff --git a/examples/bricks/decrementer/decrementer.py b/examples/bricks/decrementer/decrementer.py
index 738afde..caf918d 100644
--- a/examples/bricks/decrementer/decrementer.py
+++ b/examples/bricks/decrementer/decrementer.py
@@ -3,9 +3,9 @@
 
 import time
 
-from titanfe.brick import BrickBase
-import titanfe.log
-log = titanfe.log.getLogger(__name__)
+from openfba.brick import BrickBase
+import openfba.log
+log = openfba.log.getLogger(__name__)
 
 
 class Brick(BrickBase):
diff --git a/examples/bricks/display/display.py b/examples/bricks/display/display.py
index e67aaf6..f92454e 100644
--- a/examples/bricks/display/display.py
+++ b/examples/bricks/display/display.py
@@ -1,10 +1,10 @@
 # Sample brick: display
 # pylint: skip-file
 
-from titanfe.brick import BrickBase
-import titanfe.log
+from openfba.brick import BrickBase
+import openfba.log
 
-log = titanfe.log.getLogger(__name__)
+log = openfba.log.getLogger(__name__)
 
 
 class Brick(BrickBase):
diff --git a/examples/bricks/duplicator/duplicator.py b/examples/bricks/duplicator/duplicator.py
index a6a4beb..ee60248 100644
--- a/examples/bricks/duplicator/duplicator.py
+++ b/examples/bricks/duplicator/duplicator.py
@@ -3,9 +3,9 @@
 
 import time
 
-from titanfe.brick import BrickBase
-import titanfe.log
-log = titanfe.log.getLogger(__name__)
+from openfba.brick import BrickBase
+import openfba.log
+log = openfba.log.getLogger(__name__)
 
 
 class Brick(BrickBase):
diff --git a/examples/bricks/generator/generator.py b/examples/bricks/generator/generator.py
index 5827945..e2e4bd5 100644
--- a/examples/bricks/generator/generator.py
+++ b/examples/bricks/generator/generator.py
@@ -4,10 +4,10 @@
 import time
 
 from ujotypes import UjoInt64
-from titanfe.brick import BrickBase
-import titanfe.log
+from openfba.brick import BrickBase
+import openfba.log
 
-log = titanfe.log.getLogger(__name__)
+log = openfba.log.getLogger(__name__)
 
 
 class Brick(BrickBase):
diff --git a/examples/bricks/incrementer/incrementer.py b/examples/bricks/incrementer/incrementer.py
index cc1e17d..7bae34e 100644
--- a/examples/bricks/incrementer/incrementer.py
+++ b/examples/bricks/incrementer/incrementer.py
@@ -3,9 +3,9 @@
 
 import time
 
-from titanfe.brick import BrickBase
-import titanfe.log
-log = titanfe.log.getLogger(__name__)
+from openfba.brick import BrickBase
+import openfba.log
+log = openfba.log.getLogger(__name__)
 
 
 class Brick(BrickBase):
diff --git a/examples/bricks/incrementer/test_increment.py b/examples/bricks/incrementer/test_increment.py
index 8aa09fc..5b50f85 100644
--- a/examples/bricks/incrementer/test_increment.py
+++ b/examples/bricks/incrementer/test_increment.py
@@ -6,8 +6,8 @@ from functools import partial
 
 import pytest
 
-from titanfe.constants import DEFAULT_PORT
-from titanfe.testing import TestRunner
+from openfba.constants import DEFAULT_PORT
+from openfba.testing import TestRunner
 from ujotypes import UjoInt64
 
 from incrementer import Brick
diff --git a/examples/bricks/kafkawriter/kafkawriter.py b/examples/bricks/kafkawriter/kafkawriter.py
index 6fc5b2a..1b73485 100644
--- a/examples/bricks/kafkawriter/kafkawriter.py
+++ b/examples/bricks/kafkawriter/kafkawriter.py
@@ -2,7 +2,7 @@ import json
 
 from kafka import KafkaProducer
 
-from titanfe.brick import BrickBase
+from openfba.brick import BrickBase
 
 
 class Brick(BrickBase):
diff --git a/examples/bricks/modulebrick/__init__.py b/examples/bricks/modulebrick/__init__.py
index 2e7914f..7ed9810 100644
--- a/examples/bricks/modulebrick/__init__.py
+++ b/examples/bricks/modulebrick/__init__.py
@@ -21,8 +21,8 @@
 
 from ujotypes.variants import UjoUInt8
 
-import titanfe.log
-log = titanfe.log.getLogger(__name__)
+import openfba.log
+log = openfba.log.getLogger(__name__)
 
 
 def do_brick_processing(brick, params_ujo, input_ujo):
diff --git a/examples/bricks/rest_display/src/rest_display.py b/examples/bricks/rest_display/src/rest_display.py
index cde0d2b..99da098 100644
--- a/examples/bricks/rest_display/src/rest_display.py
+++ b/examples/bricks/rest_display/src/rest_display.py
@@ -5,10 +5,10 @@ from uvicorn import Server, Config
 from starlette.responses import HTMLResponse
 import time
 
-from titanfe.brick import BrickBase
-import titanfe.log
+from openfba.brick import BrickBase
+import openfba.log
 
-log = titanfe.log.getLogger(__name__)
+log = openfba.log.getLogger(__name__)
 
 HTML_head_start = "<html> <head > <meta charset = 'utf-8' > <style>"
 HTML_head_end = "</style> </head > <body > <table  style = 'width:20%' >"
diff --git a/examples/bricks/selector/selector.py b/examples/bricks/selector/selector.py
index 7ca685f..ea773ed 100644
--- a/examples/bricks/selector/selector.py
+++ b/examples/bricks/selector/selector.py
@@ -3,10 +3,10 @@
 
 import time
 
-from titanfe.brick import BrickBase
-import titanfe.log
+from openfba.brick import BrickBase
+import openfba.log
 
-log = titanfe.log.getLogger(__name__)
+log = openfba.log.getLogger(__name__)
 
 
 class Brick(BrickBase):
diff --git a/titanfe/apps/rmq_to_elastic/__init__.py b/openfba/__init__.py
similarity index 98%
rename from titanfe/apps/rmq_to_elastic/__init__.py
rename to openfba/__init__.py
index 331d18e..86f955d 100644
--- a/titanfe/apps/rmq_to_elastic/__init__.py
+++ b/openfba/__init__.py
@@ -3,4 +3,4 @@
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # found in the LICENSE file in the root directory of this source tree.
-#
+#
\ No newline at end of file
diff --git a/titanfe/__init__.py b/openfba/apps/__init__.py
similarity index 100%
rename from titanfe/__init__.py
rename to openfba/apps/__init__.py
diff --git a/titanfe/apps/__init__.py b/openfba/apps/brick_runner/__init__.py
similarity index 100%
rename from titanfe/apps/__init__.py
rename to openfba/apps/brick_runner/__init__.py
diff --git a/titanfe/apps/brick_runner/__main__.py b/openfba/apps/brick_runner/__main__.py
similarity index 86%
rename from titanfe/apps/brick_runner/__main__.py
rename to openfba/apps/brick_runner/__main__.py
index 8097967..de1b946 100644
--- a/titanfe/apps/brick_runner/__main__.py
+++ b/openfba/apps/brick_runner/__main__.py
@@ -13,9 +13,12 @@ import asyncio
 import pickle
 import sys
 
-from titanfe import log as logging
-from titanfe.apps.brick_runner.runner import BrickRunner
-from titanfe.config import configuration
+from openfba import log as logging
+from openfba.apps.brick_runner.runner import BrickRunner
+from openfba.config import configuration
+
+# to support legacy stuff
+sys.modules["titanfe"] = sys.modules["openfba"]
 
 if "win" in sys.platform:
     # Windows specific event-loop policy
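The `sys.modules["titanfe"] = sys.modules["openfba"]` alias above keeps legacy imports working after the rename: once it is installed, `import titanfe.log` resolves submodules through the aliased package's `__path__`, i.e. against the `openfba` sources. A minimal sketch of the pattern with hypothetical module names:

    import sys
    import types

    # stand-in for the renamed package
    newpkg = types.ModuleType("newpkg")
    newpkg.VALUE = 42
    sys.modules["newpkg"] = newpkg

    # alias the old name to the very same module object
    sys.modules["oldpkg"] = sys.modules["newpkg"]

    import oldpkg  # resolves to the aliased module
    assert oldpkg.VALUE == 42
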
diff --git a/titanfe/apps/brick_runner/adapter.py b/openfba/apps/brick_runner/adapter.py
similarity index 97%
rename from titanfe/apps/brick_runner/adapter.py
rename to openfba/apps/brick_runner/adapter.py
index 9956cd6..2a0653a 100644
--- a/titanfe/apps/brick_runner/adapter.py
+++ b/openfba/apps/brick_runner/adapter.py
@@ -10,8 +10,8 @@ from collections import namedtuple
 from dataclasses import dataclass
 from Crypto.Cipher import AES
 
-from titanfe.repository import RepositoryService
-from titanfe.utils import generate_key
+from openfba.repository import RepositoryService
+from openfba.utils import generate_key
 from ...config import configuration
 
 MetaData = namedtuple("MetaData", "uid name")
diff --git a/titanfe/apps/brick_runner/brick.py b/openfba/apps/brick_runner/brick.py
similarity index 94%
rename from titanfe/apps/brick_runner/brick.py
rename to openfba/apps/brick_runner/brick.py
index 9500e37..d4e2a98 100644
--- a/titanfe/apps/brick_runner/brick.py
+++ b/openfba/apps/brick_runner/brick.py
@@ -14,12 +14,12 @@ import janus
 
 from ujotypes import UjoBase
 
-from titanfe import log as logging
-from titanfe.apps.control_peer.brick import BrickInstanceDefinition
-from titanfe.brick import InletBrickBase
-from titanfe.constants import DEFAULT_PORT
-from titanfe.ujo_helper import python_to_ujo
-from titanfe.utils import get_module, time_delta_in_ms
+from openfba import log as logging
+from openfba.apps.control_peer.brick import BrickInstanceDefinition
+from openfba.brick import InletBrickBase
+from openfba.constants import DEFAULT_PORT
+from openfba.ujo_helper import python_to_ujo
+from openfba.utils import get_module, time_delta_in_ms
 
 from .adapter import BrickAdapter, AdapterMeta
 from .output import Output
diff --git a/titanfe/apps/brick_runner/input.py b/openfba/apps/brick_runner/input.py
similarity index 95%
rename from titanfe/apps/brick_runner/input.py
rename to openfba/apps/brick_runner/input.py
index 9f72c1d..cbaeb5a 100644
--- a/titanfe/apps/brick_runner/input.py
+++ b/openfba/apps/brick_runner/input.py
@@ -11,8 +11,8 @@ import functools
 import typing as T
 
 
-import titanfe.log
-from titanfe.apps.brick_runner.packet import Packet
+import openfba.log
+from openfba.apps.brick_runner.packet import Packet
 
 Port = T.NewType("Port", str)
 
@@ -27,7 +27,7 @@ class Input:
         max_idle_time=60,
         logger=None,
     ):
-        self.log = logger.getChild("Input") if logger else titanfe.log.getLogger(__name__)
+        self.log = logger.getChild("Input") if logger else openfba.log.getLogger(__name__)
         self.transport = transport
 
         self.max_idle_time = max((max_idle_time, 0.2))  # give it at least a chance to run
diff --git a/titanfe/apps/brick_runner/metrics.py b/openfba/apps/brick_runner/metrics.py
similarity index 96%
rename from titanfe/apps/brick_runner/metrics.py
rename to openfba/apps/brick_runner/metrics.py
index 842767a..ce31f1c 100644
--- a/titanfe/apps/brick_runner/metrics.py
+++ b/openfba/apps/brick_runner/metrics.py
@@ -11,8 +11,8 @@ import platform
 from abc import ABC
 from dataclasses import dataclass, field
 
-import titanfe.log
-from titanfe.utils import DictConvertable, iso_utc_time_string
+import openfba.log
+from openfba.utils import DictConvertable, iso_utc_time_string
 
 
 RMQ_METRIC_EXCHANGE = "openfba.metrics"
@@ -29,7 +29,7 @@ class MetricEmitter:
     """
 
     def __init__(self, metrics_metadata, transport, logger):
-        self.log = logger.getChild("MetricEmitter") if logger else titanfe.log.getLogger(__name__)
+        self.log = logger.getChild("MetricEmitter") if logger else openfba.log.getLogger(__name__)
         self.metrics_meta = metrics_metadata
         self.transport = transport
         self.transport.channel.exchange_declare(
diff --git a/titanfe/apps/brick_runner/output.py b/openfba/apps/brick_runner/output.py
similarity index 92%
rename from titanfe/apps/brick_runner/output.py
rename to openfba/apps/brick_runner/output.py
index 5ca544f..8fbe02d 100644
--- a/titanfe/apps/brick_runner/output.py
+++ b/openfba/apps/brick_runner/output.py
@@ -13,10 +13,10 @@ from dataclasses import dataclass
 from UJOSchema import schema_to_type
 from ujotypes import UjoBase, UJO_VARIANT_NONE
 
-import titanfe.log
-from titanfe.apps.brick_runner.packet import Packet
-from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
-from titanfe.apps.control_peer.brick import Connection
+import openfba.log
+from openfba.apps.brick_runner.packet import Packet
+from openfba.apps.brick_runner.value_mapping import BufferDescription, MappingRules
+from openfba.apps.control_peer.brick import Connection
 
 
 @dataclass
@@ -42,7 +42,7 @@ class Output:
         exchange_name,
         logger=None,
     ):
-        self.log = logger.getChild("Output") if logger else titanfe.log.getLogger(__name__)
+        self.log = logger.getChild("Output") if logger else openfba.log.getLogger(__name__)
 
         self.ports: T.Dict[str, Port] = {
             port_name: Port(
diff --git a/titanfe/apps/brick_runner/packet.py b/openfba/apps/brick_runner/packet.py
similarity index 93%
rename from titanfe/apps/brick_runner/packet.py
rename to openfba/apps/brick_runner/packet.py
index 6f2433c..0b2a922 100644
--- a/titanfe/apps/brick_runner/packet.py
+++ b/openfba/apps/brick_runner/packet.py
@@ -15,10 +15,10 @@ from dataclasses import dataclass, field
 from ujotypes import UjoBase, UjoStringUTF8, read_buffer, UjoMap, ujo_to_python
 from ujotypes.variants.none import UjoNone
 
-from titanfe.messages import PacketMessage
-from titanfe.ujo_helper import py_to_ujo_bytes
-from titanfe.utils import create_uid, ns_to_ms, time_delta_in_ms, DictConvertable
-from titanfe.apps.brick_runner.value_mapping import Buffer
+from openfba.messages import PacketMessage
+from openfba.ujo_helper import py_to_ujo_bytes
+from openfba.utils import create_uid, ns_to_ms, time_delta_in_ms, DictConvertable
+from openfba.apps.brick_runner.value_mapping import Buffer
 
 
 # ENCODING = "PICKLE"
diff --git a/titanfe/apps/brick_runner/runner.py b/openfba/apps/brick_runner/runner.py
similarity index 97%
rename from titanfe/apps/brick_runner/runner.py
rename to openfba/apps/brick_runner/runner.py
index 1c920e2..ac2bdea 100644
--- a/titanfe/apps/brick_runner/runner.py
+++ b/openfba/apps/brick_runner/runner.py
@@ -12,9 +12,9 @@ import os
 import signal
 
 
-from titanfe.apps.control_peer.brick import BrickInstanceDefinition
-from titanfe import log as logging
-from titanfe.rabbitmq import RMQ, AsyncRMQ
+from openfba.apps.control_peer.brick import BrickInstanceDefinition
+from openfba import log as logging
+from openfba.rabbitmq import RMQ, AsyncRMQ
 
 from .brick import Brick
 from .input import Input
diff --git a/titanfe/apps/brick_runner/value_mapping.py b/openfba/apps/brick_runner/value_mapping.py
similarity index 99%
rename from titanfe/apps/brick_runner/value_mapping.py
rename to openfba/apps/brick_runner/value_mapping.py
index 9cc9fa3..0b5fec3 100644
--- a/titanfe/apps/brick_runner/value_mapping.py
+++ b/openfba/apps/brick_runner/value_mapping.py
@@ -15,7 +15,7 @@ from collections.abc import MutableMapping, Mapping
 
 from ujotypes import UjoStringUTF8, UjoMap, UjoBase, UjoList, UjoNone
 
-from titanfe.ujo_helper import get_ujo_value, python_to_ujo
+from openfba.ujo_helper import get_ujo_value, python_to_ujo
 
 CONST = "constant"
 BUFFER = "buffer"
diff --git a/titanfe/apps/brick_runner/__init__.py b/openfba/apps/control_peer/__init__.py
similarity index 100%
rename from titanfe/apps/brick_runner/__init__.py
rename to openfba/apps/control_peer/__init__.py
diff --git a/titanfe/apps/control_peer/__main__.py b/openfba/apps/control_peer/__main__.py
similarity index 88%
rename from titanfe/apps/control_peer/__main__.py
rename to openfba/apps/control_peer/__main__.py
index 8e825de..953289a 100644
--- a/titanfe/apps/control_peer/__main__.py
+++ b/openfba/apps/control_peer/__main__.py
@@ -16,11 +16,15 @@ from pathlib import Path
 import os
 import site
 
-import titanfe.log
-from titanfe.apps.control_peer.control_peer import ControlPeer
-from titanfe.config import configuration
+import openfba.log
+from openfba.apps.control_peer.control_peer import ControlPeer
+from openfba.config import configuration
 
-log = titanfe.log.getLogger(__name__)
+# to support legacy stuff
+import sys
+sys.modules["titanfe"] = sys.modules["openfba"]
+
+log = openfba.log.getLogger(__name__)
 
 
 async def run_app(args):
@@ -47,7 +51,7 @@ def main():
     arg_parser.add_argument(
         "-brick_folder",
         help="Brick folder",
-        default=str(Path.home() / "titanfe/bricks"),
+        default=str(Path.home() / "openfba/bricks"),
     )
     arg_parser.add_argument(
         "-config_file",
diff --git a/titanfe/apps/control_peer/brick.py b/openfba/apps/control_peer/brick.py
similarity index 99%
rename from titanfe/apps/control_peer/brick.py
rename to openfba/apps/control_peer/brick.py
index f458bc1..2b2f1c8 100644
--- a/titanfe/apps/control_peer/brick.py
+++ b/openfba/apps/control_peer/brick.py
@@ -20,9 +20,9 @@ import os
 import subprocess
 import typing as T
 
-from titanfe.constants import GET_PIP
-from titanfe import log as logging
-from titanfe.utils import truncate
+from openfba.constants import GET_PIP
+from openfba import log as logging
+from openfba.utils import truncate
 from .services import package_manager
 from ...config import configuration
 
diff --git a/titanfe/apps/control_peer/config.yaml b/openfba/apps/control_peer/config.yaml
similarity index 100%
rename from titanfe/apps/control_peer/config.yaml
rename to openfba/apps/control_peer/config.yaml
diff --git a/titanfe/apps/control_peer/control_peer.py b/openfba/apps/control_peer/control_peer.py
similarity index 98%
rename from titanfe/apps/control_peer/control_peer.py
rename to openfba/apps/control_peer/control_peer.py
index 3ec941f..591caf0 100644
--- a/titanfe/apps/control_peer/control_peer.py
+++ b/openfba/apps/control_peer/control_peer.py
@@ -12,8 +12,8 @@ import signal
 from collections import defaultdict
 from concurrent.futures.thread import ThreadPoolExecutor
 
-from titanfe import log as logging
-from titanfe.log import TitanLogAdapter, FlowContext
+from openfba import log as logging
+from openfba.log import TitanLogAdapter, FlowContext
 from .services import package_manager, grid_manager, ServiceError
 from .brick import BrickBaseDefinition
 from .runner import BrickRunner
diff --git a/titanfe/apps/control_peer/runner.py b/openfba/apps/control_peer/runner.py
similarity index 95%
rename from titanfe/apps/control_peer/runner.py
rename to openfba/apps/control_peer/runner.py
index b7c40c8..79e1a60 100644
--- a/titanfe/apps/control_peer/runner.py
+++ b/openfba/apps/control_peer/runner.py
@@ -12,9 +12,9 @@ import pickle
 import signal
 import subprocess
 
-from titanfe import log as logging
-from titanfe.config import configuration
-from titanfe.utils import create_uid, Flag
+from openfba import log as logging
+from openfba.config import configuration
+from openfba.utils import create_uid, Flag
 
 
 class BrickRunner:
@@ -54,7 +54,7 @@ class BrickRunner:
         br_command = [
             self.brick.base.exe,
             "-m",
-            "titanfe.apps.brick_runner",
+            "openfba.apps.brick_runner",
             "-id",
             str(self.uid),
             "-configuration",
diff --git a/titanfe/apps/control_peer/services.py b/openfba/apps/control_peer/services.py
similarity index 98%
rename from titanfe/apps/control_peer/services.py
rename to openfba/apps/control_peer/services.py
index 9ad8428..ee10370 100644
--- a/titanfe/apps/control_peer/services.py
+++ b/openfba/apps/control_peer/services.py
@@ -16,8 +16,8 @@ from aiohttp.client_exceptions import ClientError
 # because we don't know how many threads are used and each will have its own asyncio loop
 # there must be a better way, but right now I'm short on time...
 
-from titanfe import log as logging
-from titanfe.config import configuration
+from openfba import log as logging
+from openfba.config import configuration
 
 log = logging.getLogger(__name__)
 
diff --git a/titanfe/apps/control_peer/webapi/__init__.py b/openfba/apps/control_peer/webapi/__init__.py
similarity index 100%
rename from titanfe/apps/control_peer/webapi/__init__.py
rename to openfba/apps/control_peer/webapi/__init__.py
diff --git a/titanfe/apps/control_peer/webapi/app.py b/openfba/apps/control_peer/webapi/app.py
similarity index 98%
rename from titanfe/apps/control_peer/webapi/app.py
rename to openfba/apps/control_peer/webapi/app.py
index d2acf6c..209795a 100644
--- a/titanfe/apps/control_peer/webapi/app.py
+++ b/openfba/apps/control_peer/webapi/app.py
@@ -12,7 +12,7 @@ from fastapi import FastAPI
 from pydantic import BaseModel  # pylint: disable=no-name-in-module
 from uvicorn import Server, Config
 
-from titanfe.config import configuration
+from openfba.config import configuration
 from .bricks import create_brick_router
 from .flows import create_flow_router
 from .state import create_state_router
diff --git a/titanfe/apps/control_peer/webapi/bricks.py b/openfba/apps/control_peer/webapi/bricks.py
similarity index 95%
rename from titanfe/apps/control_peer/webapi/bricks.py
rename to openfba/apps/control_peer/webapi/bricks.py
index d644543..9f6ad34 100644
--- a/titanfe/apps/control_peer/webapi/bricks.py
+++ b/openfba/apps/control_peer/webapi/bricks.py
@@ -11,8 +11,8 @@ from fastapi import APIRouter
 from pydantic import BaseModel  # pylint: disable=no-name-in-module
 
 # Request Parameter
-from titanfe.apps.control_peer.brick import BrickInstanceDefinition, BrickBaseDefinition
-from titanfe import log as logging
+from openfba.apps.control_peer.brick import BrickInstanceDefinition, BrickBaseDefinition
+from openfba import log as logging
 
 
 log = logging.getLogger(__name__)
diff --git a/titanfe/apps/control_peer/webapi/flows.py b/openfba/apps/control_peer/webapi/flows.py
similarity index 100%
rename from titanfe/apps/control_peer/webapi/flows.py
rename to openfba/apps/control_peer/webapi/flows.py
diff --git a/titanfe/apps/control_peer/webapi/state.py b/openfba/apps/control_peer/webapi/state.py
similarity index 100%
rename from titanfe/apps/control_peer/webapi/state.py
rename to openfba/apps/control_peer/webapi/state.py
diff --git a/titanfe/apps/control_peer/__init__.py b/openfba/apps/kafka_to_elastic/__init__.py
similarity index 100%
rename from titanfe/apps/control_peer/__init__.py
rename to openfba/apps/kafka_to_elastic/__init__.py
diff --git a/titanfe/apps/kafka_to_elastic/__main__.py b/openfba/apps/kafka_to_elastic/__main__.py
similarity index 99%
rename from titanfe/apps/kafka_to_elastic/__main__.py
rename to openfba/apps/kafka_to_elastic/__main__.py
index a4577b4..a7ac12a 100644
--- a/titanfe/apps/kafka_to_elastic/__main__.py
+++ b/openfba/apps/kafka_to_elastic/__main__.py
@@ -72,8 +72,8 @@ async def main():
         "-t",
         "--topics",
         nargs="+",
-        default=["titanfe.metrics"],
-        help="topics of the flowengine logs`<one or more topics>` [default: titanfe.metrics]",
+        default=["openfba.metrics"],
+        help="topics of the flowengine logs`<one or more topics>` [default: openfba.metrics]",
     )
     args = arg_parser.parse_args()
 
diff --git a/titanfe/apps/kafka_to_elastic/__init__.py b/openfba/apps/kafka_viewer/__init__.py
similarity index 100%
rename from titanfe/apps/kafka_to_elastic/__init__.py
rename to openfba/apps/kafka_viewer/__init__.py
diff --git a/titanfe/apps/kafka_viewer/__main__.py b/openfba/apps/kafka_viewer/__main__.py
similarity index 96%
rename from titanfe/apps/kafka_viewer/__main__.py
rename to openfba/apps/kafka_viewer/__main__.py
index 556bad8..0ab77d3 100644
--- a/titanfe/apps/kafka_viewer/__main__.py
+++ b/openfba/apps/kafka_viewer/__main__.py
@@ -36,7 +36,7 @@ def main():
     arg_parser.add_argument(
         "-topics",
         help="list of topics 'topic1 topic2",
-        default="titanfe.metrics titanfe.logs",
+        default="openfba.metrics openfba.logs",
     )
     arg_parser.add_argument(
         "-kafka",
diff --git a/titanfe/apps/kafka_viewer/__init__.py b/openfba/apps/rmq_to_elastic/__init__.py
similarity index 100%
rename from titanfe/apps/kafka_viewer/__init__.py
rename to openfba/apps/rmq_to_elastic/__init__.py
diff --git a/titanfe/apps/rmq_to_elastic/__main__.py b/openfba/apps/rmq_to_elastic/__main__.py
similarity index 100%
rename from titanfe/apps/rmq_to_elastic/__main__.py
rename to openfba/apps/rmq_to_elastic/__main__.py
diff --git a/titanfe/brick.py b/openfba/brick.py
similarity index 97%
rename from titanfe/brick.py
rename to openfba/brick.py
index d07699e..5a69ee6 100644
--- a/titanfe/brick.py
+++ b/openfba/brick.py
@@ -12,7 +12,7 @@ from typing import Type, Optional, Dict
 
 from ujotypes import UjoBase
 
-from titanfe.apps.brick_runner.adapter import BrickAdapter
+from openfba.apps.brick_runner.adapter import BrickAdapter
 
 
 class ConfigurationError(Exception):
diff --git a/titanfe/config.py b/openfba/config.py
similarity index 95%
rename from titanfe/config.py
rename to openfba/config.py
index f5cf1ff..a8f3604 100644
--- a/titanfe/config.py
+++ b/openfba/config.py
@@ -1,106 +1,106 @@
-#
-# Copyright (c) 2019-present, wobe-systems GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# found in the LICENSE file in the root directory of this source tree.
-#
-
-""" the global configuration """
-import urllib.parse
-from dataclasses import dataclass
-import os
-
-# pylint: disable=invalid-name
-from pathlib import Path
-from typing import Union
-
-from ruamel.yaml import YAMLError, YAML
-
-DEFAULT_RABBITMQ_URL = "amqp://guest:guest@localhost:5672"
-DEFAULT_GRIDMANAGER_ADDRESS = "http://localhost:8080/gridmanager"
-DEFAULT_FLOWMANAGER_ADDRESS = "http://localhost:9002/flowmanager"
-DEFAULT_PACKAGEMANAGER_ADDRESS = "http://localhost:8087/packagemanager"
-DEFAULT_REPOSERVICE_ADDRESS = "http://localhost:8085/object"
-DEFAULT_ENDPOINTPROVIDER_ADDRESS = "tcp://127.0.0.1:9021"
-VALID_KEY_LENGTHS = (16, 24, 32)
-
-
-class NotFound:  # pylint: disable=too-few-public-methods
-    def __bool__(self):
-        return False
-
-
-NOTFOUND = NotFound()
-
-OPTION_ALIASES = {
-    "IP": "IP",
-    "gridmanager_address": "GridManager",
-    "flowmanager_address": "FlowManager",
-    "packagemanager_address": "PackageManager",
-    "reposervice_address": "RepositoryService",
-    "rabbitmq_url": "RabbitMQUrl",
-    "brick_folder": "BrickFolder",
-    "secret_key": "SecretKey",
-    "endpoint_provider": "EndpointProvider",
-}
-
-
-@dataclass
-class RabbitMQConnectionParams:
-    host: str
-    port: int
-    user: str
-    password: str
-
-
-@dataclass
-class Configuration:
-    """Current Configuration"""
-
-    rabbitmq_url: str = DEFAULT_RABBITMQ_URL
-    gridmanager_address: str = DEFAULT_GRIDMANAGER_ADDRESS
-    flowmanager_address: str = DEFAULT_FLOWMANAGER_ADDRESS
-    packagemanager_address: str = DEFAULT_PACKAGEMANAGER_ADDRESS
-    reposervice_address: str = DEFAULT_REPOSERVICE_ADDRESS
-    secret_key: str = os.getenv("TITAN_SECRET_KEY") or None
-    endpoint_provider: str = DEFAULT_ENDPOINTPROVIDER_ADDRESS
-    IP: str = None
-
-    brick_folder: str = str(Path.home() / "titanfe/bricks")
-
-    def update(self, config: Union["Configuration", dict]):
-        """update config from dict or other config"""
-        for attr, alias in OPTION_ALIASES.items():
-            if isinstance(config, Configuration):
-                value = getattr(config, attr, NOTFOUND)
-            else:
-                value = config.get(attr, NOTFOUND) or config.get(alias, NOTFOUND)
-
-            if value is NOTFOUND:
-                continue
-
-            setattr(self, attr, value)
-
-    def update_from_yaml(self, file_path):
-        """Read and update the configuration from a yaml file"""
-        try:
-            with open(file_path) as f:  # pylint: disable=unspecified-encoding
-                config = YAML(typ='safe', pure=True).load(f)
-            self.update(config)
-        except OSError as error:
-            print("Could not read config file", file_path, "-", error)
-        except YAMLError as error:
-            print("Could not parse config file", file_path, "-", error)
-
-    @property
-    def rabbitmq_params(self):
-        parsed_url = urllib.parse.urlparse(self.rabbitmq_url)
-        return RabbitMQConnectionParams(
-            host=parsed_url.hostname,
-            port=parsed_url.port,
-            user=urllib.parse.unquote(parsed_url.username) or None,
-            password=urllib.parse.unquote(parsed_url.password) or None,
-        )
-
-
-configuration = Configuration()
+#
+# Copyright (c) 2019-present, wobe-systems GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# found in the LICENSE file in the root directory of this source tree.
+#
+
+""" the global configuration """
+import urllib.parse
+from dataclasses import dataclass
+import os
+
+# pylint: disable=invalid-name
+from pathlib import Path
+from typing import Union
+
+from ruamel.yaml import YAMLError, YAML
+
+DEFAULT_RABBITMQ_URL = "amqp://guest:guest@localhost:5672"
+DEFAULT_GRIDMANAGER_ADDRESS = "http://localhost:8080/gridmanager"
+DEFAULT_FLOWMANAGER_ADDRESS = "http://localhost:9002/flowmanager"
+DEFAULT_PACKAGEMANAGER_ADDRESS = "http://localhost:8087/packagemanager"
+DEFAULT_REPOSERVICE_ADDRESS = "http://localhost:8085/object"
+DEFAULT_ENDPOINTPROVIDER_ADDRESS = "tcp://127.0.0.1:9021"
+VALID_KEY_LENGTHS = (16, 24, 32)
+
+
+class NotFound:  # pylint: disable=too-few-public-methods
+    def __bool__(self):
+        return False
+
+
+NOTFOUND = NotFound()
+
+OPTION_ALIASES = {
+    "IP": "IP",
+    "gridmanager_address": "GridManager",
+    "flowmanager_address": "FlowManager",
+    "packagemanager_address": "PackageManager",
+    "reposervice_address": "RepositoryService",
+    "rabbitmq_url": "RabbitMQUrl",
+    "brick_folder": "BrickFolder",
+    "secret_key": "SecretKey",
+    "endpoint_provider": "EndpointProvider",
+}
+
+
+@dataclass
+class RabbitMQConnectionParams:
+    host: str
+    port: int
+    user: str
+    password: str
+
+
+@dataclass
+class Configuration:
+    """Current Configuration"""
+
+    rabbitmq_url: str = DEFAULT_RABBITMQ_URL
+    gridmanager_address: str = DEFAULT_GRIDMANAGER_ADDRESS
+    flowmanager_address: str = DEFAULT_FLOWMANAGER_ADDRESS
+    packagemanager_address: str = DEFAULT_PACKAGEMANAGER_ADDRESS
+    reposervice_address: str = DEFAULT_REPOSERVICE_ADDRESS
+    secret_key: str = os.getenv("TITAN_SECRET_KEY") or None
+    endpoint_provider: str = DEFAULT_ENDPOINTPROVIDER_ADDRESS
+    IP: str = None
+
+    brick_folder: str = str(Path.home() / "openfba/bricks")
+
+    def update(self, config: Union["Configuration", dict]):
+        """update config from dict or other config"""
+        for attr, alias in OPTION_ALIASES.items():
+            if isinstance(config, Configuration):
+                value = getattr(config, attr, NOTFOUND)
+            else:
+                value = config.get(attr, NOTFOUND) or config.get(alias, NOTFOUND)
+
+            if value is NOTFOUND:
+                continue
+
+            setattr(self, attr, value)
+
+    def update_from_yaml(self, file_path):
+        """Read and update the configuration from a yaml file"""
+        try:
+            with open(file_path) as f:  # pylint: disable=unspecified-encoding
+                config = YAML(typ='safe', pure=True).load(f)
+            self.update(config)
+        except OSError as error:
+            print("Could not read config file", file_path, "-", error)
+        except YAMLError as error:
+            print("Could not parse config file", file_path, "-", error)
+
+    @property
+    def rabbitmq_params(self):
+        parsed_url = urllib.parse.urlparse(self.rabbitmq_url)
+        return RabbitMQConnectionParams(
+            host=parsed_url.hostname,
+            port=parsed_url.port,
+            user=urllib.parse.unquote(parsed_url.username) if parsed_url.username else None,
+            password=urllib.parse.unquote(parsed_url.password) if parsed_url.password else None,
+        )
+
+
+configuration = Configuration()
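
For reference, a short usage sketch of the configuration API above (the URL is
illustrative): keys from OPTION_ALIASES let a YAML-style alias such as
RabbitMQUrl update the rabbitmq_url attribute, and rabbitmq_params derives the
broker connection values from that URL.

    from openfba.config import configuration

    configuration.update({"RabbitMQUrl": "amqp://user:secret@rabbit.local:5672"})
    params = configuration.rabbitmq_params
    print(params.host, params.port)  # rabbit.local 5672
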
diff --git a/titanfe/connection.py b/openfba/connection.py
similarity index 95%
rename from titanfe/connection.py
rename to openfba/connection.py
index 6ecccd7..72c5e5d 100644
--- a/titanfe/connection.py
+++ b/openfba/connection.py
@@ -16,10 +16,10 @@ from typing import Optional
 
 from ujotypes import UjoMap, read_buffer, ujo_to_python, UjoStringUTF8
 
-import titanfe.log
-from titanfe.apps.brick_runner.value_mapping import Buffer
-from titanfe.ujo_helper import py_to_ujo_bytes
-from titanfe.messages import Message
+import openfba.log
+from openfba.apps.brick_runner.value_mapping import Buffer
+from openfba.ujo_helper import py_to_ujo_bytes
+from openfba.messages import Message
 
 ENCODING = "UJO"
 # ENCODING = "PICKLE"
@@ -73,7 +73,7 @@ class Connection:
         self.writer = writer
         self.closed = False
 
-        self.log = log.getChild("Connection") if log else titanfe.log.getLogger(__name__)
+        self.log = log.getChild("Connection") if log else openfba.log.getLogger(__name__)
 
         if encoding == "PICKLE":
             self.decode = pickle.loads
diff --git a/titanfe/constants.py b/openfba/constants.py
similarity index 100%
rename from titanfe/constants.py
rename to openfba/constants.py
diff --git a/titanfe/get-pip.py b/openfba/get-pip.py
similarity index 100%
rename from titanfe/get-pip.py
rename to openfba/get-pip.py
diff --git a/titanfe/log.py b/openfba/log.py
similarity index 96%
rename from titanfe/log.py
rename to openfba/log.py
index 423a327..169f916 100644
--- a/titanfe/log.py
+++ b/openfba/log.py
@@ -19,8 +19,8 @@ from datetime import datetime
 
 import ruamel.yaml
 
-from titanfe.rabbitmq import RMQ
-from titanfe.ujo_helper import py_to_ujo_bytes
+from openfba.rabbitmq import RMQ
+from openfba.ujo_helper import py_to_ujo_bytes
 
 RMQ_LOG_EXCHANGE = "openfba.logging"
 
@@ -46,11 +46,11 @@ class FlowContext:
     brickname: str = ""
 
     @classmethod
-    def from_flow(cls, flow: "titanfe.apps.control_peer.flow.Flow"):  # noqa
+    def from_flow(cls, flow: "openfba.apps.control_peer.flow.Flow"):  # noqa
         return cls(flow.uid, flow.name)
 
     @classmethod
-    def from_brick(cls, brick: "titanfe.apps.control_peer.brick.Brick"):  # noqa
+    def from_brick(cls, brick: "openfba.apps.control_peer.brick.Brick"):  # noqa
         return cls(brick.flow.uid, brick.flow.name, brick.uid, brick.name)
 
     def asdict(self):
@@ -117,8 +117,8 @@ def getLogger(  # pylint: disable=invalid-name ; noqa: N802
     Returns:
         logging.Logger: a Logger
     """
-    if not name.startswith("titanfe."):
-        name = f"titanfe.bricks.{name}"
+    if not name.startswith("openfba."):
+        name = f"openfba.bricks.{name}"
 
     logger = logging.getLogger(name)
 
diff --git a/titanfe/log_config.yml b/openfba/log_config.yml
similarity index 94%
rename from titanfe/log_config.yml
rename to openfba/log_config.yml
index 59c1dc8..0af487a 100644
--- a/titanfe/log_config.yml
+++ b/openfba/log_config.yml
@@ -13,7 +13,7 @@ handlers:
     stream: ext://sys.stdout
 
 loggers:
-  titanfe:
+  openfba:
     level:
       #DEBUG
       #METRIC
@@ -21,7 +21,7 @@ loggers:
       WARNING
       #ERROR
 
-  titanfe.bricks:
+  openfba.bricks:
     level:
       #DEBUG
       INFO
diff --git a/titanfe/messages.py b/openfba/messages.py
similarity index 100%
rename from titanfe/messages.py
rename to openfba/messages.py
diff --git a/titanfe/rabbitmq.py b/openfba/rabbitmq.py
similarity index 98%
rename from titanfe/rabbitmq.py
rename to openfba/rabbitmq.py
index a5a7332..efc87e9 100644
--- a/titanfe/rabbitmq.py
+++ b/openfba/rabbitmq.py
@@ -13,7 +13,7 @@ import aioamqp.channel
 
 import pika
 
-from titanfe.config import configuration as config
+from openfba.config import configuration as config
 
 
 # TODO: Handle Reconnect?
diff --git a/titanfe/repository.py b/openfba/repository.py
similarity index 97%
rename from titanfe/repository.py
rename to openfba/repository.py
index fd86133..e0424c0 100644
--- a/titanfe/repository.py
+++ b/openfba/repository.py
@@ -16,8 +16,8 @@ import requests
 
 from dataclasses_json import dataclass_json
 
-from titanfe.constants import BRICKRUNNER_DATABASE
-from titanfe.config import configuration
+from openfba.constants import BRICKRUNNER_DATABASE
+from openfba.config import configuration
 
 
 @dataclass_json
diff --git a/titanfe/testing/__init__.py b/openfba/testing/__init__.py
similarity index 100%
rename from titanfe/testing/__init__.py
rename to openfba/testing/__init__.py
diff --git a/titanfe/testing/testrunner.py b/openfba/testing/testrunner.py
similarity index 94%
rename from titanfe/testing/testrunner.py
rename to openfba/testing/testrunner.py
index a63b600..ae81857 100644
--- a/titanfe/testing/testrunner.py
+++ b/openfba/testing/testrunner.py
@@ -21,15 +21,15 @@ from unittest.mock import MagicMock
 
 import janus
 
-from titanfe.apps.brick_runner.input import Input as OriginalInput
-from titanfe.apps.brick_runner.brick import Brick
-from titanfe.apps.brick_runner.packet import Packet
-from titanfe.apps.brick_runner.runner import BrickRunner
-from titanfe.apps.control_peer.brick import BrickInstanceDefinition
-from titanfe.brick import InletBrickBase
-from titanfe.constants import DEFAULT_PORT
-from titanfe.log import TitanPlatformLogger
-from titanfe.utils import create_uid
+from openfba.apps.brick_runner.input import Input as OriginalInput
+from openfba.apps.brick_runner.brick import Brick
+from openfba.apps.brick_runner.packet import Packet
+from openfba.apps.brick_runner.runner import BrickRunner
+from openfba.apps.control_peer.brick import BrickInstanceDefinition
+from openfba.brick import InletBrickBase
+from openfba.constants import DEFAULT_PORT
+from openfba.log import TitanPlatformLogger
+from openfba.utils import create_uid
 
 logging.basicConfig(
     stream=sys.stdout,
diff --git a/titanfe/ujo_helper.py b/openfba/ujo_helper.py
similarity index 100%
rename from titanfe/ujo_helper.py
rename to openfba/ujo_helper.py
diff --git a/titanfe/utils.py b/openfba/utils.py
similarity index 99%
rename from titanfe/utils.py
rename to openfba/utils.py
index 0318bc8..7e3e1e8 100644
--- a/titanfe/utils.py
+++ b/openfba/utils.py
@@ -23,7 +23,7 @@ from datetime import datetime
 from pathlib import Path
 from types import ModuleType
 
-from titanfe import log as logging
+from openfba import log as logging
 
 log = logging.getLogger(__name__)
 
diff --git a/setup.py b/setup.py
index 9bc1478..938c063 100644
--- a/setup.py
+++ b/setup.py
@@ -23,9 +23,9 @@ except:
     VERSION = BASEVERSION + ".1"
 
 setup(
-    name="titanfe",
+    name="openfba",
     version=VERSION,
-    description="titan Data Flow Engine for Python",
+    description="OpenFBA Data Flow Engine",
     long_description=long_description,
     long_description_content_type="text/markdown",
     author="wobe-systems GmbH",
@@ -38,29 +38,32 @@ setup(
     },
     license="Apache License, Version 2.0",
     classifiers=[
-        "Programming Language :: Python :: 3",
         "License :: OSI Approved :: Apache Software License",
         "Operating System :: Microsoft :: Windows",
         "Operating System :: POSIX :: Linux",
         "Development Status :: 3 - Alpha",
         "Programming Language :: Python :: 3 :: Only",
         "Programming Language :: Python :: 3.7",
+        "Programming Language :: Python :: 3.8",
+        "Programming Language :: Python :: 3.9",
+        "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
+        "Programming Language :: Python :: 3.12",
     ],
     platforms=["Windows", "Linux"],
     packages=[
-        "titanfe",
-        "titanfe.apps",
-        "titanfe.apps.control_peer",
-        "titanfe.apps.control_peer.webapi",
-        "titanfe.apps.brick_runner",
-        #"titanfe.apps.brick_runner.output",
-        "titanfe.apps.kafka_to_elastic",
-        "titanfe.apps.kafka_viewer",
-        "titanfe.testing",
+        "openfba",
+        "openfba.apps",
+        "openfba.apps.control_peer",
+        "openfba.apps.control_peer.webapi",
+        "openfba.apps.brick_runner",
+        "openfba.apps.kafka_to_elastic",
+        "openfba.apps.kafka_viewer",
+        "openfba.testing",
     ],
-    package_dir={"titanfe": "titanfe"},
+    package_dir={"openfba": "openfba"},
     include_package_data=True,
-    package_data={"titanfe": ["log_config.yml",
+    package_data={"openfba": ["log_config.yml",
                               "apps/control_peer/config.yaml"]},
     install_requires=[
         "ujotypes",
@@ -69,7 +72,7 @@ setup(
         "janus",
         "elasticsearch",
         "fastapi",
-        "uvicorn"
+        "uvicorn",
         'uvloop ;platform_system=="Linux"',
         "aiohttp==3.9.0b",
         #"aiohttp-requests",
diff --git a/test/brick_runner/conftest.py b/test/brick_runner/conftest.py
index ee1d1f6..dc29bd9 100644
--- a/test/brick_runner/conftest.py
+++ b/test/brick_runner/conftest.py
@@ -18,9 +18,9 @@ from unittest.mock import MagicMock, patch
 
 import pytest
 
-from titanfe.apps.brick_runner.metrics import MetricEmitter
-from titanfe.apps.brick_runner.runner import BrickRunner
-from titanfe.apps.control_peer.brick import BrickInstanceDefinition
+from openfba.apps.brick_runner.metrics import MetricEmitter
+from openfba.apps.brick_runner.runner import BrickRunner
+from openfba.apps.control_peer.brick import BrickInstanceDefinition
 
 # pylint: disable=redefined-outer-name
 
@@ -74,7 +74,7 @@ class InputTransportDouble:
 
 @pytest.fixture()
 def patched_input_transport():
-    input_transport_path = "titanfe.apps.brick_runner.runner.AsyncRMQ"
+    input_transport_path = "openfba.apps.brick_runner.runner.AsyncRMQ"
     with patch(input_transport_path, InputTransportDouble):
         yield
 
@@ -94,7 +94,7 @@ class OutputTransportDouble:  # pylint: disable=too-few-public-methods
 
 @pytest.fixture()
 def patched_output_transport():
-    input_transport_path = "titanfe.apps.brick_runner.runner.RMQ"
+    input_transport_path = "openfba.apps.brick_runner.runner.RMQ"
     with patch(input_transport_path, OutputTransportDouble):
         yield
 
@@ -104,7 +104,7 @@ def patched_output_transport():
 async def brick_runner(patched_input_transport, patched_output_transport):
     """set up a brick runner, with networking patched away"""
     # pylint: disable=unused-argument
-    guess_module_path = "titanfe.apps.control_peer.brick.BrickBaseDefinition.guess_module_path"
+    guess_module_path = "openfba.apps.control_peer.brick.BrickBaseDefinition.guess_module_path"
     with patch(guess_module_path, MagicMock(return_value=Path("n/a"))):
         brick_definition = BrickInstanceDefinition.from_gridmanager(
             {
@@ -164,7 +164,7 @@ async def brick_runner(patched_input_transport, patched_output_transport):
     module = types.ModuleType("BrickDummy")
     module.do_brick_processing = lambda *args, **kwargs: None
 
-    getmodule = "titanfe.apps.brick_runner.brick.get_module"
+    getmodule = "openfba.apps.brick_runner.brick.get_module"
     with patch(
         getmodule, MagicMock(return_value=module)
     ):
diff --git a/test/brick_runner/test_adapter.py b/test/brick_runner/test_adapter.py
index d21376e..440af98 100644
--- a/test/brick_runner/test_adapter.py
+++ b/test/brick_runner/test_adapter.py
@@ -10,8 +10,8 @@ Test the Adapter that's passed into the Brick-modules
 """
 from unittest.mock import MagicMock
 
-from titanfe.apps.brick_runner.adapter import BrickAdapter, AdapterMeta
-from titanfe.config import configuration
+from openfba.apps.brick_runner.adapter import BrickAdapter, AdapterMeta
+from openfba.config import configuration
 
 
 def test_get_state(httpserver):
diff --git a/test/brick_runner/test_metrics.py b/test/brick_runner/test_metrics.py
index 2a66301..2717126 100644
--- a/test/brick_runner/test_metrics.py
+++ b/test/brick_runner/test_metrics.py
@@ -13,7 +13,9 @@ import json
 
 import platform
 
-from titanfe.apps.brick_runner.packet import Packet
+import pytest
+
+from openfba.apps.brick_runner.packet import Packet
 
 
 class Ignored:   # pylint: disable=too-few-public-methods
@@ -21,6 +23,7 @@ class Ignored:   # pylint: disable=too-few-public-methods
         return True
 
 
+@pytest.mark.asyncio
 async def test_packet_metrics(metric_emitter):  # noqa: F811
     """assure the metrics for packets are complete"""
     packet = Packet(uid="P-Test")
@@ -54,6 +57,7 @@ async def test_packet_metrics(metric_emitter):  # noqa: F811
     assert expected_metrics == metrics
 
 
+@pytest.mark.asyncio
 async def test_brick_metrics(metric_emitter):  # noqa: F811
     """assure the metrics for brick execution times are complete"""
     expected_exchange = "openfba.metrics"
diff --git a/test/brick_runner/test_portMapping.py b/test/brick_runner/test_portMapping.py
index 6f10e4b..aabe25d 100644
--- a/test/brick_runner/test_portMapping.py
+++ b/test/brick_runner/test_portMapping.py
@@ -8,11 +8,11 @@
 
 from ujotypes import UjoStringUTF8, UjoInt64, UjoFloat64, UjoBool, UjoMap
 
-from titanfe.brick import BrickBase
-from titanfe.testing import TestRunner
-from titanfe.ujo_helper import python_to_ujo
-from titanfe.apps.brick_runner.packet import Buffer
-from titanfe.apps.brick_runner.value_mapping import BufferDescription, MappingRules
+from openfba.brick import BrickBase
+from openfba.testing import TestRunner
+from openfba.ujo_helper import python_to_ujo
+from openfba.apps.brick_runner.packet import Buffer
+from openfba.apps.brick_runner.value_mapping import BufferDescription, MappingRules
 
 TestRunner.__test__ = False  # prevent PytestCollectionWarning "cannot collect test class"
 
diff --git a/test/brick_runner/test_runner.py b/test/brick_runner/test_runner.py
index cb372ca..78ef642 100644
--- a/test/brick_runner/test_runner.py
+++ b/test/brick_runner/test_runner.py
@@ -17,10 +17,10 @@ import pytest
 
 from ujotypes import UjoStringC
 
-from titanfe.apps.brick_runner.value_mapping import Buffer
-from titanfe.apps.brick_runner.packet import Packet
-from titanfe.brick import BrickBase
-from titanfe.constants import DEFAULT_PORT
+from openfba.apps.brick_runner.value_mapping import Buffer
+from openfba.apps.brick_runner.packet import Packet
+from openfba.brick import BrickBase
+from openfba.constants import DEFAULT_PORT
 
 
 @pytest.mark.asyncio
diff --git a/test/control_peer/test_install_brick.py b/test/control_peer/test_install_brick.py
index 4614bf4..175a553 100644
--- a/test/control_peer/test_install_brick.py
+++ b/test/control_peer/test_install_brick.py
@@ -8,8 +8,8 @@ import pytest
 from aiohttp import web
 from aiohttp.test_utils import TestServer as HttpServer
 
-from titanfe.apps.control_peer.brick import BrickBaseDefinition
-from titanfe.config import configuration
+from openfba.apps.control_peer.brick import BrickBaseDefinition
+from openfba.config import configuration
 
 BRICK_ID = "Test-123"
 COMPRESSED_BRICK = (
@@ -37,7 +37,7 @@ def brick_parent_folder():
 
 @pytest.fixture
 def brick(brick_parent_folder):
-    create_virtual_env = "titanfe.apps.control_peer.brick.BrickBaseDefinition.create_virtual_env"
+    create_virtual_env = "openfba.apps.control_peer.brick.BrickBaseDefinition.create_virtual_env"
     with patch(create_virtual_env, MagicMock()):
         yield BrickBaseDefinition(uid=BRICK_ID, name="Any", family="Any", logger=MagicMock())
 
diff --git a/test/control_peer/test_repository.py b/test/control_peer/test_repository.py
index 5ccbedb..38d5d98 100644
--- a/test/control_peer/test_repository.py
+++ b/test/control_peer/test_repository.py
@@ -11,9 +11,9 @@ The the repository service connection
 
 import pytest
 
-from titanfe.config import configuration
-from titanfe.constants import BRICKRUNNER_DATABASE
-from titanfe.repository import RepositoryService
+from openfba.config import configuration
+from openfba.constants import BRICKRUNNER_DATABASE
+from openfba.repository import RepositoryService
 
 from unittest.mock import MagicMock
 
diff --git a/test/control_peer/test_webapi.py b/test/control_peer/test_webapi.py
index fb84dec..8c376b1 100644
--- a/test/control_peer/test_webapi.py
+++ b/test/control_peer/test_webapi.py
@@ -11,8 +11,8 @@ from unittest.mock import MagicMock
 import pytest
 from starlette.testclient import TestClient
 
-from titanfe.apps.control_peer.control_peer import ControlPeer
-from titanfe.apps.control_peer.webapi.app import WebApi
+from openfba.apps.control_peer.control_peer import ControlPeer
+from openfba.apps.control_peer.webapi.app import WebApi
 
 
 async def async_magic():
diff --git a/test/test_logging.py b/test/test_logging.py
index 9b841f8..d2d2400 100644
--- a/test/test_logging.py
+++ b/test/test_logging.py
@@ -14,7 +14,7 @@ from unittest.mock import patch, MagicMock
 
 from ujotypes import read_buffer
 
-from titanfe import log
+from openfba import log
 
 
 class Ignored:  # pylint: disable=too-few-public-methods
@@ -33,7 +33,7 @@ def test_log_record_is_titan_specific():
 def test_logging_sends_contextual_bin_ujo_via_rmq():
     """ assure the log record is sent to rabbitmq as a binary ujo map"""
 
-    rmq_location = "titanfe.log.RMQ"
+    rmq_location = "openfba.log.RMQ"
 
     class RMQDummy():
         """a RMQ Dummy"""
@@ -52,9 +52,9 @@ def test_logging_sends_contextual_bin_ujo_via_rmq():
             self.published.put((exchange_name, queue_name, message))
 
     with patch(rmq_location, RMQDummy):
-        log.initialize("titanfe-test")
+        log.initialize("openfba-test")
 
-    logger = log.getLogger("titanfe.test", context=log.FlowContext("FUID", "FLOW", "BUID", "BRICK"))
+    logger = log.getLogger("openfba.test", context=log.FlowContext("FUID", "FLOW", "BUID", "BRICK"))
     logger.error("Test")
 
     expected = {
@@ -64,14 +64,14 @@ def test_logging_sends_contextual_bin_ujo_via_rmq():
         'FlowUID': 'FUID',
         'Hostname': platform.node(),
         'Message': 'Test',
-        'Servicename': 'titanfe-test',
+        'Servicename': 'openfba-test',
         'Severity': 'ERROR',
-        'Source': 'titanfe.test',
+        'Source': 'openfba.test',
         'Timestamp': Ignored()
     }
 
     exchange, queue, message = RMQDummy.published.get(timeout=0.1)
 
     assert exchange == log.RMQ_LOG_EXCHANGE
-    assert queue == "titanfe.test"
+    assert queue == "openfba.test"
     assert read_buffer(message).as_pyobject() == expected
diff --git a/test/test_testrunner.py b/test/test_testrunner.py
index ee55e5e..e1c46a6 100644
--- a/test/test_testrunner.py
+++ b/test/test_testrunner.py
@@ -4,9 +4,9 @@
 
 from ujotypes import UjoInt32
 
-from titanfe.brick import BrickBase
-from titanfe.constants import DEFAULT_PORT
-from titanfe.testing import TestRunner
+from openfba.brick import BrickBase
+from openfba.constants import DEFAULT_PORT
+from openfba.testing import TestRunner
 
 
 class Brick(BrickBase):
diff --git a/titanfe/__init__.pyc b/titanfe/__init__.pyc
deleted file mode 100644
index d076f151e403f1e467144cba122145edcae7d4da..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 106
zcmZSn%**9yY#W!%00oRd+5w1*S%5?e14FO|NW@PANHCxg#lk?blFX9CytLGq`1s7c
d%#!$cy@JXT4xqSAZhlH>PO2S9cL@+P008un5eonS

diff --git a/titanfe/apps/__init__.pyc b/titanfe/apps/__init__.pyc
deleted file mode 100644
index 33ae2bd0af105c9784f486c91bc3404bda3ce003..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 111
zcmZSn%**9yY#W!%00oRd+5w1*S%5?e14FO|NW@PANHCxg#S%cVlFX9CytLGq#Daq2
inE3e2yv&mLc)fzk5)PoGO>TZlX-=vg$cPdkW&i+e023ep

diff --git a/titanfe/apps/control_peer/__init__.pyc b/titanfe/apps/control_peer/__init__.pyc
deleted file mode 100644
index 3e35f97969fb0ecc8047f1d79df10be11ade1d9f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 124
zcmZSn%**9yY#W!%00oRd+5w1*S%5?e14FO|NW@PANHCxg#i~HDlFX9CytLGq#Daq2
vnB@GtlA`>a_=42bqL}#j%)HE!_;|g7$`THsN}JsLl+v73JCJ!LK+FID)yf!^

-- 
GitLab


From 8ab9208f082e712fa16bd937b964c3ab3803bc8e Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Mon, 6 Nov 2023 15:48:44 +0100
Subject: [PATCH 25/29] fix asyncio fixtures in tests
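
The fixtures were decorated with both @pytest.fixture and @pytest.mark.asyncio,
which newer pytest-asyncio releases no longer support for async fixtures; they
now use pytest_asyncio.fixture instead. A minimal sketch of the pattern (the
fixture body is illustrative, not part of this change):

    import asyncio

    import pytest
    import pytest_asyncio

    @pytest_asyncio.fixture
    async def runner():
        await asyncio.sleep(0)  # stand-in for async setup
        yield "runner"          # fixture value handed to the test
        await asyncio.sleep(0)  # stand-in for async teardown

    @pytest.mark.asyncio
    async def test_runner(runner):
        assert runner == "runner"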

---
 openfba/apps/brick_runner/metrics.py    | 4 ++--
 test/brick_runner/conftest.py           | 4 ++--
 test/brick_runner/test_metrics.py       | 4 ++--
 test/control_peer/test_install_brick.py | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/openfba/apps/brick_runner/metrics.py b/openfba/apps/brick_runner/metrics.py
index ce31f1c..6bcb31e 100644
--- a/openfba/apps/brick_runner/metrics.py
+++ b/openfba/apps/brick_runner/metrics.py
@@ -90,7 +90,7 @@ class MetricsBase(DictConvertable, ABC):
 class PacketMetricsAtBrick(MetricsBase):
     """Metric data for a packet being processed at a Brick"""
 
-    content_type: str = "titan-packet-metrics"
+    content_type: str = "openfba-packet-metrics"
     packet: str = "PacketUid?"
     execution_time: float = 0.0
     traveling_time: float = 0.0
@@ -104,5 +104,5 @@ class PacketMetricsAtBrick(MetricsBase):
 class BrickMetrics(MetricsBase):
     """Metric data for brick executions"""
 
-    content_type: str = "titan-brick-metrics"
+    content_type: str = "openfba-brick-metrics"
     execution_time: float = 0.0
diff --git a/test/brick_runner/conftest.py b/test/brick_runner/conftest.py
index dc29bd9..32ac5a5 100644
--- a/test/brick_runner/conftest.py
+++ b/test/brick_runner/conftest.py
@@ -17,6 +17,7 @@ from pathlib import Path
 from unittest.mock import MagicMock, patch
 
 import pytest
+import pytest_asyncio
 
 from openfba.apps.brick_runner.metrics import MetricEmitter
 from openfba.apps.brick_runner.runner import BrickRunner
@@ -99,8 +100,7 @@ def patched_output_transport():
         yield
 
 
-@pytest.mark.asyncio
-@pytest.fixture()
+@pytest_asyncio.fixture()
 async def brick_runner(patched_input_transport, patched_output_transport):
     """set up a brick runner, with networking patched away"""
     # pylint: disable=unused-argument
diff --git a/test/brick_runner/test_metrics.py b/test/brick_runner/test_metrics.py
index 2717126..81724e7 100644
--- a/test/brick_runner/test_metrics.py
+++ b/test/brick_runner/test_metrics.py
@@ -39,7 +39,7 @@ async def test_packet_metrics(metric_emitter):  # noqa: F811
         "runner": "R-Test",
         "host": platform.node(),
         "timestamp": Ignored(),
-        "content_type": "titan-packet-metrics",
+        "content_type": "openfba-packet-metrics",
         "packet": "P-Test",
         "execution_time": 1,
         "traveling_time": Ignored(),
@@ -71,7 +71,7 @@ async def test_brick_metrics(metric_emitter):  # noqa: F811
         "runner": "R-Test",
         "host": platform.node(),
         "timestamp": Ignored(),
-        "content_type": "titan-brick-metrics",
+        "content_type": "openfba-brick-metrics",
         "execution_time": 1.0
     }
 
diff --git a/test/control_peer/test_install_brick.py b/test/control_peer/test_install_brick.py
index 175a553..664bf5a 100644
--- a/test/control_peer/test_install_brick.py
+++ b/test/control_peer/test_install_brick.py
@@ -5,6 +5,7 @@ from tempfile import TemporaryDirectory
 from unittest.mock import MagicMock, patch
 
 import pytest
+import pytest_asyncio
 from aiohttp import web
 from aiohttp.test_utils import TestServer as HttpServer
 
@@ -42,8 +43,7 @@ def brick(brick_parent_folder):
         yield BrickBaseDefinition(uid=BRICK_ID, name="Any", family="Any", logger=MagicMock())
 
 
-@pytest.fixture
-@pytest.mark.asyncio
+@pytest_asyncio.fixture
 async def pac_man():
     async def respond(_request):
         return web.Response(body=COMPRESSED_BRICK)
-- 
GitLab


From 4a2dfcf84cedb7fcf33a678a31de78197dd3ce48 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Tue, 7 Nov 2023 11:29:54 +0100
Subject: [PATCH 26/29] update pipeline containers and adhere to new pylint
 rules
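
Besides the lint fixes, the Requests wrapper in services.py is reflowed; it
stands in for the aiohttp-requests package, which currently cannot be
installed on Python 3.12. For reference, a usage sketch mirroring the
get_bricks call (the endpoint is illustrative):

    from openfba.apps.control_peer.services import requests

    async def fetch_bricks(endpoint="http://localhost:8087/packagemanager/bricks"):
        response = await requests.get(endpoint)  # HTTP verbs resolve via __getattr__
        return await response.json()             # the patched read() releases the connection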

---
 .flake8                                 |  7 +--
 .gitlab-ci.yml                          | 12 +++--
 openfba/__init__.py                     |  2 +-
 openfba/apps/control_peer/__main__.py   |  1 -
 openfba/apps/control_peer/services.py   | 67 +++++++++++++------------
 openfba/apps/rmq_to_elastic/__main__.py |  3 +-
 openfba/rabbitmq.py                     |  2 -
 openfba/testing/testrunner.py           | 10 ----
 requirements_prod.txt                   |  3 +-
 setup.py                                |  3 +-
 10 files changed, 52 insertions(+), 58 deletions(-)

diff --git a/.flake8 b/.flake8
index e9632b0..fee08a2 100644
--- a/.flake8
+++ b/.flake8
@@ -8,8 +8,9 @@ exclude =
     setup.py,
     get-pip.py,
 
-extend-ignore =
-  E203,  # whitespace before ':' (black occasionally enforces this)
-  F821,  # undefined name (unfortunately it trips over forward declarations in type hints)
+extend-ignore = E203, F821
+
+#  E203,  # whitespace before ':' (black occasionally enforces this)
+#  F821,  # undefined name (unfortunately it trips over forward declarations in type hints)
 
 max-line-length = 100
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6c938be..17617f4 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -12,11 +12,11 @@ build_openfba:
   stage: build
   tags:
     - dockerex
-  image: industrialdevops/titan_manylinux2010:2019-08-02
+  image: quay.io/pypa/manylinux_2_28_x86_64
   script:
     - mkdir wheelhouse
     # execute python site-package build
-    - /opt/python/cp37-cp37m/bin/pip wheel . -w ./site-packages
+    - /opt/python/cp312-cp312/bin/pip wheel . -w ./site-packages
     # validate the sitepackage
     - auditwheel show $(find ./site-packages/openfba-*.whl)
     # move only required packages to wheelhouse
@@ -33,7 +33,7 @@ test_br_cp:
   stage: check
   tags:
     - dockerex
-  image: industrialdevops/flowengine-py-testcontainer:latest
+  image: python:3.12-slim
   before_script:
     # install python pip package
     - pip3 install ./wheelhouse/openfba*.whl
@@ -45,11 +45,12 @@ pylint_br_cp:
   stage: check
   tags:
     - dockerex
-  image: industrialdevops/flowengine-py-testcontainer:latest
+  image: python:3.12-slim
   allow_failure: false
   before_script:
     # install python pip package
     - pip3 install ./wheelhouse/openfba*.whl
+    - pip3 install -r requirements_dev.txt
   script:
     # run pylint for python module
     - pylint --rcfile=.pylintrc ./openfba
@@ -59,11 +60,12 @@ flake8:
   stage: check
   tags:
     - dockerex
-  image: industrialdevops/flowengine-py-testcontainer:latest
+  image: python:3.12-slim
   allow_failure: false
   script:
     # run flake8 for python module
     # other directories are excluded in .flake8 config file
+    - pip3 install -r requirements_dev.txt
     - flake8
 
 ##############################################
diff --git a/openfba/__init__.py b/openfba/__init__.py
index 86f955d..331d18e 100644
--- a/openfba/__init__.py
+++ b/openfba/__init__.py
@@ -3,4 +3,4 @@
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # found in the LICENSE file in the root directory of this source tree.
-#
\ No newline at end of file
+#
diff --git a/openfba/apps/control_peer/__main__.py b/openfba/apps/control_peer/__main__.py
index 953289a..e65fc9e 100644
--- a/openfba/apps/control_peer/__main__.py
+++ b/openfba/apps/control_peer/__main__.py
@@ -21,7 +21,6 @@ from openfba.apps.control_peer.control_peer import ControlPeer
 from openfba.config import configuration
 
 # to support legacy stuff
-import sys
 sys.modules["titanfe"] = sys.modules["openfba"]
 
 log = openfba.log.getLogger(__name__)
diff --git a/openfba/apps/control_peer/services.py b/openfba/apps/control_peer/services.py
index ee10370..da9e157 100644
--- a/openfba/apps/control_peer/services.py
+++ b/openfba/apps/control_peer/services.py
@@ -9,9 +9,12 @@ import asyncio
 import json
 from http import HTTPStatus
 from abc import ABC, abstractmethod
+import functools
 
+import aiohttp
 from aiohttp.client_exceptions import ClientError
-# from aiohttp_requests import Requests  # initiate a new client every time,
+
+# from aiohttp_requests import requests  # initiate a new client every time,
 
 # because we don't know how many threads are used and each will have its own asyncio loop
 # there must be a better way, but right now I'm short on time...
@@ -38,7 +41,6 @@ class ControlPeerServiceRegistration(ABC):
         """Inquire registration at target_address"""
         while True:
             try:
-                requests = Requests()
                 response = await requests.post(
                     self.control_peer_endpoint, json=json.dumps(own_api_address).strip('"')
                 )
@@ -62,7 +64,6 @@ class ControlPeerServiceRegistration(ABC):
     async def deregister(self, own_api_address):
         """Cancel registration at target_address"""
         try:
-            requests = Requests()
             response = await requests.delete(
                 self.control_peer_endpoint, json=json.dumps(own_api_address).strip('"')
             )
@@ -101,8 +102,6 @@ class PackageManager(ControlPeerServiceRegistration):
     @staticmethod
     async def get(endpoint, context):
         """get endpoint"""
-        requests = Requests()
-
         response = await requests.get(endpoint)
         if response.status != HTTPStatus.OK:
             raise ServiceError(f"{context} failed: {response!r}")
@@ -111,7 +110,6 @@ class PackageManager(ControlPeerServiceRegistration):
 
     async def get_bricks(self):
         """get bricks"""
-        requests = Requests()
         response = await requests.get(self.brick_endpoint)
         if response.status != HTTPStatus.OK:
             raise ServiceError(f"Getting bricks failed: {response!r}")
@@ -137,68 +135,73 @@ class GridManager(ControlPeerServiceRegistration):
         return f"{self.address}/controlpeers"
 
 
-import aiohttp
-import functools
-
 # aiohttp_requests is currently not available with python 3.12
 # we didn't really need much from it anyway, so here it is:
 
-# Patch ClientResponse.read to release immediately after read so we don't need to worry about that / use context manager
+# Patch ClientResponse.read to release immediately after read
+# so we don't need to worry about that / use context manager
 _read_only = aiohttp.client_reqrep.ClientResponse.read
+
+
 async def _read_and_release(self):  # noqa
     try:
-        data = await _read_only(self)
+        data = await _read_only(self)  # pylint: disable=disallowed-name
     finally:
         self.close()
 
     return data
+
+
 aiohttp.client_reqrep.ClientResponse.read = _read_and_release
 
 
 class Requests:
-    """ Thin wrapper for aiohttp.ClientSession with Requests simplicity """
+    """Thin wrapper for aiohttp.ClientSession with Requests simplicity"""
+
+    # pylint: disable=protected-access
     def __init__(self, *args, **kwargs):
         self._session_args = (args, kwargs)
         self._session = None
 
     @property
     def session(self):
-        """ An instance of aiohttp.ClientSession """
+        """An instance of aiohttp.ClientSession"""
         if not self._session or self._session.closed or self._session.loop.is_closed():
             self._session = aiohttp.ClientSession(*self._session_args[0], **self._session_args[1])
         return self._session
 
     def __getattr__(self, attr):
-        if attr.upper() in aiohttp.hdrs.METH_ALL:
-            @functools.wraps(self.session._request)
-            def session_request(*args, **kwargs):
-                """
-                This ensures `self.session` is always called where it can check the session/loop state so can't use
-                functools.partials as monkeypatch seems to do something weird where __getattr__ is only called once for
-                each attribute after patch is undone
-                """
-                return self.session._request(attr.upper(), *args, **kwargs)
-
-            return session_request
-        else:
+        if attr.upper() not in aiohttp.hdrs.METH_ALL:
             return super().__getattribute__(attr)
 
+        @functools.wraps(self.session._request)
+        def session_request(*args, **kwargs):
+            """
+            This ensures `self.session` is always called where it can check the session/loop state
+            so we can't use functools.partial, as monkeypatch seems to do something weird
+            where __getattr__ is only called once for each attribute after patch is undone
+            """
+            return self.session._request(attr.upper(), *args, **kwargs)
+
+        return session_request
+
     def close(self):
         """
         Close aiohttp.ClientSession.
 
-        This is useful to be called manually in tests if each test when each test uses a new loop. After close, new
-        requests will automatically create a new session.
+        This is useful to call manually in tests when each test uses a new loop.
+        After close, new requests will automatically create a new session.
 
-        Note: We need a sync version for `__del__` and `aiohttp.ClientSession.close()` is async even though it doesn't
-        have to be.
+        Note: We need a sync version for `__del__` and `aiohttp.ClientSession.close()`
+        is async even though it doesn't have to be.
         """
         if self._session:
             if not self._session.closed:
                 # Older aiohttp does not have _connector_owner
-                if not hasattr(self._session, '_connector_owner') or self._session._connector_owner:
+                if not hasattr(self._session, "_connector_owner") or self._session._connector_owner:
                     try:
-                        self._session._connector._close()  # New version returns a coroutine in close() as warning
+                        self._session._connector._close()
+                        # New version returns a coroutine in close() as warning
                     except Exception:
                         self._session._connector.close()
                 self._session._connector = None
@@ -212,5 +215,3 @@ requests = Requests()
 
 package_manager = PackageManager()  # pylint: disable=invalid-name
 grid_manager = GridManager()  # pylint: disable=invalid-name
-
-
diff --git a/openfba/apps/rmq_to_elastic/__main__.py b/openfba/apps/rmq_to_elastic/__main__.py
index 01aa577..96f8e13 100644
--- a/openfba/apps/rmq_to_elastic/__main__.py
+++ b/openfba/apps/rmq_to_elastic/__main__.py
@@ -73,7 +73,8 @@ async def main():
         "--metrics",
         nargs="+",
         default=["openfba.metrics.*"],
-        help="routing keys of the flowengine metrics `<one or more topics>` [default: openfba.metrics.*]",
+        help="routing keys of the flowengine metrics `<one or more topics>`"
+             " [default: openfba.metrics.*]",
     )
     args = arg_parser.parse_args()
 
diff --git a/openfba/rabbitmq.py b/openfba/rabbitmq.py
index efc87e9..cdd02de 100644
--- a/openfba/rabbitmq.py
+++ b/openfba/rabbitmq.py
@@ -139,5 +139,3 @@ class AsyncRMQ:
         tag = self._consumers.pop(queue_name)
         channel = await self.channel()
         await channel.basic_cancel(tag)
-
-#class RMQStream:
\ No newline at end of file
diff --git a/openfba/testing/testrunner.py b/openfba/testing/testrunner.py
index ae81857..7795639 100644
--- a/openfba/testing/testrunner.py
+++ b/openfba/testing/testrunner.py
@@ -58,16 +58,6 @@ class MetricEmitterDummy(MagicMock):
     kafka = MagicMock()
 
 
-class GridManagerDummy:
-    """Mocks the Gridmanager connection"""
-
-    # pylint: disable=too-few-public-methods
-
-    request_scaling = MagicMock()
-    deregister_runner = MagicMock()
-    register_runner = MagicMock()
-
-
 class Input(OriginalInput):
     """TestRunner: Input replacement"""
 
diff --git a/requirements_prod.txt b/requirements_prod.txt
index 847964f..58de43c 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -8,7 +8,8 @@ janus
 elasticsearch
 fastapi
 uvicorn
-aiohttp == 3.9.0b
+aiohttp==3.9.0b1; python_version >= '3.12'
+aiohttp==3.8.*; python_version < '3.12'
 # aiohttp-requests
 requests 
 docopt 
diff --git a/setup.py b/setup.py
index 938c063..aab6d64 100644
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,8 @@ setup(
         "fastapi",
         "uvicorn",
         'uvloop ;platform_system=="Linux"',
-        "aiohttp==3.9.0b",
+        "aiohttp==3.9.0b1; python_version >= '3.12'",
+        "aiohttp==3.8.*; python_version < '3.12'",
         #"aiohttp-requests",
         "dataclasses-json",
         "requests",
-- 
GitLab


From 5ed5f4d0a2f627f3ba418b7c13427c62d1d940b3 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Tue, 14 Nov 2023 14:56:20 +0100
Subject: [PATCH 27/29] refactor the brickrunner to support rmq streams for
 logging and metrics, plus some performance tweaks

- use a deque to get output from the brick thread (sufficient, and way faster than the other solutions I've tried)
- don't use dataclasses.asdict (it relies on deepcopy)
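
A minimal sketch of the deque hand-off between the brick thread and the event
loop (illustrative; the added benchmark script compares this against the other
queueing variants):

    import asyncio
    from collections import deque

    output = deque()  # appended to by the brick thread, drained on the event loop

    def brick_thread(items):
        for item in items:
            output.append(item)  # deque.append is atomic in CPython

    async def drain(expected):
        done, results = 0, []
        while done < expected:
            if output:
                results.append(output.popleft())
                done += 1
            else:
                await asyncio.sleep(0.0001)  # stay cooperative while the deque is empty
        return results

    async def main():
        _, results = await asyncio.gather(
            asyncio.to_thread(brick_thread, range(100)), drain(100)
        )
        assert len(results) == 100

    asyncio.run(main())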
---
 .../benchmark_asyncio_threading_and_queues.py | 296 ++++++++++++++++++
 experiments/benchmark_ujo_conversion.py       |  73 +++++
 openfba/apps/brick_runner/__main__.py         |  62 ++--
 openfba/apps/brick_runner/brick.py            | 150 +++++----
 openfba/apps/brick_runner/input.py            |  18 +-
 openfba/apps/brick_runner/metrics.py          |  29 +-
 openfba/apps/brick_runner/output.py           |   5 +-
 openfba/apps/brick_runner/packet.py           |  56 ++--
 openfba/apps/brick_runner/rabbitmq.py         | 109 +++++++
 openfba/apps/brick_runner/runner.py           | 141 ++++-----
 openfba/apps/control_peer/__main__.py         |   5 +-
 openfba/apps/control_peer/control_peer.py     |   6 +-
 openfba/log.py                                |  50 +--
 openfba/rabbitmq.py                           | 168 +++-------
 openfba/utils.py                              |   5 +-
 requirements_prod.txt                         |   2 +-
 setup.py                                      |   2 +-
 17 files changed, 796 insertions(+), 381 deletions(-)
 create mode 100644 experiments/benchmark_asyncio_threading_and_queues.py
 create mode 100644 experiments/benchmark_ujo_conversion.py
 create mode 100644 openfba/apps/brick_runner/rabbitmq.py

diff --git a/experiments/benchmark_asyncio_threading_and_queues.py b/experiments/benchmark_asyncio_threading_and_queues.py
new file mode 100644
index 0000000..ef81d9a
--- /dev/null
+++ b/experiments/benchmark_asyncio_threading_and_queues.py
@@ -0,0 +1,296 @@
+import asyncio
+import collections
+import functools
+import math
+import multiprocessing
+import queue
+import threading
+import time
+import timeit
+from collections import deque
+from concurrent.futures import ProcessPoolExecutor
+
+TOTAL_ITEM_COUNT = 10000
+MAX_QUEUE_SIZE = 1000
+
+
+def simulate_work(i: int):
+    # return i
+    f = float(f"0.{i}")
+    return math.sqrt(i**f * i**f * i**f * i**f)
+
+
+import janus
+
+
+def test_janus():
+    def produce(q: queue.Queue):
+        for i in range(1, TOTAL_ITEM_COUNT + 1):
+            q.put(simulate_work(i))
+
+    async def consume(q: asyncio.Queue):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            await q.get()
+            count += 1
+
+    async def main():
+        jq = janus.Queue(maxsize=MAX_QUEUE_SIZE)
+        consumer = asyncio.create_task(consume(jq.async_q))
+        producer = asyncio.to_thread(produce, jq.sync_q)
+        await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+
+def produce_mp(q: multiprocessing.Queue):
+    for i in range(1, TOTAL_ITEM_COUNT + 1):
+        q.put(simulate_work(i))
+
+
+def test_multiprocessing_queue():
+    async def consume(q: multiprocessing.Queue):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            try:
+                q.get_nowait()
+            except queue.Empty:
+                await asyncio.sleep(0)
+            else:
+                count += 1
+
+    async def main():
+        q = multiprocessing.Manager().Queue(maxsize=MAX_QUEUE_SIZE)
+
+        consumer = asyncio.create_task(consume(q))
+        with ProcessPoolExecutor(max_workers=1) as executor:
+            producer = asyncio.get_running_loop().run_in_executor(
+                executor, functools.partial(produce_mp, q)
+            )
+            await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+
+def test_asyncio_queue():
+    print("test_asyncio_queue: asyncio queues are not thread safe, this does not work")
+    return
+
+    def produce(q: asyncio.Queue):
+        for i in range(1, TOTAL_ITEM_COUNT + 1):
+            while True:
+                try:
+                    q.put_nowait(simulate_work(i))
+                except asyncio.QueueFull:
+                    continue
+                else:
+                    break  # item enqueued; move on to the next one
+
+    async def consume(q: asyncio.Queue):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            await q.get()
+            count += 1
+
+    async def main():
+        q = asyncio.Queue(maxsize=MAX_QUEUE_SIZE)
+        consumer = asyncio.create_task(consume(q))
+        producer = asyncio.to_thread(produce, q)
+        await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+
+def test_asyncio_queue_threadsafe():
+    def produce(q: asyncio.Queue, loop: asyncio.AbstractEventLoop):
+        for i in range(1, TOTAL_ITEM_COUNT + 1):
+            asyncio.run_coroutine_threadsafe(q.put(simulate_work(i)), loop)
+
+    async def consume(q: asyncio.Queue):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            await q.get()
+            count += 1
+
+    async def main():
+        q = asyncio.Queue(maxsize=MAX_QUEUE_SIZE)
+        consumer = asyncio.create_task(consume(q))
+        producer = asyncio.to_thread(produce, q, asyncio.get_running_loop())
+        await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+
+def test_sync_queue():
+    def produce(q: queue.Queue):
+        for i in range(1, TOTAL_ITEM_COUNT + 1):
+            q.put(simulate_work(i))
+
+    async def consume(q: asyncio.Queue):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            try:
+                q.get_nowait()  # will do stuff with it and eventually hit an await somewhere else
+            except queue.Empty:
+                await asyncio.sleep(0)  # be cooperative
+            else:
+                count += 1
+
+    async def main():
+        q = queue.Queue(maxsize=MAX_QUEUE_SIZE)
+        consumer = asyncio.create_task(consume(q))
+        producer = asyncio.to_thread(produce, q)
+        await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+
+def test_sync_queue_to_thread():
+    def produce(q: queue.Queue):
+        for i in range(1, TOTAL_ITEM_COUNT + 1):
+            q.put(simulate_work(i))
+
+    async def consume(q: asyncio.Queue):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            await asyncio.to_thread(q.get)
+            count += 1
+
+    async def main():
+        q = queue.Queue(maxsize=MAX_QUEUE_SIZE)
+        consumer = asyncio.create_task(consume(q))
+        producer = asyncio.to_thread(produce, q)
+        await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+
+def test_sync_dequeue():
+    def produce(q: deque):
+        for i in range(1, TOTAL_ITEM_COUNT + 1):
+            while len(q) >= MAX_QUEUE_SIZE:
+                time.sleep(0.00001)
+            q.append(simulate_work(i))  # same workload as the other variants
+
+    async def consume(q: deque):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            if q:
+                q.popleft()  # will do stuff with it and eventually hit an await somewhere else
+                count += 1
+            else:
+                await asyncio.sleep(1 / 500000)  # be cooperative
+
+    async def main():
+        q = deque(maxlen=MAX_QUEUE_SIZE)
+        consumer = asyncio.create_task(consume(q))
+        producer = asyncio.to_thread(produce, q)
+        await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+def test_sync_dequeue_class():
+    class MyQueue():
+        def __init__(self, maxsize=None):
+            self._maxsize = maxsize
+            self._queue = collections.deque()
+            self._event = threading.Event()
+
+        def put(self, item):
+            while not len(self._queue) < self._maxsize:
+                time.sleep(0.00001)
+            self._queue.append(item)
+
+        async def get(self):
+            while not self._queue:
+                await asyncio.sleep(0.00001)
+            return self._queue.popleft()
+
+    def produce(q: MyQueue):
+        for i in range(1, TOTAL_ITEM_COUNT + 1):
+            q.put(simulate_work(i))  # same workload as the other variants
+
+    async def consume(q: MyQueue):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            await q.get()
+            count += 1
+
+    async def main():
+        q = MyQueue(MAX_QUEUE_SIZE)
+        consumer = asyncio.create_task(consume(q))
+        producer = asyncio.to_thread(produce, q)
+        await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+def test_pure_async():
+    async def produce(q: asyncio.Queue):
+        for i in range(1, TOTAL_ITEM_COUNT + 1):
+            await q.put(simulate_work(i))  # same workload as the other variants
+
+    async def consume(q: asyncio.Queue):
+        count = 0
+        while count < TOTAL_ITEM_COUNT:
+            await q.get()
+            count += 1
+
+    async def main():
+        q = asyncio.Queue(MAX_QUEUE_SIZE)
+        consumer = asyncio.create_task(consume(q))
+        producer = asyncio.create_task(produce(q))
+        await asyncio.gather(consumer, producer)
+
+    asyncio.run(main())
+
+def run_benchmarks():
+    """time each variant, printing the fastest of three runs"""
+    for func in (
+        test_janus,
+        # test_asyncio_queue,
+        test_asyncio_queue_threadsafe,
+        test_sync_queue,
+        test_multiprocessing_queue,
+        test_sync_queue_to_thread,
+        test_sync_dequeue,
+        test_sync_dequeue_class,
+        test_pure_async,
+    ):
+        fname = func.__name__
+        print(
+            fname,
+            ":",
+            min(
+                timeit.repeat(
+                    f"{fname}()", setup=f"from __main__ import {fname}", number=1, repeat=3
+                )
+            ),
+        )
+
+
+if __name__ == "__main__":
+    print("testing with total item count:", TOTAL_ITEM_COUNT, "and max queue size:", MAX_QUEUE_SIZE)
+    print("using default loop")
+    run_benchmarks()
+
+    print()
+    print("now with uvloop...")
+
+    import uvloop  # pylint: disable=import-error
+
+    uvloop.install()
+
+    run_benchmarks()
diff --git a/experiments/benchmark_ujo_conversion.py b/experiments/benchmark_ujo_conversion.py
new file mode 100644
index 0000000..bd52688
--- /dev/null
+++ b/experiments/benchmark_ujo_conversion.py
@@ -0,0 +1,73 @@
+import timeit
+
+from datetime import datetime
+from decimal import Decimal
+from typing import Sequence, Mapping, Set
+
+from ujotypes import *
+
+NUMBER = 50_000
+REPEAT = 5
+
+packet = {
+    "uid": "P-bd9a9f37",
+    "started": 1700464549171892861,
+    "port": "",
+    "payload": [1, 2, (3, "test")],
+    "buffer": {},
+    "input_entry": 0.0,
+    "input_exit": 0.0,
+    "output_entry": 0.0,
+    "output_exit": 0.0,
+}
+
+
+def python_to_ujo(py_obj):  # pylint: disable=too-many-return-statements
+    """convert python objects recursively into corresponding UJO
+
+    int, float, etc. will be converted to Int64, Float64, etc.
+    If you actually want e.g. an Int8 do a manual conversion for that specific item beforehand.
+    """
+    if isinstance(py_obj, UjoBase):
+        return py_obj
+
+    if py_obj is None:
+        return UJO_VARIANT_NONE
+    if isinstance(py_obj, bool):
+        return UjoBool(py_obj)
+    if isinstance(py_obj, int):
+        return UjoInt64(py_obj)
+    if isinstance(py_obj, float):
+        return UjoFloat64(py_obj)
+    if isinstance(py_obj, str):
+        return UjoStringUTF8(py_obj)
+    if isinstance(py_obj, datetime):
+        return UjoTimestamp(py_obj)
+    if isinstance(py_obj, Decimal):
+        # Potentially reduces precision
+        return UjoFloat64(float(py_obj))
+
+    if isinstance(py_obj, (Sequence, Set)):
+        ujolist = UjoList()
+        for ujoval in (python_to_ujo(val) for val in py_obj):
+            ujolist.append(ujoval)
+        return ujolist
+
+    if isinstance(py_obj, Mapping):
+        ujomap = UjoMap()
+        ujoitems = ((python_to_ujo(key), python_to_ujo(val)) for key, val in py_obj.items())
+        for ujokey, ujoval in ujoitems:
+            ujomap[ujokey] = ujoval
+        return ujomap
+
+    raise NotImplementedError(
+        f"TODO: SimplifiedUjo TypeConversion for: {type(py_obj)} ({py_obj!r})"
+    )
+
+
+def test():
+    python_to_ujo(packet)
+
+
+times = timeit.repeat("test()", setup="from __main__ import test", number=NUMBER, repeat=REPEAT)
+print(f"converting {NUMBER} times, repeated {REPEAT} times - fastest time was:", min(times))
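+
+# quick sanity check (assumption, kept as a comment: UjoMap supports item access
+# with an Ujo key and scalar variants expose a `.value` attribute):
+# assert python_to_ujo(packet)[UjoStringUTF8("uid")].value == packet["uid"]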
diff --git a/openfba/apps/brick_runner/__main__.py b/openfba/apps/brick_runner/__main__.py
index de1b946..f88246a 100644
--- a/openfba/apps/brick_runner/__main__.py
+++ b/openfba/apps/brick_runner/__main__.py
@@ -14,8 +14,15 @@ import pickle
 import sys
 
 from openfba import log as logging
-from openfba.apps.brick_runner.runner import BrickRunner
-from openfba.config import configuration
+from openfba.config import configuration as config
+from openfba.rabbitmq import RMQStream
+
+from .runner import BrickRunner
+from .output import Output
+from .input import Input
+from .metrics import MetricEmitter
+from .rabbitmq import AsyncRMQ
+
 
 # to support legacy stuff
 sys.modules["titanfe"] = sys.modules["openfba"]
@@ -29,19 +36,7 @@ else:
     uvloop.install()
 
 
-async def run_app(args):
-    """let's do this"""
-
-    configuration.update(pickle.loads(args.configuration))
-    logging.initialize("BrickRunner")
-
-    brick = pickle.loads(args.brick)
-
-    runner = await BrickRunner.create(args.id, brick)
-    await runner.run()
-
-
-def main():
+async def main():
     """parse args and run the application"""
     arg_parser = argparse.ArgumentParser()
     arg_parser.add_argument("-id", type=str, help="Brick Runner ID")  # uuid.UUID,
@@ -50,10 +45,41 @@ def main():
 
     args = arg_parser.parse_args()
 
-    # asyncio.run(run_app(args))
-    asyncio.get_event_loop().run_until_complete(run_app(args))
+    config.update(pickle.loads(args.configuration))
+    brick_instance_definition = pickle.loads(args.brick)
+
+    logging.initialize(
+        "BrickRunner",
+        RMQStream("openfba.logs"),
+        context=logging.FlowContext(
+            brick_instance_definition.flow.uid,
+            brick_instance_definition.flow.name,
+            brick_instance_definition.uid,
+            brick_instance_definition.name,
+        ),
+    )
+
+    logger = logging.TitanPlatformLogger(
+        f"openfba.brick_runner.{args.id}.{brick_instance_definition.name}"
+    )
+
+    rmq_transport = AsyncRMQ()
+
+    input = Input(
+        input_queues=brick_instance_definition.input_queues,
+        transport=rmq_transport,
+        max_idle_time=brick_instance_definition.runtime_parameters.exit_after_idle_seconds,
+        logger=logger,
+    )
+    output = Output(
+        output_connections=brick_instance_definition.connections.output,
+        transport=rmq_transport,
+        exchange_name=brick_instance_definition.message_exchange,
+        logger=logger,
+    )
+
+    metric_emitter = MetricEmitter(
+        metrics_metadata={}, transport=RMQStream("openfba.metrics"), logger=logger
+    )
+
+    runner = BrickRunner(args.id, brick_instance_definition, input, output, metric_emitter, logger)
+    await runner.run()
+
+    await rmq_transport.close()
+    await RMQStream.close()
 
 
 if __name__ == "__main__":
-    main()
+    asyncio.run(main())
     sys.exit(0)
diff --git a/openfba/apps/brick_runner/brick.py b/openfba/apps/brick_runner/brick.py
index d4e2a98..63a0fff 100644
--- a/openfba/apps/brick_runner/brick.py
+++ b/openfba/apps/brick_runner/brick.py
@@ -6,6 +6,8 @@
 #
 
 """ A Brick within the brick runner """
+import asyncio
+import collections
 import time
 from collections import namedtuple
 from copy import copy
@@ -31,23 +34,20 @@ PortMapping = namedtuple("PortMapping", ("rules", "type"))
 SENTINEL = object()
 
 
-class Brick:
+class BrickWrapper:
     """Wraps all the Brick-Handling"""
 
     # pylint: disable=too-many-instance-attributes
     def __init__(
         self,
         instance_definition: BrickInstanceDefinition,
-        metric_emitter,
         logger,
-        output: Output,
     ):
-        self.metric_emitter = metric_emitter
-        self.output = output
+        self.log = logger.getChild(self.__class__.__name__)
+        self.log.info(repr(instance_definition))
 
         self.uid = instance_definition.uid
         self.name = instance_definition.name
-
         self.flow = instance_definition.flow
 
         self.processing_parameters = instance_definition.processing_parameters
@@ -59,56 +59,62 @@ class Brick:
         self.brick_type = instance_definition.base.name
         self.brick_family = instance_definition.base.family
 
-        context = logging.FlowContext(self.flow.uid, self.flow.name, self.uid, self.name)
-        logging.global_context.update(context.asdict())
-
-        self.log = logger.getChild("Brick")
         self.module = get_module(instance_definition.base.module_path)
-        self.log.info(repr(instance_definition))
 
-        self._brick_output = janus.Queue()
+        self._brick_output = BrickOutputQueue(maxsize=10_000)
 
         self.adapter = BrickAdapter(
-            AdapterMeta(brick=(self.uid, self.name), flow=(self.flow.uid, self.flow.name)),
-            self.output_packet,
-            self.log,
-            self.default_port,
+            meta_data=AdapterMeta(
+                brick=(self.uid, self.name), flow=(self.flow.uid, self.flow.name)
+            ),
+            result_put_callback=self.output_as_packet,
+            log=logger,
+            default_port=self.default_port,
         )
 
-        self.instance = None
-        self.last_execution_start = None
+        self.brick = None
 
         self._current_packet = None
 
     def create_instance(self):
         """create an instance of the actual Brick"""
         try:
-            self.instance = self.module.Brick(self.adapter, self.processing_parameters)
+            self.brick = self.module.Brick(self.adapter, self.processing_parameters)
         except AttributeError:
             self.log.with_context.warning("Brick class is missing in module: %r", self.module)
             raise ImportError(f"Brick class is missing in module: {self.module}")
 
     def terminate(self):
-        if isinstance(self.instance, InletBrickBase):
-            self.instance.stop_processing()
+        if isinstance(self.brick, InletBrickBase):
+            self.brick.stop_processing()
 
     def __enter__(self):
         self.create_instance()
-        self.instance.setup()
+        self.brick.setup()
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.instance.teardown()
-        self.instance = None
+        self.brick.teardown()
+        self.brick = None
+
+    def __aiter__(self):
+        return self
 
-    @property
-    def execution_time(self):
-        return time_delta_in_ms(self.last_execution_start)
+    async def __anext__(self):
+        # getting packets from the sync brick thread into the asyncio thread __FAST__ is difficult
+        # experiments with janus.Queue and asyncio.run_coroutine_threadsafe and multiprocessing
+        # were slower than the solution below. see experiments/benchmark_...
+        # TODO: support asyncio for inlet bricks
+        packet = await self._brick_output.get()
+        if packet is SENTINEL:
+            raise StopAsyncIteration
+
+        return packet
 
     async def process(self, packet):
         """run the brick module for the given packet"""
         self._current_packet = packet
 
-        self.log.info(
+        self.log.debug(
             "(%s) execute Brick: %s(%s) for %r",
             self.flow.name,
             self.name,
@@ -116,40 +122,34 @@ class Brick:
             packet,
         )
 
-        self.last_execution_start = time.time_ns()
-
-        payload = None
-        if not self.is_inlet:
-            payload = packet.payload
-
-        self.run_instance_processing(payload, packet.port)
-
-        self.metric_emitter.emit_brick_metrics(self.execution_time)
-        if self.is_outlet:
-            self.metric_emitter.emit_packet_metrics(packet, self.execution_time)
-
-    def run_instance_processing(self, payload, port):
-        """do the actual execution of the brick module and return its result"""
+        result = await asyncio.to_thread(self.run_instance_processing, packet)
+        self.log.debug("brick result: %r", result)
+        if result is not None:
+            if not isinstance(result, tuple):
+                payload, port = result, self.default_port
+            elif len(result) != 2:
+                payload, port = result, self.default_port
+            else:
+                # this assumption might fail if someone returns
+                # a two-item tuple not containing payload and port.
+                # we should make that more explicit somehow
+                payload, port = result
+            self.output_as_packet(payload, port)
+
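+        # one SENTINEL per processed packet ends the result iteration
+        # in the runner (see __anext__ / StopAsyncIteration)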
+        self._brick_output.put(SENTINEL)
+
+    def run_instance_processing(self, packet):
         try:
-            result = self.instance.process(payload, port)
-            self.log.debug("brick result: %r", result)
-            if result is not None:
-                if not isinstance(result, tuple):
-                    payload, port = result, self.default_port
-                elif len(result) > 2:
-                    payload, port = result, self.default_port
-                else:
-                    # this assumption might fail if someone returns
-                    # a two-item tuple not containing payload and port.
-                    # we should make that more explicit somehow
-                    payload, port = result
-
-                self.output_packet(payload, port)
-
-        except Exception as error:  # pylint: disable=broad-except
-            self.log.with_context.error("brick execution failed: %r", error, exc_info=True)
-
-    def output_packet(self, payload, port=None):
+            return self.brick.process(packet.payload, packet.port)
+        except Exception:  # pylint: disable=broad-except
+            self.log.with_context.error(
+                "brick execution failed - port: %s, payload: %r",
+                packet.port,
+                packet.payload,
+                exc_info=True,
+            )
+
+    def output_as_packet(self, payload, port=None):
         """publish the packet"""
         if port is None:
             port = self.default_port
@@ -157,8 +157,32 @@ class Brick:
         if not isinstance(payload, UjoBase):
             payload = python_to_ujo(payload)
 
-        packet = Packet(port=port) if self.is_inlet else copy(self._current_packet)
+        packet = Packet() if self.is_inlet else copy(self._current_packet)
+        packet.port = port
         packet.payload = payload
 
         self.log.debug("brick output on port [%s]: %r", port, packet)
-        self.output.put(packet, port)
+        self._brick_output.put(packet)
+
+
+class BrickOutputQueue:
+    """A very thin wrapper around collections.deque
+    only what we need here, not generally applicable
+    """
+
+    def __init__(self, maxsize=None):
+        self._maxsize = maxsize
+        self._queue = collections.deque()
+        self._wait_time = 2.5e-07  # 250ns
+
+    def put(self, item):
+        """blocking (sleep) if full"""
+        while not len(self._queue) < self._maxsize:
+            time.sleep(self._wait_time)
+        self._queue.append(item)
+
+    async def get(self):
+        while not self._queue:
+            await asyncio.sleep(self._wait_time)
+        item = self._queue.popleft()
+        return item
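+
+
+# Minimal usage sketch (illustrative only): a sync producer thread feeds the
+# asyncio consumer, just like the brick thread in `process` feeds `__anext__`:
+#
+#     async def demo():
+#         q = BrickOutputQueue(maxsize=10)
+#
+#         def produce():
+#             for i in range(100):
+#                 q.put(i)
+#
+#         async def consume():
+#             for _ in range(100):
+#                 print(await q.get())
+#
+#         await asyncio.gather(asyncio.to_thread(produce), consume())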
diff --git a/openfba/apps/brick_runner/input.py b/openfba/apps/brick_runner/input.py
index cbaeb5a..3465196 100644
--- a/openfba/apps/brick_runner/input.py
+++ b/openfba/apps/brick_runner/input.py
@@ -10,8 +10,6 @@ import asyncio
 import functools
 import typing as T
 
-
-import openfba.log
 from openfba.apps.brick_runner.packet import Packet
 
 Port = T.NewType("Port", str)
@@ -24,13 +22,13 @@ class Input:
         self,
         input_queues: T.Dict[Port, str],
         transport,
-        max_idle_time=60,
-        logger=None,
+        logger,
+        max_idle_time,
     ):
-        self.log = logger.getChild("Input") if logger else openfba.log.getLogger(__name__)
+        self.log = logger.getChild("Input")
         self.transport = transport
 
-        self.max_idle_time = max((max_idle_time, 0.2))  # give it at least a chance to run
+        self.max_idle_time = max((max_idle_time, 0.25))  # give it at least a chance to run
 
         self._getter = asyncio.Future()
         self._stop = False
@@ -64,7 +62,7 @@ class Input:
         raise StopAsyncIteration
 
     async def start(self):
-        self.log.debug("start consumers: %r", self._queues)
+        self.log.debug("start consuming: %r", self._queues)
         await asyncio.gather(
             *(
                 self.transport.start_consuming(queue, functools.partial(self._on_new_message, port))
@@ -74,12 +72,12 @@ class Input:
 
     async def stop(self):
         """stop consuming queues"""
-        self.log.debug("stop consumers: %r", self._queues)
+        self.log.debug("stop consuming: %r", self._queues)
+        self._stop = True
+        self._getter.cancel()
         await asyncio.gather(
             *(self.transport.stop_consuming(queue) for queue in self._queues.values())
         )
-        self._stop = True
-        self._getter.cancel()
 
     async def _on_new_message(self, port, packet, done_callback: T.Callable):
         self.log.debug("received on port %r: %r", port, packet)
diff --git a/openfba/apps/brick_runner/metrics.py b/openfba/apps/brick_runner/metrics.py
index 6bcb31e..c5dd659 100644
--- a/openfba/apps/brick_runner/metrics.py
+++ b/openfba/apps/brick_runner/metrics.py
@@ -32,9 +32,6 @@ class MetricEmitter:
         self.log = logger.getChild("MetricEmitter") if logger else openfba.log.getLogger(__name__)
         self.metrics_meta = metrics_metadata
         self.transport = transport
-        self.transport.channel.exchange_declare(
-            RMQ_METRIC_EXCHANGE, exchange_type="fanout", durable=True
-        )
 
     def set_metadata_from_runner(self, runner):
         """assigns flowname and brickname after brickrunner has gotten his assignment"""
@@ -43,15 +40,14 @@ class MetricEmitter:
     def emit(self, queue, metrics_dict):
         """emit the metrics"""
         self.log.metric("%s", metrics_dict)
-        self.transport.publish(RMQ_METRIC_EXCHANGE, queue, bytes(json.dumps(metrics_dict), "utf-8"))
+        self.transport.enqueue(bytes(json.dumps(metrics_dict), "utf-8"))
 
-    def emit_packet_metrics(self, packet, duration):  # pylint: disable=missing-docstring
-        packet_metrics = PacketMetricsAtBrick(
+    def emit_packet_metrics(self, packet):  # pylint: disable=missing-docstring
+        packet_metrics = PacketMetrics(
             **self.metrics_meta,
             packet=packet.uid,
-            execution_time=duration,
-            traveling_time=packet.traveling_time,
-            **packet.queue_times,
+            time_total=packet.traveling_time,
+            # **packet.queue_times,
         )
         self.emit(RMQ_PACKET_METRICS_QUEUE, packet_metrics.to_dict())
 
@@ -87,22 +83,17 @@ class MetricsBase(DictConvertable, ABC):
 
 
 @dataclass
-class PacketMetricsAtBrick(MetricsBase):
-    """Metric data for a packet being processed at a Brick"""
-
+class PacketMetrics(MetricsBase):
+    """Metric data for a packet reaching and outlet"""
     content_type: str = "openfba-packet-metrics"
     packet: str = "PacketUid?"
-    execution_time: float = 0.0
-    traveling_time: float = 0.0
-    time_in_input: float = 0.0
-    time_in_output: float = 0.0
-    time_on_wire: float = 0.0
-    at_outlet: bool = False  # TODO
+    time_total: float = 0.0
+    # time_queued: float = 0.0
+    # time_processing: float = 0.0
 
 
 @dataclass
 class BrickMetrics(MetricsBase):
     """Metric data for brick executions"""
-
     content_type: str = "openfba-brick-metrics"
     execution_time: float = 0.0
diff --git a/openfba/apps/brick_runner/output.py b/openfba/apps/brick_runner/output.py
index 8fbe02d..c803b7f 100644
--- a/openfba/apps/brick_runner/output.py
+++ b/openfba/apps/brick_runner/output.py
@@ -58,12 +58,13 @@ class Output:
     def __getitem__(self, port) -> Port:
         return self.ports[port]
 
-    def put(self, packet, port):
+    async def put(self, packet):
+        port = packet.port
         consumers = self[port].consumers
         for consumer in consumers:
             next_packet = consumer.create_packet(packet)
             self.log.debug("publish %r on port %r to consumer: %r", next_packet, port, consumers)
-            self.transport.publish(self.exchange_name, consumer.queue_name, bytes(next_packet))
+            await self.transport.publish(self.exchange_name, consumer.queue_name, bytes(next_packet))
 
 
 @dataclass
diff --git a/openfba/apps/brick_runner/packet.py b/openfba/apps/brick_runner/packet.py
index 0b2a922..e7f5a71 100644
--- a/openfba/apps/brick_runner/packet.py
+++ b/openfba/apps/brick_runner/packet.py
@@ -15,7 +15,6 @@ from dataclasses import dataclass, field
 from ujotypes import UjoBase, UjoStringUTF8, read_buffer, UjoMap, ujo_to_python
 from ujotypes.variants.none import UjoNone
 
-from openfba.messages import PacketMessage
 from openfba.ujo_helper import py_to_ujo_bytes
 from openfba.utils import create_uid, ns_to_ms, time_delta_in_ms, DictConvertable
 from openfba.apps.brick_runner.value_mapping import Buffer
@@ -54,41 +53,14 @@ class Packet(DictConvertable):
         # why not use the "uid"?
         # because if a packet gets split up into multiple packets, they all have the same uid
         return id(self)
 
-    @property
-    def traveling_time(self) -> float:
-        return time_delta_in_ms(self.started)
-
-    @property
-    def queue_times(self):
-        return {
-            "time_in_input": ns_to_ms(self.input_exit - self.input_entry),
-            "time_in_output": ns_to_ms(self.output_exit - self.output_entry),
-            "time_on_wire": ns_to_ms(self.input_entry - self.output_exit),
-        }
-
-    def update_input_entry(self):
-        self.input_entry = time.time_ns()
-
-    def update_input_exit(self):
-        self.input_exit = time.time_ns()
-
-    def update_output_entry(self):
-        self.output_entry = time.time_ns()
-
-    def update_output_exit(self):
-        self.output_exit = time.time_ns()
-
-    def as_message(self):
-        return PacketMessage(self.to_dict())
-
     def __bytes__(self):
         """encode a packet"""
         if ENCODING == "PICKLE":
             return pickle.dumps(self)
 
         # default: Ujo
-        return py_to_ujo_bytes(self.to_dict())
+        packet_dict = self.to_dict()
+        return py_to_ujo_bytes(packet_dict)
 
     @classmethod
     def from_bytes(cls, bytez: bytes):
@@ -115,3 +87,27 @@ class Packet(DictConvertable):
         pymap["buffer"] = Buffer(buffer)
 
         return cls.from_dict(pymap)
+
+    @property
+    def traveling_time(self) -> float:
+        return time_delta_in_ms(self.started)
+
+    @property
+    def queue_times(self):
+        return {
+            "time_in_input": ns_to_ms(self.input_exit - self.input_entry),
+            "time_in_output": ns_to_ms(self.output_exit - self.output_entry),
+            "time_on_wire": ns_to_ms(self.input_entry - self.output_exit),
+        }
+
+    def update_input_entry(self):
+        self.input_entry = time.time_ns()
+
+    def update_input_exit(self):
+        self.input_exit = time.time_ns()
+
+    def update_output_entry(self):
+        self.output_entry = time.time_ns()
+
+    def update_output_exit(self):
+        self.output_exit = time.time_ns()
diff --git a/openfba/apps/brick_runner/rabbitmq.py b/openfba/apps/brick_runner/rabbitmq.py
new file mode 100644
index 0000000..7b506e6
--- /dev/null
+++ b/openfba/apps/brick_runner/rabbitmq.py
@@ -0,0 +1,109 @@
+#
+# Copyright (c) 2019-present, wobe-systems GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# found in the LICENSE file in the root directory of this source tree.
+#
+
+"""thin rabbitmq wrapper"""
+import asyncio
+
+import aioamqp
+import aioamqp.channel
+
+from openfba.config import configuration as config
+
+
+# TODO: Handle Reconnect?
+
+class AsyncRMQ:
+    """asynchronous RabbitMQ transport for consuming"""
+
+    # TODO: make "robust" - handle reconnect and stuff
+    #  maybe find inspiration in aio_pikas RobustConnection/Channel/Queue?
+
+    _instance = None  # singleton
+
+    def __new__(cls, *args, **kwargs):
+        if cls._instance is None:
+            cls._instance = super().__new__(cls)
+        return cls._instance
+
+    def __init__(self):
+        if hasattr(self, "_consumers"):
+            return  # singleton already initialized - don't wipe the connection state
+        self._connection: aioamqp.protocol.AmqpProtocol = None
+        self._sub_channel: aioamqp.channel.Channel = None
+        self._pub_channel: aioamqp.channel.Channel = None
+        self._consumers = {}
+
+    async def close(self):
+        """close the connection"""
+        for conn in (self._sub_channel, self._pub_channel, self._connection):
+            if not conn:
+                continue
+            try:
+                await conn.close()
+            except Exception:  # pylint: disable=broad-except
+                pass
+        AsyncRMQ._instance = None  # reset the class-level singleton, not an instance attribute
+
+    async def connect(self):
+        """create new rabbit mq connection/channel"""
+        _, self._connection = await aioamqp.connect(
+            host=config.rabbitmq_params.host,
+            port=config.rabbitmq_params.port,
+            login=config.rabbitmq_params.user,
+            password=config.rabbitmq_params.password,
+        )
+
+        self._pub_channel = await self._connection.channel()
+        self._sub_channel = await self._connection.channel()
+        await self._sub_channel.basic_qos(prefetch_count=2)
+
+    async def disconnect(self):
+        """disconnect"""
+        if not self._connection:
+            return
+
+        await asyncio.gather(*(self.stop_consuming(consumer) for consumer in list(self._consumers)))
+
+        await self._sub_channel.close()
+        await self._pub_channel.close()
+        await self._connection.close()
+
+    async def connection(self):
+        if not self._connection:
+            await self.connect()
+        return self._connection
+
+    async def start_consuming(self, queue_name, on_new_message_callback):
+        """start consuming the given queue"""
+        await self.connection()
+
+        async def callback_wrapper(msgchannel, body, envelope, _):
+            async def done_callback():
+                await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
+
+            await on_new_message_callback(body, done_callback)
+
+        channel = self._sub_channel
+
+        await channel.queue_declare(queue_name, durable=True)
+        consumer_tag = await channel.basic_consume(callback_wrapper, queue_name)
+        self._consumers[queue_name] = consumer_tag["consumer_tag"]
+
+    async def stop_consuming(self, queue_name):
+        """stop consuming the queue"""
+        tag = self._consumers.pop(queue_name)
+        await self._sub_channel.basic_cancel(tag)
+
+    async def publish(self, exchange_name: str, queue_name: str, message: bytes):
+        """publish a message on the given exchange with the given routing key"""
+        await self.connection()
+        try:
+            await self._pub_channel.basic_publish(
+                exchange_name=exchange_name,
+                routing_key=queue_name,
+                payload=message,
+            )
+        except aioamqp.ChannelClosed:
+            pass  # we are probably shutting down operations
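+
+
+# Lifecycle sketch (illustrative; `on_message(body, done_callback)` is a placeholder):
+#
+#     transport = AsyncRMQ()  # singleton, connects lazily
+#     await transport.start_consuming("some.queue", on_message)
+#     await transport.publish("some.exchange", "some.queue", b"payload")
+#     ...
+#     await transport.stop_consuming("some.queue")
+#     await transport.close()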
diff --git a/openfba/apps/brick_runner/runner.py b/openfba/apps/brick_runner/runner.py
index ac2bdea..0b1b0c7 100644
--- a/openfba/apps/brick_runner/runner.py
+++ b/openfba/apps/brick_runner/runner.py
@@ -10,114 +10,98 @@
 import asyncio
 import os
 import signal
-
+import time
 
 from openfba.apps.control_peer.brick import BrickInstanceDefinition
-from openfba import log as logging
-from openfba.rabbitmq import RMQ, AsyncRMQ
+from openfba.log import TitanPlatformLogger
+from openfba.utils import time_delta_in_ms
 
-from .brick import Brick
+from .brick import BrickWrapper
 from .input import Input
 from .metrics import MetricEmitter
 from .output import Output
 from .packet import Packet
 
 
 class ForcedShutdown(Exception):
     """forcefully shutting down"""
 
 
 class BrickRunner:
-    """The BrickRunner will create an Input, get a setup from the control peer,
-       create corresponding outputs and then start processing packets from the input.
-
-    Arguments:
-        uid (str): a unique id for the runner
-    """
-
-    def __init__(self, uid):
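+    """Runs a brick: feeds packets from the Input to the brick and
+    publishes the brick's results to the Output.
+
+    Arguments:
+        uid (str): a unique id for the runner
+    """
+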
+    def __init__(
+        self,
+        uid: str,
+        brick_instance_definition: BrickInstanceDefinition,
+        input: Input,
+        output: Output,
+        metric_emitter: MetricEmitter,
+        logger: TitanPlatformLogger,
+    ):
         self.uid = uid
-        self.log = logging.TitanPlatformLogger(
-            f"{__name__}.{self.uid}", context=logging.global_context
-        )
-        self.loop = asyncio.get_event_loop()
-
-        # done async in setup
-        self.input = None
-        self.output = None
-        self.brick = None
-
-        self.idle_since = None
-        self.metric_emitter = None
+        self.log = logger.getChild(self.__class__.__name__)
+        self.input = input
+        self.output = output
+        self.metric_emitter = metric_emitter
+
         self._stop = asyncio.Event()
 
-    @classmethod
-    async def create(cls, uid, brick_definition: BrickInstanceDefinition):
-        """Creates a brick runner instance and does the initial setup phase before returning it"""
-        br = cls(uid)  # pylint: disable=invalid-name
-        await br.setup(brick_definition)
-        return br
-
-    async def setup(self, brick_definition: BrickInstanceDefinition):
-        """does the inital setup parts that have to be awaited"""
-        self.log = logging.TitanPlatformLogger(
-            f"{__name__}.{self.uid}.{brick_definition.name}",
-            context=logging.global_context,
-        )
-
-        self.input = Input(
-            input_queues=brick_definition.input_queues,
-            transport=AsyncRMQ(),
-            max_idle_time=brick_definition.runtime_parameters.exit_after_idle_seconds,
-            logger=self.log,
-        )
-        self.output = Output(
-            output_connections=brick_definition.connections.output,
-            transport=RMQ(),
-            exchange_name=brick_definition.message_exchange,
-            logger=self.log,
-        )
-
-        self.metric_emitter = MetricEmitter(metrics_metadata={}, transport=RMQ(), logger=self.log)
-
-        self.brick = Brick(brick_definition, self.metric_emitter, self.log, self.output)
-
-        self.metric_emitter.set_metadata_from_runner(self)
+        self.brick = BrickWrapper(brick_instance_definition, logger)
 
         self.add_signal_handlers()
 
     async def run(self):
         """process items from the input"""
-        self.log.with_context.info("Start runner: %s", self.uid)
+        self.log.with_context.info("start runner: %s", self.uid)
 
         try:
             with self.brick:
                 if self.brick.is_inlet:
-                    await self.brick.process(Packet())
-                    # in case the inlet terminates by itself, the following line forces
-                    # the brickrunner to wait and keep living until someone stops the flow.
-                    # otherwise the GM would keep restarting the inlet.
-                    await self._stop.wait()
+                    await self.run_inlet()
                 else:
-                    await self.input.start()
-                    async for packet in self.input:
-                        self.log.debug("process packet: %s", packet)
-                        try:
-                            await self.brick.process(packet)
-                        except Exception as exception:  # pylint: disable=broad-except
-                            self.log.with_context.error(
-                                "processing packet: %s failed: %r",
-                                packet.payload,
-                                exception,
-                                exc_info=True,
-                            )
-                        finally:
-                            self.input.mark_done(packet)
+                    await self.run_non_inlet()
         except Exception:  # pylint: disable=broad-except
-            self.log.with_context.error("Brick failed", exc_info=True)
+            self.log.with_context.error("brick execution failed", exc_info=True)
 
         await asyncio.sleep(0.1)  # last chance for other tasks to finish
-        self.log.with_context.warning("Exit")
+        self.log.with_context.warning("exit")
+
+    async def run_inlet(self):
+        # note: run() already entered the brick's context, no second `with self.brick` here
+        task = asyncio.create_task(self.brick.process(Packet()))
+        async for result in self.brick:
+            await self.output.put(result)
+        await task
+        # in case the inlet terminates by itself, the following line forces
+        # the brickrunner to wait and keep living until someone stops the flow.
+        # otherwise the GM would keep restarting the inlet.
+        await self._stop.wait()
+
+    async def run_non_inlet(self):
+        await self.input.start()
+        async for packet in self.input:
+            self.log.debug("process packet: %s", packet)
+            start_time = time.time_ns()
+            try:
+                task = asyncio.create_task(self.brick.process(packet))
+                async for result_packet in self.brick:
+                    await self.output.put(result_packet)
+                await task
+            except Exception as exception:  # pylint: disable=broad-except
+                self.log.with_context.error(
+                    "processing packet: %s failed: %r",
+                    packet.payload,
+                    exception,
+                    exc_info=True,
+                )
+            else:
+                # write metrics
+                duration = time_delta_in_ms(start_time)
+                self.metric_emitter.emit_brick_metrics(duration)
+                if self.brick.is_outlet:
+                    self.metric_emitter.emit_packet_metrics(packet)
+            finally:
+                self.input.mark_done(packet)
 
     def schedule_shutdown(self, sig=signal.SIGABRT, frame=None):  # pylint: disable=unused-argument
         """schedule the shutdown of the brick runner"""
@@ -133,7 +117,6 @@ class BrickRunner:
             # stop async stuff
             if not self.brick.is_inlet:
                 await self.input.stop()
-            RMQ().close()
 
         asyncio.create_task(shutdown())
 
diff --git a/openfba/apps/control_peer/__main__.py b/openfba/apps/control_peer/__main__.py
index e65fc9e..3a247c7 100644
--- a/openfba/apps/control_peer/__main__.py
+++ b/openfba/apps/control_peer/__main__.py
@@ -19,18 +19,19 @@ import site
 import openfba.log
 from openfba.apps.control_peer.control_peer import ControlPeer
 from openfba.config import configuration
+from openfba.rabbitmq import RMQStream
 
 # to support legacy stuff
 sys.modules["titanfe"] = sys.modules["openfba"]
 
-log = openfba.log.getLogger(__name__)
-
 
 async def run_app(args):
     """run the application"""
     configuration.update_from_yaml(args.config_file)
     configuration.brick_folder = Path(args.brick_folder).resolve()
 
+    openfba.log.initialize(service="ControlPeer", rmq_streamer=RMQStream("openfba.logs"))
+
     cp = ControlPeer.create()  # pylint: disable=invalid-name
     try:
         await cp.run()
diff --git a/openfba/apps/control_peer/control_peer.py b/openfba/apps/control_peer/control_peer.py
index 591caf0..ace712b 100644
--- a/openfba/apps/control_peer/control_peer.py
+++ b/openfba/apps/control_peer/control_peer.py
@@ -29,8 +29,6 @@ class ControlPeer:
     """
 
     def __init__(self):
-        logging.initialize(service="ControlPeer")
-
         self.loop = asyncio.get_event_loop()
 
         self.runners = {}
@@ -128,7 +126,7 @@ class ControlPeer:
             runner = BrickRunner(brick, on_termination_cb=self.remove_runner)
             runner.start()
         except Exception as error:  # pylint: disable=broad-except
-            logger = TitanLogAdapter(log, extra=FlowContext.from_brick(brick).asdict())
+            logger = TitanLogAdapter(log, extra=FlowContext.from_brick(brick))
             logger.error("Failed to start runner for: %s", brick, exc_info=True)
             raise error
 
@@ -150,4 +148,4 @@ class ControlPeer:
             log.debug("Stopping all runners")
             runners = self.runners.values()
 
-        await asyncio.gather(*[runner.stop() for runner in runners])
+        await asyncio.gather(*(runner.stop() for runner in runners))
diff --git a/openfba/log.py b/openfba/log.py
index 169f916..8112c18 100644
--- a/openfba/log.py
+++ b/openfba/log.py
@@ -9,8 +9,9 @@
 import sys
 import platform
 import traceback
-from dataclasses import dataclass, asdict
+from dataclasses import dataclass, fields
 from typing import Optional
+from collections.abc import Mapping
 
 import pathlib
 import logging
@@ -19,7 +20,6 @@ from datetime import datetime
 
 import ruamel.yaml
 
-from openfba.rabbitmq import RMQ
 from openfba.ujo_helper import py_to_ujo_bytes
 
 RMQ_LOG_EXCHANGE = "openfba.logging"
@@ -37,7 +37,7 @@ class TitanLogRecord(logging.LogRecord):  # pylint: disable=too-few-public-metho
 
 
 @dataclass
-class FlowContext:
+class FlowContext(Mapping):
     """The Flow Context"""
 
     flowuid: str = ""
@@ -45,6 +45,19 @@ class FlowContext:
     brickuid: str = ""
     brickname: str = ""
 
+    def __getitem__(self, item):
+        try:
+            return getattr(self, item)
+        except AttributeError as error:
+            raise KeyError(item) from error
+
+    def __iter__(self):
+        return (field.name for field in fields(self))
+
+    def __len__(self):
+        return len(fields(self))
+
     @classmethod
     def from_flow(cls, flow: "openfba.apps.control_peer.flow.Flow"):  # noqa
         return cls(flow.uid, flow.name)
@@ -54,7 +67,7 @@ class FlowContext:
         return cls(brick.flow.uid, brick.flow.name, brick.uid, brick.name)
 
     def asdict(self):
-        return asdict(self)
+        return {field.name: getattr(self, field.name) for field in fields(self)}
 
 
 class TitanLogAdapter(logging.LoggerAdapter):
@@ -70,10 +83,7 @@ class TitanLogAdapter(logging.LoggerAdapter):
 
     @context.setter
     def context(self, new):
-        self.extra.clear()
-        if isinstance(new, FlowContext):
-            new = new.asdict()
-        self.extra.update(new)
+        self.extra = new
 
 
 class TitanPlatformLogger(logging.Logger):
@@ -87,8 +97,6 @@ class TitanPlatformLogger(logging.Logger):
 
     def __init__(self, name, context: Optional[FlowContext] = None):
         # pylint: disable=super-init-not-called, unused-argument
-        if isinstance(context, FlowContext):
-            context = context.asdict()
         self.context = context
         self.context_logger = TitanLogAdapter(self, context or global_context)
 
@@ -123,8 +131,6 @@ def getLogger(  # pylint: disable=invalid-name ; noqa: N802
     logger = logging.getLogger(name)
 
     if context is not None:
-        if isinstance(context, FlowContext):
-            context = context.asdict()
         logger = TitanLogAdapter(logger, context)
 
     elif global_context:
@@ -133,7 +139,7 @@ def getLogger(  # pylint: disable=invalid-name ; noqa: N802
     return logger
 
 
-def initialize(service=""):
+def initialize(service="", rmq_streamer=None, context=None):
     """initialize the openfba logging module
 
     Args:
@@ -143,13 +149,16 @@ def initialize(service=""):
 
     log_config_file = pathlib.Path(__file__).parent / "log_config.yml"
     with open(log_config_file) as cfile:  # pylint: disable=unspecified-encoding
-        log_config = ruamel.yaml.YAML(typ='safe', pure=True).load(cfile)
+        log_config = ruamel.yaml.YAML(typ="safe", pure=True).load(cfile)
         logging.config.dictConfig(log_config)
 
-    rmq_publisher = RMQ()
-    rmq_publisher.channel.exchange_declare(RMQ_LOG_EXCHANGE, exchange_type="fanout", durable=True)
+    if context is not None:
+        global_context.update(context)
+
+    if rmq_streamer is None:
+        return
 
-    rmq_handler = RabbitMQLogHandler(rmq_publisher, exchange_name=RMQ_LOG_EXCHANGE)
+    rmq_handler = RabbitMQLogHandler(rmq_streamer)
     root = logging.getLogger()
     root.addHandler(rmq_handler)
 
@@ -225,11 +234,10 @@ class UjoBinFormatter(logging.Formatter):
 class RabbitMQLogHandler(logging.Handler):
     """Stream LogRecords via RabbitMQ"""
 
-    def __init__(self, publisher: RMQ, exchange_name: str):
+    def __init__(self, publisher):
         logging.Handler.__init__(self)
         self.formatter = UjoBinFormatter()
-        self.exchange_name = exchange_name
-        self.producer = publisher
+        self.publisher = publisher
 
     def emit(self, record):
         """emits the record"""
@@ -237,7 +245,7 @@ class RabbitMQLogHandler(logging.Handler):
             log_message = self.format(record)
             if not isinstance(log_message, bytes):
                 log_message = bytes(log_message, "utf-8")
-            self.producer.publish(self.exchange_name, record.name, log_message)
+            self.publisher.enqueue(log_message)
         except Exception:  # pylint: disable=broad-except
             exc_info = sys.exc_info()
             traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, sys.stderr)
diff --git a/openfba/rabbitmq.py b/openfba/rabbitmq.py
index cdd02de..508318e 100644
--- a/openfba/rabbitmq.py
+++ b/openfba/rabbitmq.py
@@ -7,135 +7,45 @@
 
 """thin rabbitmq wrapper"""
 import asyncio
+import collections
+import contextlib
 
-import aioamqp
-import aioamqp.channel
-
-import pika
+import rbfly.streams as rbs
 
 from openfba.config import configuration as config
-
-
-# TODO: Handle Reconnect?
-
-class RMQ:
-    """synchronous RabbitMQ transport for publishing"""
-    _instance = None  # singleton
-
-    def __new__(cls, *args, **kwargs):
-        if cls._instance is None:
-            cls._instance = super().__new__(cls, *args, **kwargs)
-        return cls._instance
-
-    def __init__(self):
-        self.connection = pika.BlockingConnection(
-            pika.ConnectionParameters(
-                host=config.rabbitmq_params.host,
-                port=config.rabbitmq_params.port,
-                credentials=pika.credentials.PlainCredentials(
-                    config.rabbitmq_params.user, config.rabbitmq_params.password
-                ),
-            )
-        )
-
-        self.channel = self.connection.channel()
-
-    def close(self):
-        """close the connection"""
-        for conn in (self.channel, self.connection):
-            try:
-                conn.close()
-            except Exception:  # pylint: disable=broad-except
-                pass
-        self._instance = None
-
-    def publish(self, exchange_name: str, queue_name: str, message: bytes):
-        """publish a message on the given exchange with the given routing key"""
-        self.channel.basic_publish(
-            exchange=exchange_name,
-            routing_key=queue_name,
-            body=message,
-        )
-
-
-class AsyncRMQ:
-    """asynchronous RabbitMQ transport for consuming"""
-
-    # TODO: make "robust" - handle reconnect and stuff
-    #  maybe find inspiration in aio_pikas RobustConnection/Channel/Queue?
-
-    _instance = None  # singleton
-
-    def __new__(cls, *args, **kwargs):
-        if cls._instance is None:
-            cls._instance = super().__new__(cls, *args, **kwargs)
-        return cls._instance
-
-    def __init__(self):
-        self._connection: aioamqp.protocol.AmqpProtocol = None
-        self._channel: aioamqp.channel.Channel = None
-        self._consumers = {}
-
-    def close(self):
-        """close the connection"""
-        for conn in (self._channel, self._connection):
-            if not conn:
-                continue
-            try:
-                conn.close()
-            except Exception:  # pylint: disable=broad-except
-                pass
-        self._instance = None
-
-    async def connect(self):
-        """create new rabbit mq connection/channel"""
-        _, self._connection = await aioamqp.connect(
-            host=config.rabbitmq_params.host,
-            port=config.rabbitmq_params.port,
-            login=config.rabbitmq_params.user,
-            password=config.rabbitmq_params.password,
-            heartbeat=60,
-        )
-
-        self._channel = await self._connection.channel()
-        await self._channel.basic_qos(prefetch_count=2)
-
-    async def disconnect(self):
-        """disconnect"""
-        if not self._connection:
-            return
-
-        await asyncio.gather(*(self.stop_consuming(consumer) for consumer in list(self._consumers)))
-
-        await self._channel.close()
-        await self._connection.close()
-
-    async def connection(self):
-        if not self._connection:
-            await self.connect()
-        return self._connection
-
-    async def channel(self) -> aioamqp.channel.Channel:
-        await self.connection()
-        return self._channel
-
-    async def start_consuming(self, queue_name, on_new_message_callback):
-        """start consuming the given queue"""
-
-        async def callback_wrapper(msgchannel, body, envelope, _):
-            async def done_callback():
-                await msgchannel.basic_client_ack(delivery_tag=envelope.delivery_tag)
-
-            await on_new_message_callback(body, done_callback)
-
-        channel = await self.channel()
-
-        await channel.queue_declare(queue_name, durable=True)
-        consumer_tag = await channel.basic_consume(callback_wrapper, queue_name)
-        self._consumers[queue_name] = consumer_tag["consumer_tag"]
-
-    async def stop_consuming(self, queue_name):
-        """stop consuming the queue"""
-        tag = self._consumers.pop(queue_name)
-        channel = await self.channel()
-        await channel.basic_cancel(tag)
+from openfba.utils import cancel_tasks
+
+
+class RMQStream:
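+    """Publish messages to a RabbitMQ stream.
+
+    `enqueue` is synchronous and cheap; a background task drains the
+    message queue and publishes batches roughly every 100 ms.
+    """
+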
+    _client = None
+    _tasks = set()
+
+    def __init__(self, stream_name):
+        if not self._client:
+            rmqp = config.rabbitmq_params
+            rmq_uri = f"rabbitmq-stream://{rmqp.user}:{rmqp.password}@{rmqp.host}"
+            RMQStream._client = rbs.streams_client(rmq_uri)  # shared by all streams; close() relies on it
+
+        self._messages = collections.deque()
+        self._tasks.add(asyncio.create_task(self._publish(stream_name)))
+
+    async def _publish(self, stream_name):
+        await self._client.create_stream(stream_name)
+        async with self._client.publisher(stream_name, cls=rbs.PublisherBatchFast) as publisher:
+            while True:
+                await asyncio.sleep(0.1)
+                if not self._messages:
+                    continue
+                with contextlib.suppress(IndexError):
+                    while True:
+                        publisher.batch(self._messages.popleft())
+                await publisher.flush()
+
+    def enqueue(self, message):
+        self._messages.append(message)
+
+    @classmethod
+    async def close(cls):
+        await cancel_tasks(cls._tasks)
+        cls._tasks.clear()
+        if cls._client is not None:
+            await cls._client.disconnect()
+            cls._client = None
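+
+
+# Usage sketch (illustrative): create one stream per concern inside a running
+# event loop, enqueue raw bytes, and close on shutdown:
+#
+#     metrics = RMQStream("openfba.metrics")
+#     metrics.enqueue(b'{"execution_time": 0.25}')
+#     ...
+#     await RMQStream.close()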
diff --git a/openfba/utils.py b/openfba/utils.py
index 7e3e1e8..a4931a9 100644
--- a/openfba/utils.py
+++ b/openfba/utils.py
@@ -18,7 +18,7 @@ from abc import ABC
 
 from typing import Sequence, Iterable, Union
 from contextlib import contextmanager, suppress
-from dataclasses import asdict, fields, is_dataclass, dataclass
+from dataclasses import fields, is_dataclass, dataclass
 from datetime import datetime
 from pathlib import Path
 from types import ModuleType
@@ -217,7 +217,8 @@ class DictConvertable(ABC):
     """Mixin to make a dataclass convert from/into a dictionary"""
 
     def to_dict(self):
-        return asdict(self)
+        # not asdict(self): asdict makes deep copies, which is too expensive here
+        return {field.name: getattr(self, field.name) for field in fields(self)}
 
     def dicts_to_dataclasses(self):
         """Convert all fields of type `dataclass` into an dataclass instance of the
diff --git a/requirements_prod.txt b/requirements_prod.txt
index 58de43c..0f5f644 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -15,7 +15,7 @@ requests
 docopt 
 pycryptodome 
 aioamqp 
-pika 
+rbfly
 
 # linux only:
 uvloop;platform_system=="Linux"
diff --git a/setup.py b/setup.py
index aab6d64..3fb3cbf 100644
--- a/setup.py
+++ b/setup.py
@@ -81,7 +81,7 @@ setup(
         "requests",
         "docopt",
         "aioamqp",
-        "pika",
+        "rbfly",
         "pycryptodome",
     ],
     ext_modules=[],
-- 
GitLab


From a8be359119c89544c60f7ab9c99b0f5f3c41cd75 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Tue, 21 Nov 2023 16:41:43 +0100
Subject: [PATCH 28/29] replace kafka_to_elastic with rmq_to_elastic

---
 openfba/apps/brick_runner/metrics.py    |   2 +-
 openfba/apps/rmq_to_elastic/__main__.py | 290 +++++++++---------------
 requirements_prod.txt                   |   2 +-
 3 files changed, 111 insertions(+), 183 deletions(-)

diff --git a/openfba/apps/brick_runner/metrics.py b/openfba/apps/brick_runner/metrics.py
index c5dd659..0f43519 100644
--- a/openfba/apps/brick_runner/metrics.py
+++ b/openfba/apps/brick_runner/metrics.py
@@ -40,7 +40,7 @@ class MetricEmitter:
     def emit(self, queue, metrics_dict):
         """emit the metrics"""
         self.log.metric("%s", metrics_dict)
-        self.transport.enqueue(bytes(json.dumps(metrics_dict), "utf-8"))
+        self.transport.enqueue(json.dumps(metrics_dict).encode("utf-8"))
 
     def emit_packet_metrics(self, packet):  # pylint: disable=missing-docstring
         packet_metrics = PacketMetrics(
diff --git a/openfba/apps/rmq_to_elastic/__main__.py b/openfba/apps/rmq_to_elastic/__main__.py
index 96f8e13..f33c74a 100644
--- a/openfba/apps/rmq_to_elastic/__main__.py
+++ b/openfba/apps/rmq_to_elastic/__main__.py
@@ -1,224 +1,152 @@
 """
-kafka_to_elastic
+rmq_to_elastic
 
 Usage:
-  kafka_to_elastic [-k <bootstrap_servers>] [-e <elastic_host>] [-s <service_topics>] -t [<topics>]
-  kafka_to_elastic (-h | --help)
+  rmq_to_elastic [-r <rabbitmq_uri>] [-e <elastic_uri>] [<streams>...]
+  rmq_to_elastic (-h | --help)
 
 Example:
-  kafka_to_elastic --kafka 127.0.0.1:9092 --elastic 127.0.0.1 -t a_topic another_topic
+  rmq_to_elastic -r rabbitmq-stream://guest:guest@localhost:5552 \
+--elastic=localhost my_stream1 my_stream2
 
 Options:
   -h, --help     Show this screen.
 
-  -k <bootstrap_servers>, --kafka=<bootstrap_servers>
-      the Kafka bootstrap_servers to connect to as `<host>:<port> <host:port> ...`
-      [default: 10.14.0.23:9092]
+  -r <rabbitmq_uri>, --rabbitmq=<rabbitmq_uri>
+      rabbitmq connection uri, e.g. `rabbitmq-stream://guest:guest@rabbitmq:5552`
+      [default: rabbitmq-stream://guest:guest@rabbitmq:5552]
 
-  -e <elastic_host>, --elastic=<elastic_host>
-      the elastic host `<hostname_or_ip>` [default: 10.14.0.21]
+  -e <elastic_uri>, --elastic=<elastic_uri>
+      the elastic uri e.g. `http://elastic:9200` [default: http://elastic:9200]
 
-  -s <service_topics>, --service-topics=<service_topics>
-      topics of titan service logs `<one or more topics>` [default: titan.servicelogs]
-
-  -t <flowengine_topics>, --flowengine-topics=<flowengine_topics>
-      topics of titan service logs `<one or more topics>` [default: titanfe.metrics]
+  <streams>
+      names of the rabbitmq streams to read, one or more
+      [default: openfba.metrics]
 """
+import sys
+from contextlib import suppress
+
+from docopt import docopt
 
-# pylint: disable=broad-except, missing-docstring
-# missing-function-docstring, missing-class-docstring
-import argparse
-import os
 import asyncio
-import pickle
 import json
+import os
 import signal
-from contextlib import suppress
-from datetime import datetime
-from collections import namedtuple
-from aiokafka import AIOKafkaConsumer, ConsumerStoppedError
-from elasticsearch import AsyncElasticsearch
-from elasticsearch.helpers import async_bulk
+from datetime import date
 
-KafkaTopics = namedtuple("KafkaTopics", ("name", "type"))
-SERVICE_TOPIC_TYPE = "service"
-FLOWENGINE_TOPIC_TYPE = "flowengine"
+import rbfly.streams as rbs
 
+from elasticsearch import AsyncElasticsearch
+from elasticsearch.helpers import async_streaming_bulk
 
-async def main():
-    arg_parser = argparse.ArgumentParser()
-    arg_parser.add_argument(
-        "-e",
-        "--elastic",
-        type=str,
-        default="10.14.0.21",
-        help=" the elastic host `<hostname_or_ip>`",
-    )
-    arg_parser.add_argument(
-        "-r",
-        "--rmq",
-        type=str,
-        default="10.14.0.23:9092",
-        help=" the Kafka bootstrap_servers to connect to as `<host>:<port> <host:port> ...`",
-    )
-    arg_parser.add_argument(
-        "-l",
-        "--logs",
-        nargs="+",
-        default=["openfba.logging"],
-        help="topics of titan go service logs `<one or more topics>` [default: openfba.logging]",
-    )
-    arg_parser.add_argument(
-        "-m",
-        "--metrics",
-        nargs="+",
-        default=["openfba.metrics.*"],
-        help="routing keys of the flowengine metrics `<one or more topics>`"
-             " [default: openfba.metrics.*]",
-    )
-    args = arg_parser.parse_args()
-
-    signals = signal.SIGINT, signal.SIGTERM
+DEFAULT_STREAMS = ["openfba.metrics"]  # maybe also: "openfba.logs"
 
-    if os.name != "nt":  # not available on windows
-        signals += (signal.SIGHUP,)  # pylint: disable=no-member
 
-    for sign in signals:
-        signal.signal(sign, schedule_shutdown)
-
-    bootstrap_servers = args.rmq
-    elastic_host = args.elastic
-    topics = KafkaTopics(
-        name=args.topics + args.service_topics,
-        type={
-            **{topic: FLOWENGINE_TOPIC_TYPE for topic in args.topics},
-            **{topic: SERVICE_TOPIC_TYPE for topic in args.service_topics},
-        },
-    )
-
-    print("Reading", topics.name, "From", bootstrap_servers, "To", elastic_host)
-
-    async with KafkaReader(
-        topics.name, bootstrap_servers=bootstrap_servers
-    ) as kafka, ElasticWriter(
-        elastic_host=elastic_host
-    ) as elastic:  # pylint: disable= ; noqa
-        async for topic, records in kafka.read():
-            len_records = f"{len(records)} record{'s' if len(records) > 1 else ''}"
-            print(f"processing {len_records} from {topic.topic} of type {topics.type[topic.topic]}")
-            msgs = list(transform_kafka_to_elastic(records, topics.type[topic.topic]))
-            await elastic.bulk_insert(msgs)
-
-
-def schedule_shutdown(sign, _):
-    print(f"Received {signal.Signals(sign).name} ...")  # pylint: disable=no-member
-
-    async def shutdown():
-        tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
-        for task in tasks:
-            task.cancel()
-
-        print(f"Cancelling outstanding tasks ({len(tasks)})")
-        await asyncio.gather(*tasks)
+async def main():
+    add_signal_handlers()
 
-    asyncio.create_task(shutdown())
+    args = docopt(__doc__)
+    rabbitmq_uri = args["--rabbitmq"]
+    elastic_host = args["--elastic"]
+    streams = args["<streams>"] or DEFAULT_STREAMS
 
+    rmq_client = rbs.streams_client(rabbitmq_uri)
+    elastic_client = AsyncElasticsearch(hosts=elastic_host)
 
-class KafkaReader:
-    def __init__(self, topics, bootstrap_servers):
-        self.consumer = AIOKafkaConsumer(
-            *topics,
-            loop=asyncio.get_event_loop(),
-            bootstrap_servers=bootstrap_servers,
-            # auto_offset_reset='earliest',
+    try:
+        await asyncio.gather(
+            activity_indicator(),
+            *(process(stream, rmq_client, elastic_client) for stream in streams),
         )
+    finally:
+        await elastic_client.close()
+        await rmq_client.disconnect()
 
-    async def start(self):
-        await self.consumer.start()
-
-    async def stop(self):
-        await self.consumer.stop()
 
-    async def __aenter__(self):
-        await self.start()
-        return self
+async def activity_indicator():
+    while True:
+        await asyncio.sleep(5)
+        print(".", end="", file=sys.stdout)
 
-    async def __aexit__(self, exc_type, exc_val, exc_tb):
-        await self.stop()
 
-    def __aiter__(self):
-        return self
+async def process(stream, rmq_client, elastic_client):
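+    """ETL: extract messages from the stream, transform them, bulk-load into elastic"""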
+    extractor = extract(rmq_client, stream)
+    transformer = transform(extractor)
+    loader = load(elastic_client, transformer)
+    await loader
 
-    async def __anext__(self):
-        while True:
-            try:
-                batch = await self.consumer.getmany(timeout_ms=1000)
-            except (asyncio.CancelledError, ConsumerStoppedError):
-                raise StopAsyncIteration
 
-            if not batch:
-                print(".", end="", flush=True)
-                continue
+async def extract(rmq_client, stream):
+    # Offset.FIRST / Offset.LAST / Offset.timestamp() ?
+    await rmq_client.create_stream(stream)
+    async for msg in rmq_client.subscribe(stream, offset=rbs.Offset.FIRST):
+        yield msg
 
-            return batch
-
-    async def read(self):
-        async for batch in self:
-            for topic, records in batch.items():
-                yield topic, records
-
-
-class ElasticWriter:
-    def __init__(self, elastic_host):
-        self.elastic = AsyncElasticsearch(hosts=[{"host": elastic_host}])
-
-    async def __aenter__(self):
-        await self.elastic.__aenter__()
-        return self
-
-    async def __aexit__(self, exc_type, exc_val, exc_tb):
-        await self.elastic.close()
-
-    async def bulk_insert(self, document_generator):
-        await async_bulk(self.elastic, document_generator)
 
+async def transform(extractor):
+    async for message in extractor:
+        try:
+            content = json.loads(message)
+        except Exception as exception:
+            print("failed to load json: %r", message, exception)
+            continue
+
+        if "timestamp" in content:
+            content["@timestamp"] = content.pop("timestamp")
+        elif "time" in content:
+            content["@timestamp"] = content.pop("time")
+        else:
+            print("No time/timestamp in message:", content)
+            continue
+
+        if "content_type" in content:
+            doc_type = content["content_type"]
+            index = f"{doc_type}-{date.today().isoformat()}"  # %Y-%m-%d
+        elif "package" in content:
+            package = content["package"].split("/")[0]
+            index = f"{package.lower()}-{date.today().isoformat()}"  # %Y-%m-%d
+            doc_type = "service"
+        else:
+            print("unsupported document:", content)
+            continue
+
+        yield {"_op_type": "index", "_index": index, "_type": doc_type, "_source": content}
+
+
+async def load(elastic_client, transformer):
+    async for ok, result in async_streaming_bulk(elastic_client, transformer):
+        if not ok:
+            print("streaming to elastic failed", result)
+
+
+def add_signal_handlers():
+    signals = signal.SIGINT, signal.SIGTERM
 
-def transform_kafka_to_elastic(batch, topic_type):
-    def transform(message):
-        content = pickle.loads(message.value)
-        content["@timestamp"] = content.pop("timestamp")
+    if os.name != "nt":  # not available on windows
+        signals += (signal.SIGHUP,)  # pylint: disable=no-member
 
-        doc_type = content["content_type"]
-        index = f"{doc_type}-{datetime.now():%Y-%m-%d}"
+    for sign in signals:
+        signal.signal(sign, signal_handler)
 
-        return {"_op_type": "index", "_index": index, "_type": doc_type, "_source": content}
 
-    def transform_service_log(message):
-        content = json.loads(message.value)
-        content["@timestamp"] = content.pop("time")
-        package = content["package"].split("/")[0]
-        index = f"{package.lower()}-{datetime.now():%Y-%m-%d}"
+def signal_handler(sign, _):
+    print(f"Received {signal.Signals(sign).name}")  # pylint: disable=no-member
+    print("Preparing shutdown, this might take a moment...")
+    asyncio.create_task(shutdown())
 
-        return {"_op_type": "index", "_index": index, "_type": "service", "_source": content}
 
-    for message in batch:
-        try:
-            if topic_type == SERVICE_TOPIC_TYPE:
-                yield transform_service_log(message)
-            else:
-                yield transform(message)
+async def shutdown():
+    async def cancel(task):
+        task.cancel()
+        with suppress(asyncio.CancelledError):
+            await task
 
-        except Exception as error:
-            print("Failed to transform ", message, error)
+    tasks = asyncio.all_tasks() - {asyncio.current_task()}
+    print(f"Cancelling outstanding tasks ({len(tasks)})")
+    await asyncio.gather(*(cancel(task) for task in tasks))
 
 
 if __name__ == "__main__":
-
-    async def run_main():
-        try:
-            with suppress(asyncio.CancelledError):
-                await main()
-        except Exception as error:
-            print("Error:", repr(error))
-
-    asyncio.run(run_main())
+    with suppress(asyncio.CancelledError):
+        asyncio.run(main())
+    print("Exit")
diff --git a/requirements_prod.txt b/requirements_prod.txt
index 0f5f644..f05191c 100644
--- a/requirements_prod.txt
+++ b/requirements_prod.txt
@@ -5,7 +5,7 @@ ujoschema
 dataclasses-json
 ruamel.yaml 
 janus 
-elasticsearch
+elasticsearch==7.8.*  # newer client releases are incompatible with our current Elasticsearch server (6.4.2)
 fastapi
 uvicorn
 aiohttp==3.9.0b1; python_version >= '3.12'
-- 
GitLab


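The shutdown handling above follows a common asyncio pattern: register OS signal handlers, cancel every task except the current one, and await the cancelled tasks so their cleanup code gets a chance to run. A self-contained sketch of that pattern, using nothing beyond the standard library (the workers are illustrative only):

import asyncio
import signal
from contextlib import suppress


async def worker(name):
    while True:
        await asyncio.sleep(1)
        print(name, "working")


async def shutdown():
    tasks = asyncio.all_tasks() - {asyncio.current_task()}
    for task in tasks:
        task.cancel()
    # return_exceptions=True: collect the CancelledErrors instead of raising
    await asyncio.gather(*tasks, return_exceptions=True)


def signal_handler(sign, _):
    # runs in the main thread while the loop is running, so create_task works
    print(f"Received {signal.Signals(sign).name}")
    asyncio.create_task(shutdown())


async def main():
    signal.signal(signal.SIGINT, signal_handler)
    await asyncio.gather(worker("a"), worker("b"))


with suppress(asyncio.CancelledError):
    asyncio.run(main())
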
From 94638c7c5a31000f511742b6869564b2654ba798 Mon Sep 17 00:00:00 2001
From: Sebastian Loehner <sl@wobe-systems.com>
Date: Fri, 24 Nov 2023 13:27:06 +0100
Subject: [PATCH 29/29] fix some logging issues

---
 openfba/apps/brick_runner/brick.py  | 40 +++++++++++++++++++----------
 openfba/apps/brick_runner/packet.py |  1 +
 openfba/apps/brick_runner/runner.py | 25 +++++++-----------
 openfba/log.py                      |  1 -
 openfba/rabbitmq.py                 |  1 +
 5 files changed, 37 insertions(+), 31 deletions(-)

diff --git a/openfba/apps/brick_runner/brick.py b/openfba/apps/brick_runner/brick.py
index 63a0fff..481f99c 100644
--- a/openfba/apps/brick_runner/brick.py
+++ b/openfba/apps/brick_runner/brick.py
@@ -9,6 +9,7 @@
 import asyncio
 import collections
 import queue
+import sys
 import time
 from collections import namedtuple
 from copy import copy
@@ -59,6 +60,7 @@ class BrickWrapper:
         self.brick_type = instance_definition.base.name
         self.brick_family = instance_definition.base.family
 
+        sys.modules["titanfe.log"] = logging  # fix the global_context
         self.module = get_module(instance_definition.base.module_path)
 
         self._brick_output = BrickOutputQueue(maxsize=10_000)
@@ -72,29 +74,33 @@ class BrickWrapper:
             default_port=self.default_port,
         )
 
-        self.brick = None
+        self.instance = None
 
         self._current_packet = None
 
+    def __str__(self):
+        return self.__class__.__name__ + f"({self.flow.name}-{self.name}-{self.uid})"
+
     def create_instance(self):
         """create an instance of the actual Brick"""
         try:
-            self.brick = self.module.Brick(self.adapter, self.processing_parameters)
+            self.instance = self.module.Brick(self.adapter, self.processing_parameters)
         except AttributeError:
             self.log.with_context.warning("Brick class is missing in module: %r", self.module)
             raise ImportError(f"Brick class is missing in module: {self.module}")
 
     def terminate(self):
-        if isinstance(self.brick, InletBrickBase):
-            self.brick.stop_processing()
+        if hasattr(self.instance, "stop_processing"):
+            self.log.debug("call stop processing")
+            self.instance.stop_processing()
 
     def __enter__(self):
         self.create_instance()
-        self.brick.setup()
+        self.instance.setup()
 
     def __exit__(self, exc_type, exc_val, exc_tb):
-        self.brick.teardown()
-        self.brick = None
+        self.instance.teardown()
+        self.instance = None
 
     def __aiter__(self):
         return self
@@ -134,13 +140,13 @@ class BrickWrapper:
                 # a two-item tuple not containing payload and port.
                 # we should make that more explicit somehow
                 payload, port = result
-            self.output_as_packet(payload, port)
+            self.output_as_packet(payload, port, force=True)
 
-        self._brick_output.put(SENTINEL)
+        self._brick_output.put(SENTINEL, force=True)
 
     def run_instance_processing(self, packet):
         try:
-            return self.brick.process(packet.payload, packet.port)
+            return self.instance.process(packet.payload, packet.port)
         except Exception:  # pylint: disable=broad-except
             self.log.with_context.error(
                 "brick execution failed - port: %s, payload: %r",
@@ -149,7 +155,7 @@ class BrickWrapper:
                 exc_info=True,
             )
 
-    def output_as_packet(self, payload, port=None):
+    def output_as_packet(self, payload, port=None, force=False):
         """publish the packet"""
         if port is None:
             port = self.default_port
@@ -162,21 +168,27 @@ class BrickWrapper:
         packet.payload = payload
 
         self.log.debug("brick output on port [%s]: %r", port, packet)
-        self._brick_output.put(packet)
+        self._brick_output.put(packet, force)
 
 
 class BrickOutputQueue:
     """A very thin wrapper around collections.deque
     only what we need here, not generally applicable
     """
-
     def __init__(self, maxsize=None):
         self._maxsize = maxsize
         self._queue = collections.deque()
         self._wait_time = 2.5e-07  # 250ns
 
-    def put(self, item):
+    def put(self, item, force=False):
         """blocking (sleep) if full"""
+        # TODO: find a better mechanism than "force".
+        # It works around the fact that we sometimes put from within
+        # the asyncio thread; if we ran into the blocking sleep here,
+        # we would deadlock.
+        if force:
+            return self._queue.append(item)
+
         while not len(self._queue) < self._maxsize:
             time.sleep(self._wait_time)
         self._queue.append(item)
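
The `force` flag trades the queue's back-pressure for deadlock safety whenever the producer runs on the event-loop thread itself. A stripped-down sketch of that mechanism (not the project's class, just the behaviour it implements):

import collections
import time


class BoundedQueue:
    """deque with a blocking put; `force` bypasses the bound so a producer
    that also drives the consumer cannot deadlock itself"""

    def __init__(self, maxsize):
        self._maxsize = maxsize
        self._queue = collections.deque()

    def put(self, item, force=False):
        if force:  # never block, e.g. when called from the event-loop thread
            self._queue.append(item)
            return
        while len(self._queue) >= self._maxsize:  # busy-wait for the consumer
            time.sleep(2.5e-07)
        self._queue.append(item)

    def get(self):
        return self._queue.popleft()
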
diff --git a/openfba/apps/brick_runner/packet.py b/openfba/apps/brick_runner/packet.py
index e7f5a71..d3e9f20 100644
--- a/openfba/apps/brick_runner/packet.py
+++ b/openfba/apps/brick_runner/packet.py
@@ -53,6 +53,7 @@ class Packet(DictConvertable):
         # why not use the "uid"?
         # because if a packet gets split up into multiple packets, they all have the same uid
         return id(self)
+
     def __bytes__(self):
         """encode a packet"""
         if ENCODING == "PICKLE":
diff --git a/openfba/apps/brick_runner/runner.py b/openfba/apps/brick_runner/runner.py
index 0b1b0c7..5771fc2 100644
--- a/openfba/apps/brick_runner/runner.py
+++ b/openfba/apps/brick_runner/runner.py
@@ -23,11 +23,6 @@ from .output import Output
 from .packet import Packet
 
 
-
-class ForcedShutdown(Exception):
-    """forcefully shutting down"""
-
-
 class BrickRunner:
     def __init__(
         self,
@@ -53,7 +48,6 @@ class BrickRunner:
     async def run(self):
         """process items from the input"""
         self.log.with_context.info("start runner: %s", self.uid)
-
         try:
             with self.brick:
                 if self.brick.is_inlet:
@@ -63,19 +57,18 @@ class BrickRunner:
         except Exception:  # pylint: disable=broad-except
             self.log.with_context.error("brick execution failed", exc_info=True)
 
-        await asyncio.sleep(0.1)  # last chance for other tasks to finish
         self.log.with_context.warning("exit")
+        await asyncio.sleep(0.1)  # last chance for other tasks to finish
 
     async def run_inlet(self):
-        with self.brick:
-            task = asyncio.create_task(self.brick.process(Packet()))
-            async for result in self.brick:
-                await self.output.put(result)
-            await task
-            # in case the inlet terminates by itself, the following line forces
-            # the brickrunner to wait and keep living until someone stops the flow.
-            # otherwise the GM would keep restarting the inlet.
-            await self._stop.wait()
+        task = asyncio.create_task(self.brick.process(Packet()))
+        async for result in self.brick:
+            await self.output.put(result)
+        await task
+        # in case the inlet terminates by itself, the following line forces
+        # the brickrunner to wait and keep living until someone stops the flow.
+        # otherwise the GM would keep restarting the inlet.
+        await self._stop.wait()
 
     async def run_non_inlet(self):
         await self.input.start()
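
The inlet loop above has a spawn-drain-park shape: start the brick's `process` as a task, consume results as they arrive, then wait on a stop event so the runner stays alive until the flow is stopped. A generic, runnable sketch of that shape with a toy source (all names below are illustrative):

import asyncio


class Source:
    """toy inlet: produce() fills an internal queue, iteration drains it"""

    def __init__(self, items):
        self._items = list(items)
        self._queue = asyncio.Queue()

    async def produce(self):
        for item in self._items:
            await self._queue.put(item)
        await self._queue.put(None)  # sentinel: done

    def __aiter__(self):
        return self

    async def __anext__(self):
        item = await self._queue.get()
        if item is None:
            raise StopAsyncIteration
        return item


async def run_inlet(source, stop_event):
    task = asyncio.create_task(source.produce())
    async for result in source:
        print("output:", result)
    await task               # surface producer exceptions
    await stop_event.wait()  # park until the flow is stopped


async def main():
    stop = asyncio.Event()
    asyncio.get_running_loop().call_later(0.1, stop.set)
    await run_inlet(Source([1, 2, 3]), stop)


asyncio.run(main())
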
diff --git a/openfba/log.py b/openfba/log.py
index 8112c18..ed1b464 100644
--- a/openfba/log.py
+++ b/openfba/log.py
@@ -57,7 +57,6 @@ class FlowContext(Mapping):
     def __len__(self):
         return len(fields(self))
 
-
     @classmethod
     def from_flow(cls, flow: "openfba.apps.control_peer.flow.Flow"):  # noqa
         return cls(flow.uid, flow.name)
diff --git a/openfba/rabbitmq.py b/openfba/rabbitmq.py
index 508318e..1e0473e 100644
--- a/openfba/rabbitmq.py
+++ b/openfba/rabbitmq.py
@@ -45,6 +45,7 @@ class RMQStream:
         self._messages.append(message)
     @classmethod
     async def close(cls):
+        await asyncio.sleep(0.1)  # one last flush
         await cancel_tasks(cls._tasks)
         cls._tasks.clear()
         if cls._client is not None:
-- 
GitLab
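
`RMQStream.close` above sleeps briefly so a final flush can run before its background tasks are cancelled. The `cancel_tasks` helper itself is not shown in this excerpt; a plausible minimal version (an assumption, not the project's code) would be:

import asyncio
from contextlib import suppress


async def cancel_tasks(tasks):
    """cancel each task and await it so its cleanup handlers run"""
    for task in tasks:
        task.cancel()
    for task in tasks:
        with suppress(asyncio.CancelledError):
            await task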