diff --git a/src/integration-tests/test_admin_client.py b/src/integration-tests/test_admin_client.py index 6f866cadcf..6a8d001ab2 100644 --- a/src/integration-tests/test_admin_client.py +++ b/src/integration-tests/test_admin_client.py @@ -1,5 +1,6 @@ """ -Testing admin session. +This suite of test cases exercises admin session connection and some basic +commands. """ import json @@ -9,40 +10,34 @@ from bmq.dev.it.process.admin import AdminClient -class TestAdminClient: - """ - This suite of test cases exercises admin session connection and some basic - commands. - """ +def test_admin(local_cluster: Cluster): + cluster: Cluster = local_cluster + endpoint: str = cluster.config.definition.nodes[0].transport.tcp.endpoint # type: ignore - def test_admin(self, local_cluster: Cluster): - cluster: Cluster = local_cluster - endpoint: str = cluster.config.definition.nodes[0].transport.tcp.endpoint # type: ignore + # Extract the (host, port) pair from the config + m = re.match(r".+://(.+):(\d+)", endpoint) # tcp://host:port + assert m is not None + host, port = str(m.group(1)), int(m.group(2)) - # Extract the (host, port) pair from the config - m = re.match(r".+://(.+):(\d+)", endpoint) # tcp://host:port - assert m is not None - host, port = str(m.group(1)), int(m.group(2)) + # Start the admin client + admin = AdminClient() + admin.connect(host, port) - # Start the admin client - admin = AdminClient() - admin.connect(host, port) + # Check basic "help" command + assert ( + "This process responds to the following CMD subcommands:" + in admin.send_admin("help") + ) - # Check basic "help" command - assert ( - "This process responds to the following CMD subcommands:" - in admin.send_admin("help") - ) + # Check non-existing "invalid cmd" command + assert "Unable to decode command" in admin.send_admin("invalid cmd") - # Check non-existing "invalid cmd" command - assert "Unable to decode command" in admin.send_admin("invalid cmd") + # Check more complex "brokerconfig dump" command, expect valid json + # with the same "port" value as the one used for this connection + broker_config_str = admin.send_admin("brokerconfig dump") + broker_config = json.loads(broker_config_str) - # Check more complex "brokerconfig dump" command, expect valid json - # with the same "port" value as the one used for this connection - broker_config_str = admin.send_admin("brokerconfig dump") - broker_config = json.loads(broker_config_str) + assert broker_config["networkInterfaces"]["tcpInterface"]["port"] == port - assert broker_config["networkInterfaces"]["tcpInterface"]["port"] == port - - # Stop the admin session - admin.stop() + # Stop the admin session + admin.stop() diff --git a/src/integration-tests/test_alarms.py b/src/integration-tests/test_alarms.py index bbdd8007b5..df94606ace 100644 --- a/src/integration-tests/test_alarms.py +++ b/src/integration-tests/test_alarms.py @@ -1,58 +1,57 @@ +""" +Testing broker ALARMS. +""" + import time import bmq.dev.it.testconstants as tc from bmq.dev.it.fixtures import Cluster, cluster, tweak # pylint: disable=unused-import -class TestAlarms: +@tweak.cluster.queue_operations.consumption_monitor_period_ms(500) +@tweak.domain.max_idle_time(3) +def test_no_alarms_for_a_slow_queue(cluster: Cluster): """ - Testing broker ALARMS. + Test no broker ALARMS for a slowly moving queue. 
""" + leader = cluster.last_known_leader + proxy = next(cluster.proxy_cycle()) - @tweak.cluster.queue_operations.consumption_monitor_period_ms(500) - @tweak.domain.max_idle_time(3) - def test_no_alarms_for_a_slow_queue(self, cluster: Cluster): - """ - Test no broker ALARMS for a slowly moving queue. - """ - leader = cluster.last_known_leader - proxy = next(cluster.proxy_cycle()) - - producer = proxy.create_client("producer") - producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) + producer = proxy.create_client("producer") + producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) - consumer1 = proxy.create_client("consumer1") - consumer2 = proxy.create_client("consumer2") - consumer1.open( - tc.URI_PRIORITY, flags=["read"], max_unconfirmed_messages=1, succeed=True - ) + consumer1 = proxy.create_client("consumer1") + consumer2 = proxy.create_client("consumer2") + consumer1.open( + tc.URI_PRIORITY, flags=["read"], max_unconfirmed_messages=1, succeed=True + ) - producer.post(tc.URI_PRIORITY, ["msg1"], succeed=True, wait_ack=True) + producer.post(tc.URI_PRIORITY, ["msg1"], succeed=True, wait_ack=True) - consumer1.confirm(tc.URI_PRIORITY, "*", succeed=True) + consumer1.confirm(tc.URI_PRIORITY, "*", succeed=True) - producer.post(tc.URI_PRIORITY, ["msg1"], succeed=True, wait_ack=True) - producer.post(tc.URI_PRIORITY, ["msg1"], succeed=True, wait_ack=True) + producer.post(tc.URI_PRIORITY, ["msg1"], succeed=True, wait_ack=True) + producer.post(tc.URI_PRIORITY, ["msg1"], succeed=True, wait_ack=True) - time.sleep(4) + time.sleep(4) - # First, test the alarm - assert leader.alarms("QUEUE_CONSUMER_MONITOR", 1) - leader.drain() + # First, test the alarm + assert leader.alarms("QUEUE_CONSUMER_MONITOR", 1) + leader.drain() - # Then test no alarm while consumer1 slowly confirms - time.sleep(1) - consumer1.confirm(tc.URI_PRIORITY, "*", succeed=True) + # Then test no alarm while consumer1 slowly confirms + time.sleep(1) + consumer1.confirm(tc.URI_PRIORITY, "*", succeed=True) - time.sleep(1) - consumer1.confirm(tc.URI_PRIORITY, "*", succeed=True) - producer.post(tc.URI_PRIORITY, ["msg1"], succeed=True, wait_ack=True) + time.sleep(1) + consumer1.confirm(tc.URI_PRIORITY, "*", succeed=True) + producer.post(tc.URI_PRIORITY, ["msg1"], succeed=True, wait_ack=True) - time.sleep(1) - # Consumer2 picks the last message - consumer2.open( - tc.URI_PRIORITY, flags=["read"], max_unconfirmed_messages=1, succeed=True - ) + time.sleep(1) + # Consumer2 picks the last message + consumer2.open( + tc.URI_PRIORITY, flags=["read"], max_unconfirmed_messages=1, succeed=True + ) - time.sleep(1) - assert not leader.alarms("QUEUE_CONSUMER_MONITOR", 1) + time.sleep(1) + assert not leader.alarms("QUEUE_CONSUMER_MONITOR", 1) diff --git a/src/integration-tests/test_appids.py b/src/integration-tests/test_appids.py index f5e4a3724f..c2ff1608ee 100644 --- a/src/integration-tests/test_appids.py +++ b/src/integration-tests/test_appids.py @@ -26,491 +26,496 @@ def set_app_ids(cluster: Cluster, app_ids: List[str]): # noqa: F811 cluster.reconfigure_domain_values(tc.DOMAIN_FANOUT, {}, succeed=True) -class TestDynamicAppids: - def test_open_alarm_authorize_post(self, cluster: Cluster, logger): - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() +def test_open_alarm_authorize_post(cluster: Cluster, logger): + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() + + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + + 
all_app_ids = authorized_app_ids + ["quux"] + + # --------------------------------------------------------------------- + # Create a consumer for each authorized substream. + + consumers = {} + + for app_id in authorized_app_ids: + consumer = next(proxies).create_client(app_id) + consumers[app_id] = consumer + consumer.open(f"{tc.URI_FANOUT}?id={app_id}", flags=["read"], succeed=True) + + # --------------------------------------------------------------------- + # Create a consumer for the unauthorized substream. This should succeed + # but with an ALARM. + + quux = next(proxies).create_client("quux") + consumers["quux"] = quux + assert ( + quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) + == Client.e_SUCCESS + ) + assert leader.alarms() + + # --------------------------------------------------------------------- + # Check that authorized substreams are alive and 'quux' is unauthorized. + + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + + barStatus, bazStatus, fooStatus, quuxStatus = sorted( + [ + leader.capture(r"(\w+).*: status=(\w+)(?:, StorageIter.atEnd=(\w+))?", 60) + for i in all_app_ids + ], + key=lambda m: m[1], + ) + assert barStatus[2] == "alive" + assert bazStatus[2] == "alive" + assert fooStatus[2] == "alive" + assert quuxStatus.group(2, 3) == ("unauthorized", None) + + assert ( + quux.configure( + f"{tc.URI_FANOUT}?id=quux", max_unconfirmed_messages=10, block=True + ) + == Client.e_SUCCESS + ) - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + # --------------------------------------------------------------------- + # Post a message. + producer.post(tc.URI_FANOUT, ["msg1"], succeed=True, wait_ack=True) - all_app_ids = authorized_app_ids + ["quux"] + # --------------------------------------------------------------------- + # Check that 'quux' (unauthorized) client did not receive it. + logger.info('Check that "quux" has not seen any messages') + assert not quux.wait_push_event(timeout=2, quiet=True) + assert len(quux.list(f"{tc.URI_FANOUT}?id=quux", block=True)) == 0 - # --------------------------------------------------------------------- - # Create a consumer for each authorized substream. + # --------------------------------------------------------------------- + # Authorize 'quux'. + set_app_ids(cluster, authorized_app_ids + ["quux"]) - consumers = {} + # --------------------------------------------------------------------- + # Check that all substreams are alive. - for app_id in authorized_app_ids: - consumer = next(proxies).create_client(app_id) - consumers[app_id] = consumer - consumer.open(f"{tc.URI_FANOUT}?id={app_id}", flags=["read"], succeed=True) + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - # --------------------------------------------------------------------- - # Create a consumer for the unauthorized substream. This should succeed - # but with an ALARM. + for app_id in all_app_ids: + leader.outputs_regex(r"(\w+).*: status=alive", timeout) - quux = next(proxies).create_client("quux") - consumers["quux"] = quux - assert ( - quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) - == Client.e_SUCCESS - ) - assert leader.alarms() + # --------------------------------------------------------------------- + # Post a second message. - # --------------------------------------------------------------------- - # Check that authorized substreams are alive and 'quux' is unauthorized. 
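# (Editor's illustrative aside, not part of this patch.) The substream status
# assertions above work by dumping the queue internals and then scraping the
# leader's log. A minimal, hedged example of that pattern, reusing only the
# 'leader' and 'tc' helpers already used in this test:
leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE)
quux_status = leader.capture(r"quux.*: status=(\w+)", 60)
assert quux_status is not None
assert quux_status.group(1) == "unauthorized"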
+ producer.post(tc.URI_FANOUT, ["msg2"]) + assert producer.outputs_regex(r"MESSAGE.*ACK", timeout) - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + # --------------------------------------------------------------------- + # Ensure that previously authorized substreams get 2 messages and the + # newly authorized gets one. - barStatus, bazStatus, fooStatus, quuxStatus = sorted( - [ - leader.capture( - r"(\w+).*: status=(\w+)(?:, StorageIter.atEnd=(\w+))?", 60 - ) - for i in all_app_ids - ], - key=lambda m: m[1], + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + # pylint: disable=cell-var-from-loop; passing lambda to 'wait_until' is safe + for app_id in authorized_app_ids: + logger.info(f"Check if {app_id} has seen 2 messages") + assert wait_until( + lambda: len( + consumers[app_id].list(f"{tc.URI_FANOUT}?id={app_id}", block=True) + ) + == 2, + 3, ) - assert barStatus[2] == "alive" - assert bazStatus[2] == "alive" - assert fooStatus[2] == "alive" - assert quuxStatus.group(2, 3) == ("unauthorized", None) + logger.info("Check if quux has seen 1 message") + assert wait_until( + lambda: len(quux.list(f"{tc.URI_FANOUT}?id=quux", block=True)) == 1, 3 + ) + + for app_id in all_app_ids: assert ( - quux.configure( - f"{tc.URI_FANOUT}?id=quux", max_unconfirmed_messages=10, block=True - ) + consumers[app_id].close(f"{tc.URI_FANOUT}?id={app_id}", block=True) == Client.e_SUCCESS ) - # --------------------------------------------------------------------- - # Post a message. - producer.post(tc.URI_FANOUT, ["msg1"], succeed=True, wait_ack=True) + # Start the 'quux' consumer and then ensure that no alarm is raised at + # leader/primary when a consumer for a recently authorized appId is + # stopped and started. - # --------------------------------------------------------------------- - # Check that 'quux' (unauthorized) client did not receive it. - logger.info('Check that "quux" has not seen any messages') - assert not quux.wait_push_event(timeout=2, quiet=True) - assert len(quux.list(f"{tc.URI_FANOUT}?id=quux", block=True)) == 0 + quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], succeed=True) + assert not leader.alarms() - # --------------------------------------------------------------------- - # Authorize 'quux'. - set_app_ids(cluster, authorized_app_ids + ["quux"]) - # --------------------------------------------------------------------- - # Check that all substreams are alive. +def test_create_authorize_open_post(cluster: Cluster): + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write"], succeed=True) - for app_id in all_app_ids: - leader.outputs_regex(r"(\w+).*: status=alive", timeout) + # --------------------------------------------------------------------- + # Authorize 'quux'. + set_app_ids(cluster, authorized_app_ids + ["quux"]) - # --------------------------------------------------------------------- - # Post a second message. + # --------------------------------------------------------------------- + # Create a consumer for 'quux. This should succeed. 
- producer.post(tc.URI_FANOUT, ["msg2"]) - assert producer.outputs_regex(r"MESSAGE.*ACK", timeout) + quux = next(proxies).create_client("quux") + assert ( + quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) + == Client.e_SUCCESS + ) - # --------------------------------------------------------------------- - # Ensure that previously authorized substreams get 2 messages and the - # newly authorized gets one. + # --------------------------------------------------------------------- + # Check that all substreams are alive. - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - # pylint: disable=cell-var-from-loop; passing lambda to 'wait_until' is safe - for app_id in authorized_app_ids: - logger.info(f"Check if {app_id} has seen 2 messages") - assert wait_until( - lambda: len( - consumers[app_id].list(f"{tc.URI_FANOUT}?id={app_id}", block=True) - ) - == 2, - 3, - ) + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + leader.outputs_regex(r"quux.*: status=alive", timeout) - logger.info("Check if quux has seen 1 message") - assert wait_until( - lambda: len(quux.list(f"{tc.URI_FANOUT}?id=quux", block=True)) == 1, 3 - ) - for app_id in all_app_ids: - assert ( - consumers[app_id].close(f"{tc.URI_FANOUT}?id={app_id}", block=True) - == Client.e_SUCCESS - ) +def test_load_domain_authorize_open_post(cluster: Cluster): + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() - # Start the 'quux' consumer and then ensure that no alarm is raised at - # leader/primary when a consumer for a recently authorized appId is - # stopped and started. + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT + "_another", flags=["write"], succeed=True) - quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], succeed=True) - assert not leader.alarms() + # --------------------------------------------------------------------- + # Authorize 'quux'. + set_app_ids(cluster, authorized_app_ids + ["quux"]) - def test_create_authorize_open_post(self, cluster: Cluster): - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() + # --------------------------------------------------------------------- + # Create a consumer for 'quux. This should succeed. - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write"], succeed=True) + quux = next(proxies).create_client("quux") + quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], succeed=True) - # --------------------------------------------------------------------- - # Authorize 'quux'. - set_app_ids(cluster, authorized_app_ids + ["quux"]) + # --------------------------------------------------------------------- + # Check that all substreams are alive. - # --------------------------------------------------------------------- - # Create a consumer for 'quux. This should succeed. + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + leader.outputs_regex(r"quux.*: status=alive", timeout) - quux = next(proxies).create_client("quux") - assert ( - quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) - == Client.e_SUCCESS - ) - - # --------------------------------------------------------------------- - # Check that all substreams are alive. 
- - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - leader.outputs_regex(r"quux.*: status=alive", timeout) - - def test_load_domain_authorize_open_post(self, cluster: Cluster): - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT + "_another", flags=["write"], succeed=True) +# following test cannot run yet, because domain manager claims domain +# does not exist if no queue exists in it +def _test_authorize_before_domain_loaded(cluster): + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() - # --------------------------------------------------------------------- - # Authorize 'quux'. - set_app_ids(cluster, authorized_app_ids + ["quux"]) + # --------------------------------------------------------------------- + # Authorize 'quux'. + set_app_ids(cluster, authorized_app_ids + ["quux"]) - # --------------------------------------------------------------------- - # Create a consumer for 'quux. This should succeed. + # --------------------------------------------------------------------- + # Create the queue. - quux = next(proxies).create_client("quux") - quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], succeed=True) + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write"], succeed=True) - # --------------------------------------------------------------------- - # Check that all substreams are alive. + # --------------------------------------------------------------------- + # Create a consumer for quux. This should succeed. - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - leader.outputs_regex(r"quux.*: status=alive", timeout) + quux = next(proxies).create_client("quux") + quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"]) + assert quux.outputs_regex(r"openQueue.*\[SUCCESS\]", timeout) - # following test cannot run yet, because domain manager claims domain - # does not exist if no queue exists in it - def _test_authorize_before_domain_loaded(self, cluster): - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() + # --------------------------------------------------------------------- + # Check that all substreams are alive. - # --------------------------------------------------------------------- - # Authorize 'quux'. - set_app_ids(cluster, authorized_app_ids + ["quux"]) + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + leader.outputs_regex(r"quux.*: status=alive", timeout) - # --------------------------------------------------------------------- - # Create the queue. - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write"], succeed=True) +# following test cannot run yet, because domain manager claims domain +# does not exist if no queue exists in it +def _test_command_errors(cluster): + proxies = cluster.proxy_cycle() + next(proxies).create_client("producer") - # --------------------------------------------------------------------- - # Create a consumer for quux. This should succeed. + set_app_ids(cluster, authorized_app_ids + ["quux"]) - quux = next(proxies).create_client("quux") - quux.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"]) - assert quux.outputs_regex(r"openQueue.*\[SUCCESS\]", timeout) + set_app_ids(cluster, authorized_app_ids) - # --------------------------------------------------------------------- - # Check that all substreams are alive. 
- leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - leader.outputs_regex(r"quux.*: status=alive", timeout) +def test_unregister_in_presence_of_queues(cluster: Cluster, logger): + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() - # following test cannot run yet, because domain manager claims domain - # does not exist if no queue exists in it - def _test_command_errors(self, cluster): - proxies = cluster.proxy_cycle() - next(proxies).create_client("producer") + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) - set_app_ids(cluster, authorized_app_ids + ["quux"]) + producer.post(tc.URI_FANOUT, ["before-unregister"], block=True) + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - set_app_ids(cluster, authorized_app_ids) + foo = next(proxies).create_client("foo") + foo.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + bar = next(proxies).create_client("bar") + bar.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) + baz = next(proxies).create_client("baz") + baz.open(tc.URI_FANOUT_BAZ, flags=["read"], succeed=True) - def test_unregister_in_presence_of_queues(self, cluster: Cluster, logger): - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() + # In a moment we'll make sure no messages are sent to 'foo' after it + # has been unregistered, so we need to eat the push event for the + # message posted while 'foo' was still valid. + foo.wait_push_event() - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + set_app_ids(cluster, [a for a in authorized_app_ids if a not in ["foo"]]) - producer.post(tc.URI_FANOUT, ["before-unregister"], block=True) + @attempt(3) + def _(): leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + assert leader.outputs_substr("Num virtual storages: 2") + assert leader.outputs_substr("foo: status=unauthorized") - foo = next(proxies).create_client("foo") - foo.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - bar = next(proxies).create_client("bar") - bar.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) - baz = next(proxies).create_client("baz") - baz.open(tc.URI_FANOUT_BAZ, flags=["read"], succeed=True) - - # In a moment we'll make sure no messages are sent to 'foo' after it - # has been unregistered, so we need to eat the push event for the - # message posted while 'foo' was still valid. 
- foo.wait_push_event() - - set_app_ids(cluster, [a for a in authorized_app_ids if a not in ["foo"]]) - - @attempt(3) - def _(): - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - assert leader.outputs_substr("Num virtual storages: 2") - assert leader.outputs_substr("foo: status=unauthorized") - - logger.info("confirm msg 1 for bar, expecting 1 msg in storage") - time.sleep(1) # Let the message reach the proxy - bar.confirm(tc.URI_FANOUT_BAR, "+1", succeed=True) - - @attempt(3) - def _(): - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - assert leader.outputs_regex("Storage.*: 1 messages") - - logger.info("confirm msg 1 for baz, expecting 0 msg in storage") - time.sleep(1) # Let the message reach the proxy - baz.confirm(tc.URI_FANOUT_BAZ, "+1", succeed=True) - - @attempt(3) - def _(): - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - assert leader.outputs_regex("Storage.*: 0 messages") - - producer.post(tc.URI_FANOUT, ["after-unregister"], block=True) - - assert bar.wait_push_event() - assert len(bar.list(tc.URI_FANOUT_BAR, block=True)) == 1 - assert baz.wait_push_event() - assert len(baz.list(tc.URI_FANOUT_BAZ, block=True)) == 1 - - assert not foo.wait_push_event(timeout=1) - foo_msgs = foo.list(tc.URI_FANOUT_FOO, block=True) - assert len(foo_msgs) == 1 - assert foo_msgs[0].payload == "before-unregister" - - assert Client.e_SUCCESS == foo.confirm( - tc.URI_FANOUT_FOO, foo_msgs[0].guid, block=True - ) - assert Client.e_SUCCESS == foo.close(tc.URI_FANOUT_FOO, block=True) - - # Re-authorize - set_app_ids(cluster, authorized_app_ids) - - foo.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - producer.post(tc.URI_FANOUT, ["after-reauthorize"], block=True) - - @attempt(3) - def _(): - leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) - leader.outputs_regex(r"foo.*: status=alive") + logger.info("confirm msg 1 for bar, expecting 1 msg in storage") + time.sleep(1) # Let the message reach the proxy + bar.confirm(tc.URI_FANOUT_BAR, "+1", succeed=True) - assert foo.wait_push_event() - foo_msgs = foo.list(tc.URI_FANOUT_FOO, block=True) - assert len(foo_msgs) == 1 - assert foo_msgs[0].payload == "after-reauthorize" - - def test_dynamic_twice_alarm_once(self, cluster: Cluster): - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() + @attempt(3) + def _(): + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + assert leader.outputs_regex("Storage.*: 1 messages") - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + logger.info("confirm msg 1 for baz, expecting 0 msg in storage") + time.sleep(1) # Let the message reach the proxy + baz.confirm(tc.URI_FANOUT_BAZ, "+1", succeed=True) - # --------------------------------------------------------------------- - # Create a consumer for the unauthorized substream. This should succeed - # but with an ALARM. + @attempt(3) + def _(): + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + assert leader.outputs_regex("Storage.*: 0 messages") - consumer1 = next(proxies).create_client("consumer1") - assert ( - consumer1.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) - == Client.e_SUCCESS - ) - assert leader.alarms() + producer.post(tc.URI_FANOUT, ["after-unregister"], block=True) - # --------------------------------------------------------------------- - # Create a consumer for the same unauthorized substream. This should - # succeed and no ALARM should be generated. 
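# (Editor's illustrative aside, not part of this patch.) The alarm checks in
# these tests follow a drain-then-assert pattern: draining the leader's
# captured output first presumably ensures that a later alarms() call only
# reflects log lines produced after this point. In outline:
leader.drain()
consumer2.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], succeed=True)
assert not leader.alarms()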
+ assert bar.wait_push_event() + assert len(bar.list(tc.URI_FANOUT_BAR, block=True)) == 1 + assert baz.wait_push_event() + assert len(baz.list(tc.URI_FANOUT_BAZ, block=True)) == 1 - leader.drain() - consumer2 = next(proxies).create_client("consumer2") - assert ( - consumer2.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) - == Client.e_SUCCESS - ) - assert not leader.alarms() + assert not foo.wait_push_event(timeout=1) + foo_msgs = foo.list(tc.URI_FANOUT_FOO, block=True) + assert len(foo_msgs) == 1 + assert foo_msgs[0].payload == "before-unregister" - # --------------------------------------------------------------------- - # Close both unauthorized substreams and re-open new one. It should - # succeed and alarm again. + assert Client.e_SUCCESS == foo.confirm( + tc.URI_FANOUT_FOO, foo_msgs[0].guid, block=True + ) + assert Client.e_SUCCESS == foo.close(tc.URI_FANOUT_FOO, block=True) - consumer1.close(f"{tc.URI_FANOUT}?id=quux", succeed=True) - consumer2.close(f"{tc.URI_FANOUT}?id=quux", succeed=True) + # Re-authorize + set_app_ids(cluster, authorized_app_ids) - assert ( - consumer2.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) - == Client.e_SUCCESS - ) - assert leader.alarms() - - @set_max_messages - def test_unauthorized_appid_doesnt_hold_messages(self, cluster: Cluster): - # Goal: check that dynamically allocated, but not yet authorized, - # substreams do not hold messages in fanout queues. - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() - - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) - - # --------------------------------------------------------------------- - # fill queue to capacity - - for i in range(max_msgs): - producer.post(tc.URI_FANOUT, [f"msg{i}"], block=True) - if producer.outputs_regex("ERROR.*Failed ACK.*LIMIT_MESSAGES", timeout=0): - break - - # --------------------------------------------------------------------- - # dynamically create a substream - unauthorized_consumer = next(proxies).create_client("unauthorized_consumer") - unauthorized_consumer.open(f"{tc.URI_FANOUT}?id=unauthorized", flags=["read"]) - assert leader.alarms() - - # --------------------------------------------------------------------- - # consume all the messages in all the authorized substreams - - # pylint: disable=cell-var-from-loop; passing lambda to 'wait_until' is safe - for app_id in authorized_app_ids: - appid_uri = f"{tc.URI_FANOUT}?id={app_id}" - consumer = next(proxies).create_client(app_id) - consumer.open(appid_uri, flags=["read"], succeed=True) - assert consumer.wait_push_event() - assert wait_until( - lambda: len(consumer.list(appid_uri, block=True)) == max_msgs, 3 - ) - consumer.confirm(appid_uri, "*", succeed=True) + foo.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + producer.post(tc.URI_FANOUT, ["after-reauthorize"], block=True) - # --------------------------------------------------------------------- - # process a new message to confirm that 'unauthorized' substream did - # not hold messages - producer.post(tc.URI_FANOUT, ["newMsg"], block=True) - assert consumer.wait_push_event() - msgs = consumer.list(appid_uri, block=True) - assert len(msgs) == 1 - - @set_max_messages - def test_deauthorized_appid_doesnt_hold_messages(self, cluster: Cluster): - # Goal: check that dynamically de-authorized substreams do not hold - # messages in fanout queues. 
- leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() - - # --------------------------------------------------------------------- - # force the leader to load the domain so we can unregister the appids - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) - - # --------------------------------------------------------------------- - # and remove all the queues otherwise unregistration will fail - producer.close(tc.URI_FANOUT, succeed=True) - leader.force_gc_queues(succeed=True) - - # --------------------------------------------------------------------- - # unauthorize 'bar' and 'baz' - set_app_ids(cluster, [a for a in authorized_app_ids if a not in ["bar", "baz"]]) - - # --------------------------------------------------------------------- - # fill queue to capacity - time.sleep(1) - producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) - num_msgs = 4 - - for i in range(0, num_msgs): - producer.post(tc.URI_FANOUT, [f"msg{i}"], succeed=True) - - # --------------------------------------------------------------------- - # consume messages in the 'foo' substream - appid_uri = f"{tc.URI_FANOUT}?id=foo" - consumer = next(proxies).create_client("foo") + @attempt(3) + def _(): + leader.dump_queue_internals(tc.DOMAIN_FANOUT, tc.TEST_QUEUE) + leader.outputs_regex(r"foo.*: status=alive") + + assert foo.wait_push_event() + foo_msgs = foo.list(tc.URI_FANOUT_FOO, block=True) + assert len(foo_msgs) == 1 + assert foo_msgs[0].payload == "after-reauthorize" + + +def test_dynamic_twice_alarm_once(cluster: Cluster): + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() + + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + + # --------------------------------------------------------------------- + # Create a consumer for the unauthorized substream. This should succeed + # but with an ALARM. + + consumer1 = next(proxies).create_client("consumer1") + assert ( + consumer1.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) + == Client.e_SUCCESS + ) + assert leader.alarms() + + # --------------------------------------------------------------------- + # Create a consumer for the same unauthorized substream. This should + # succeed and no ALARM should be generated. + + leader.drain() + consumer2 = next(proxies).create_client("consumer2") + assert ( + consumer2.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) + == Client.e_SUCCESS + ) + assert not leader.alarms() + + # --------------------------------------------------------------------- + # Close both unauthorized substreams and re-open new one. It should + # succeed and alarm again. + + consumer1.close(f"{tc.URI_FANOUT}?id=quux", succeed=True) + consumer2.close(f"{tc.URI_FANOUT}?id=quux", succeed=True) + + assert ( + consumer2.open(f"{tc.URI_FANOUT}?id=quux", flags=["read"], block=True) + == Client.e_SUCCESS + ) + assert leader.alarms() + + +@set_max_messages +def test_unauthorized_appid_doesnt_hold_messages(cluster: Cluster): + # Goal: check that dynamically allocated, but not yet authorized, + # substreams do not hold messages in fanout queues. 
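# (Editor's note, an illustrative aside rather than part of this patch.) The
# "fill queue to capacity" loop below keeps posting until the producer logs a
# failed ACK with LIMIT_MESSAGES; 'max_msgs' and the 'set_max_messages' tweak
# it relies on are presumably defined near the top of this file, outside the
# hunks shown here, and cap the queue at a small message count so the loop
# terminates quickly.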
+ leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() + + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + + # --------------------------------------------------------------------- + # fill queue to capacity + + for i in range(max_msgs): + producer.post(tc.URI_FANOUT, [f"msg{i}"], block=True) + if producer.outputs_regex("ERROR.*Failed ACK.*LIMIT_MESSAGES", timeout=0): + break + + # --------------------------------------------------------------------- + # dynamically create a substream + unauthorized_consumer = next(proxies).create_client("unauthorized_consumer") + unauthorized_consumer.open(f"{tc.URI_FANOUT}?id=unauthorized", flags=["read"]) + assert leader.alarms() + + # --------------------------------------------------------------------- + # consume all the messages in all the authorized substreams + + # pylint: disable=cell-var-from-loop; passing lambda to 'wait_until' is safe + for app_id in authorized_app_ids: + appid_uri = f"{tc.URI_FANOUT}?id={app_id}" + consumer = next(proxies).create_client(app_id) consumer.open(appid_uri, flags=["read"], succeed=True) assert consumer.wait_push_event() assert wait_until( - lambda: len(consumer.list(appid_uri, block=True)) == num_msgs, 3 + lambda: len(consumer.list(appid_uri, block=True)) == max_msgs, 3 ) - msgs = consumer.list(appid_uri, block=True) - for _ in msgs: - consumer.confirm(appid_uri, "+1", succeed=True) - - # process a new message to confirm that 'bar' and 'baz' substreams did - # not hold messages - producer.post(tc.URI_FANOUT, ["newMsg"], block=True) - assert consumer.wait_push_event() - msgs = consumer.list(appid_uri, block=True) - assert len(msgs) == 1 - - def test_unauthorization(self, cluster: Cluster): - # Goal: check that dynamically unauthorizing apps with live consumers - # invalidates their virtual iterators - proxies = cluster.proxy_cycle() - - # --------------------------------------------------------------------- - # get producer and "foo" consumer - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) - - appid_uri = f"{tc.URI_FANOUT}?id=foo" - consumer = next(proxies).create_client("foo") - consumer.open(appid_uri, flags=["read"], succeed=True) - - producer.post(tc.URI_FANOUT, ["msg1"], succeed=True) - - # --------------------------------------------------------------------- - # unauthorize everything - set_app_ids(cluster, []) - - # --------------------------------------------------------------------- - # if iterators are not invalidated, 'afterNewMessage' will crash - producer.post(tc.URI_FANOUT, ["msg2"], succeed=True) - - # --------------------------------------------------------------------- - # check if the leader is still there - appid_uri = f"{tc.URI_FANOUT}?id=bar" - consumer = next(proxies).create_client("bar") - consumer.open(appid_uri, flags=["read"], succeed=True) - - def test_two_consumers_of_unauthorized_app(self, standard_cluster: Cluster): - """DRQS 167201621: First client open authorized and unauthorized apps; - second client opens unauthorized app. - Then, primary shuts down causing replica to issue wildcard close - requests to primary. 
- """ - - leader = standard_cluster.last_known_leader - - replica1 = standard_cluster.nodes()[0] - if replica1 == leader: - replica1 = standard_cluster.nodes()[1] - - # --------------------------------------------------------------------- - # Two "foo" and "unauthorized" consumers - consumer1 = replica1.create_client("consumer1") - consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - consumer1.open(f"{tc.URI_FANOUT}?id=unauthorized", flags=["read"], succeed=True) - - replica2 = standard_cluster.nodes()[2] - if replica2 == leader: - replica2 = standard_cluster.nodes()[3] - - consumer2 = replica2.create_client("consumer2") - consumer2.open(f"{tc.URI_FANOUT}?id=unauthorized", flags=["read"], succeed=True) - - # --------------------------------------------------------------------- - # shutdown and wait - - leader.stop() + consumer.confirm(appid_uri, "*", succeed=True) + + # --------------------------------------------------------------------- + # process a new message to confirm that 'unauthorized' substream did + # not hold messages + producer.post(tc.URI_FANOUT, ["newMsg"], block=True) + assert consumer.wait_push_event() + msgs = consumer.list(appid_uri, block=True) + assert len(msgs) == 1 + + +@set_max_messages +def test_deauthorized_appid_doesnt_hold_messages(cluster: Cluster): + # Goal: check that dynamically de-authorized substreams do not hold + # messages in fanout queues. + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() + + # --------------------------------------------------------------------- + # force the leader to load the domain so we can unregister the appids + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + + # --------------------------------------------------------------------- + # and remove all the queues otherwise unregistration will fail + producer.close(tc.URI_FANOUT, succeed=True) + leader.force_gc_queues(succeed=True) + + # --------------------------------------------------------------------- + # unauthorize 'bar' and 'baz' + set_app_ids(cluster, [a for a in authorized_app_ids if a not in ["bar", "baz"]]) + + # --------------------------------------------------------------------- + # fill queue to capacity + time.sleep(1) + producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + num_msgs = 4 + + for i in range(0, num_msgs): + producer.post(tc.URI_FANOUT, [f"msg{i}"], succeed=True) + + # --------------------------------------------------------------------- + # consume messages in the 'foo' substream + appid_uri = f"{tc.URI_FANOUT}?id=foo" + consumer = next(proxies).create_client("foo") + consumer.open(appid_uri, flags=["read"], succeed=True) + assert consumer.wait_push_event() + assert wait_until(lambda: len(consumer.list(appid_uri, block=True)) == num_msgs, 3) + msgs = consumer.list(appid_uri, block=True) + for _ in msgs: + consumer.confirm(appid_uri, "+1", succeed=True) + + # process a new message to confirm that 'bar' and 'baz' substreams did + # not hold messages + producer.post(tc.URI_FANOUT, ["newMsg"], block=True) + assert consumer.wait_push_event() + msgs = consumer.list(appid_uri, block=True) + assert len(msgs) == 1 + + +def test_unauthorization(cluster: Cluster): + # Goal: check that dynamically unauthorizing apps with live consumers + # invalidates their virtual iterators + proxies = cluster.proxy_cycle() + + # --------------------------------------------------------------------- + # get producer and "foo" consumer + producer = 
next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) + + appid_uri = f"{tc.URI_FANOUT}?id=foo" + consumer = next(proxies).create_client("foo") + consumer.open(appid_uri, flags=["read"], succeed=True) + + producer.post(tc.URI_FANOUT, ["msg1"], succeed=True) + + # --------------------------------------------------------------------- + # unauthorize everything + set_app_ids(cluster, []) + + # --------------------------------------------------------------------- + # if iterators are not invalidated, 'afterNewMessage' will crash + producer.post(tc.URI_FANOUT, ["msg2"], succeed=True) + + # --------------------------------------------------------------------- + # check if the leader is still there + appid_uri = f"{tc.URI_FANOUT}?id=bar" + consumer = next(proxies).create_client("bar") + consumer.open(appid_uri, flags=["read"], succeed=True) + + +def test_two_consumers_of_unauthorized_app(standard_cluster: Cluster): + """DRQS 167201621: First client open authorized and unauthorized apps; + second client opens unauthorized app. + Then, primary shuts down causing replica to issue wildcard close + requests to primary. + """ + + leader = standard_cluster.last_known_leader + + replica1 = standard_cluster.nodes()[0] + if replica1 == leader: + replica1 = standard_cluster.nodes()[1] + + # --------------------------------------------------------------------- + # Two "foo" and "unauthorized" consumers + consumer1 = replica1.create_client("consumer1") + consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + consumer1.open(f"{tc.URI_FANOUT}?id=unauthorized", flags=["read"], succeed=True) + + replica2 = standard_cluster.nodes()[2] + if replica2 == leader: + replica2 = standard_cluster.nodes()[3] + + consumer2 = replica2.create_client("consumer2") + consumer2.open(f"{tc.URI_FANOUT}?id=unauthorized", flags=["read"], succeed=True) + + # --------------------------------------------------------------------- + # shutdown and wait + + leader.stop() diff --git a/src/integration-tests/test_breathing.py b/src/integration-tests/test_breathing.py index 3383da5886..d32a0b6216 100644 --- a/src/integration-tests/test_breathing.py +++ b/src/integration-tests/test_breathing.py @@ -1,6 +1,12 @@ +""" +This test suite exercises basic routing functionality to in the presence of all +types of queues. +""" + from collections import namedtuple import bmq.dev.it.testconstants as tc +import pytest from bmq.dev.it.fixtures import ( # pylint: disable=unused-import Cluster, cartesian_product_cluster, @@ -12,912 +18,860 @@ from bmq.dev.it.process.client import Client from bmq.dev.it.util import wait_until -BmqClient = namedtuple("BmqClient", "handle, uri") - - -class TestBreathing: - """This suite of test cases exercises basic routing functionality to - in the presence of all types of queues. 
- """ - - def _close_clients(self, clients, uris): - for client, uri in zip(clients, uris): - assert client.close(uri, block=True) == Client.e_SUCCESS - - def _stop_clients(self, clients): - for client in clients: - assert client.stop_session(block=True) == Client.e_SUCCESS - - def _verify_delivery(self, consumer, uri, messages, timeout=2): - consumer.wait_push_event() - assert wait_until( - lambda: len(consumer.list(uri, block=True)) == len(messages), timeout - ) - consumer.list(uri, block=True) - - def _verify_delivery_and_confirm(self, consumer, uri, messages): - self._verify_delivery(consumer, uri, messages) - assert consumer.confirm(uri, "*", block=True) == Client.e_SUCCESS - - def _verify_delivery_and_confirm_balanced( - self, consumer, uris, messages, timeout=3 - ): - consumer.wait_push_event() - - def wait_cond(): - return sum(map(lambda u: len(consumer.list(u, block=True)), uris)) == len( - messages - ) - - assert wait_until(wait_cond, timeout) +pytestmark = pytest.mark.order(0) - msgs = [] - for uri in uris: - uri_msgs = consumer.list(uri, block=True) - - # Ensure each uri has received part of messages - assert len(uri_msgs) > 0 - - msgs.extend(uri_msgs) - - # We cannot rely on the order of incoming messages so we just sort both lists - messages.sort() - msgs.sort(key=lambda msg: msg.payload) - - for i, message in enumerate(messages): - assert msgs[i].payload == message - - for uri in uris: - assert consumer.confirm(uri, "*", block=True) == Client.e_SUCCESS - - def _verify_max_messages_max_bytes_routing( - self, producer, consumer, other_consumers - ): - # Verify no messages when we start - - try: - assert len(consumer.handle.list(consumer.uri, block=True)) == 0 - except RuntimeError: - pass # No messages, that's what we want - - # verify maxUnconfirmedBytes. 
Post message exceeding max - assert ( - producer.handle.post( - producer.uri, - payload=["123"], - block=True, - wait_ack=True, - ) - == Client.e_SUCCESS - ) - - self._verify_delivery(consumer.handle, consumer.uri, ["123"]) - # not confirming - - for anotherConsumer in other_consumers: - self._verify_delivery_and_confirm( - anotherConsumer.handle, anotherConsumer.uri, ["123"] - ) - - assert ( - producer.handle.post(producer.uri, payload=["1"], block=True, wait_ack=True) - == Client.e_SUCCESS - ) - - for anotherConsumer in other_consumers: - self._verify_delivery_and_confirm( - anotherConsumer.handle, anotherConsumer.uri, ["1"] - ) - - # consumer is over maxUnconfirmedBytes (3) - # assert no PUSH received within 1 second - assert not consumer.handle.outputs_regex("MESSAGE.*PUSH", timeout=1) - msgs = consumer.handle.list(consumer.uri, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "123" - - assert ( - consumer.handle.confirm(consumer.uri, "*", block=True) == Client.e_SUCCESS - ) - - # onHandleUsable kicks in - self._verify_delivery(consumer.handle, consumer.uri, ["1"]) - # not confirming - - # verify maxUnconfirmedMessages - assert ( - producer.handle.post(producer.uri, payload=["2"], block=True, wait_ack=True) - == Client.e_SUCCESS - ) - - for anotherConsumer in other_consumers: - self._verify_delivery_and_confirm( - anotherConsumer.handle, anotherConsumer.uri, ["2"] - ) - - self._verify_delivery(consumer.handle, consumer.uri, ["1", "2"]) - # not confirming - - assert ( - producer.handle.post(producer.uri, payload=["3"], block=True, wait_ack=True) - == Client.e_SUCCESS - ) - - for anotherConsumer in other_consumers: - self._verify_delivery_and_confirm( - anotherConsumer.handle, anotherConsumer.uri, ["3"] - ) +BmqClient = namedtuple("BmqClient", "handle, uri") - # consumer is over maxUnconfirmedMessages (2) - # assert no PUSH received within 1 second - assert not consumer.handle.outputs_regex("MESSAGE.*PUSH", timeout=1) - msgs = consumer.handle.list(consumer.uri, block=True) - assert len(msgs) == 2 - assert msgs[0].payload == "1" - assert msgs[1].payload == "2" - assert ( - consumer.handle.confirm(consumer.uri, "*", block=True) == Client.e_SUCCESS - ) +def _close_clients(clients, uris): + for client, uri in zip(clients, uris): + assert client.close(uri, block=True) == Client.e_SUCCESS - # onHandleUsable kicks in - self._verify_delivery(consumer.handle, consumer.uri, ["3"]) - - def _verify_priority_routing(self, producers, consumers, lowPriorityConsumers): - - # Verify no messages when we start - for consumer in consumers + lowPriorityConsumers: - try: - assert len(consumer.list(tc.URI_PRIORITY, block=True)) == 0 - except RuntimeError: - pass # No messages, that's what we want - - # Route messages and verify - for producer in producers: - assert ( - producer.post( - tc.URI_PRIORITY, payload=["msg"], block=True, wait_ack=True - ) - == Client.e_SUCCESS - ) - - for consumer in consumers: - self._verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) - - for consumer in lowPriorityConsumers: - # assert no PUSH received within 1 second - assert not consumer.outputs_regex("MESSAGE.*PUSH", timeout=1) - assert not consumer.list(tc.URI_PRIORITY, block=True) - - def test_open_queue(self, cartesian_product_cluster: Cluster): - cluster = cartesian_product_cluster - [consumer] = cluster.open_priority_queues(1, flags=["read"]) - [producer] = cluster.open_priority_queues(1, flags=["write"]) - producer.post(payload=["foo"], succeed=True) - consumer.client.wait_push_event() - msgs = 
consumer.list(block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "foo" - def test_verify_priority(self, cluster: Cluster): - proxies = cluster.proxy_cycle() +def _stop_clients(clients): + for client in clients: + assert client.stop_session(block=True) == Client.e_SUCCESS - # 1: Setup producers and consumers - # Proxy in same datacenter as leader/primary - proxy1 = next(proxies) - producer1 = proxy1.create_client("producer1") - assert ( - producer1.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) - == Client.e_SUCCESS - ) +def _verify_delivery(consumer, uri, messages, timeout=2): + consumer.wait_push_event() + assert wait_until( + lambda: len(consumer.list(uri, block=True)) == len(messages), timeout + ) + consumer.list(uri, block=True) - consumer1 = proxy1.create_client("consumer1") - assert ( - consumer1.open( - tc.URI_PRIORITY, flags=["read"], consumer_priority=2, block=True - ) - == Client.e_SUCCESS - ) - # Replica proxy - proxy2 = next(proxies) +def _verify_delivery_and_confirm(consumer, uri, messages): + _verify_delivery(consumer, uri, messages) + assert consumer.confirm(uri, "*", block=True) == Client.e_SUCCESS - producer2 = proxy2.create_client("producer2") - assert ( - producer2.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) - == Client.e_SUCCESS - ) - consumer2 = proxy2.create_client("consumer2") - assert ( - consumer2.open( - tc.URI_PRIORITY, flags=["read"], consumer_priority=2, block=True - ) - == Client.e_SUCCESS - ) +def _verify_delivery_and_confirm_balanced(consumer, uris, messages, timeout=3): + consumer.wait_push_event() - consumer3 = proxy1.create_client("consumer3") - assert ( - consumer3.open( - tc.URI_PRIORITY, flags=["read"], consumer_priority=1, block=True - ) - == Client.e_SUCCESS + def wait_cond(): + return sum(map(lambda u: len(consumer.list(u, block=True)), uris)) == len( + messages ) - consumer4 = proxy2.create_client("consumer4") - assert ( - consumer4.open( - tc.URI_PRIORITY, flags=["read"], consumer_priority=1, block=True - ) - == Client.e_SUCCESS - ) + assert wait_until(wait_cond, timeout) - # 2: Route messages and verify - self._verify_priority_routing( - [producer1, producer2], [consumer1, consumer2], [consumer3, consumer4] - ) + msgs = [] + for uri in uris: + uri_msgs = consumer.list(uri, block=True) - # 3: Close everything - self._close_clients( - [producer1, consumer1, producer2, consumer2, consumer3, consumer4], - [tc.URI_PRIORITY], - ) - self._stop_clients( - [producer1, consumer1, producer2, consumer2, consumer3, consumer4] - ) + # Ensure each uri has received part of messages + assert len(uri_msgs) > 0 - # 4: Repeat the test with reeverse order of opening clients (consumers - # first). 
- consumer1 = proxy1.create_client("consumer1") - assert ( - consumer1.open( - tc.URI_PRIORITY, flags=["read"], consumer_priority=2, block=True - ) - == Client.e_SUCCESS - ) + msgs.extend(uri_msgs) - consumer2 = proxy2.create_client("consumer2") - assert ( - consumer2.open( - tc.URI_PRIORITY, flags=["read"], consumer_priority=2, block=True - ) - == Client.e_SUCCESS - ) + # We cannot rely on the order of incoming messages so we just sort both lists + messages.sort() + msgs.sort(key=lambda msg: msg.payload) - consumer3 = proxy1.create_client("consumer3") - assert ( - consumer3.open( - tc.URI_PRIORITY, flags=["read"], consumer_priority=1, block=True - ) - == Client.e_SUCCESS - ) + for i, message in enumerate(messages): + assert msgs[i].payload == message - consumer4 = proxy2.create_client("consumer4") - assert ( - consumer4.open( - tc.URI_PRIORITY, flags=["read"], consumer_priority=1, block=True - ) - == Client.e_SUCCESS - ) + for uri in uris: + assert consumer.confirm(uri, "*", block=True) == Client.e_SUCCESS - producer2 = proxy2.create_client("producer2") - assert ( - producer2.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) - == Client.e_SUCCESS - ) - producer1 = proxy1.create_client("producer1") - assert ( - producer1.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) - == Client.e_SUCCESS - ) +def _verify_max_messages_max_bytes_routing(producer, consumer, other_consumers): + # Verify no messages when we start - # 5: Route messages and verify - self._verify_priority_routing( - [producer1, producer2], [consumer1, consumer2], [consumer3, consumer4] - ) + try: + assert len(consumer.handle.list(consumer.uri, block=True)) == 0 + except RuntimeError: + pass # No messages, that's what we want - # 6: test maxUnconfirmedMessages, maxUnconfirmedBytes - assert ( - consumer2.configure( - tc.URI_PRIORITY, - consumer_priority=3, - max_unconfirmed_messages=2, - max_unconfirmed_bytes=3, - block=True, - ) - == Client.e_SUCCESS + # verify maxUnconfirmedBytes. Post message exceeding max + assert ( + producer.handle.post( + producer.uri, + payload=["123"], + block=True, + wait_ack=True, ) + == Client.e_SUCCESS + ) - self._verify_max_messages_max_bytes_routing( - BmqClient(producer2, tc.URI_PRIORITY), - BmqClient(consumer2, tc.URI_PRIORITY), - [], - ) + _verify_delivery(consumer.handle, consumer.uri, ["123"]) + # not confirming - # 7: Close everything - self._close_clients( - [producer1, consumer1, producer2, consumer2, consumer3, consumer4], - [tc.URI_PRIORITY], + for anotherConsumer in other_consumers: + _verify_delivery_and_confirm( + anotherConsumer.handle, anotherConsumer.uri, ["123"] ) - self._stop_clients( - [producer1, consumer1, producer2, consumer2, consumer3, consumer4] - ) - - def test_verify_fanout(self, cluster: Cluster): - # 1: Setup producers and consumers - proxies = cluster.proxy_cycle() - - # Proxy in same datacenter as leader/primary - proxy1 = next(proxies) - fooConsumerAndProducerOnPrimaryProxy = proxy1.create_client( - "fooConsumerAndProducerOnPrimaryProxy" - ) + assert ( + producer.handle.post(producer.uri, payload=["1"], block=True, wait_ack=True) + == Client.e_SUCCESS + ) - # testing {client1 open "foo" for read, client2 open "bar" for read, - # client1 open for write} sequence (RDSIBMQ-1008). 
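# (Editor's illustrative aside, not part of this patch.) The helper below
# exercises broker-side flow control: once a consumer sits at its
# max_unconfirmed_messages / max_unconfirmed_bytes limit, no further PUSH is
# delivered until it confirms. Reusing only calls that appear in this file,
# the core of that check looks roughly like:
consumer.handle.configure(
    consumer.uri, max_unconfirmed_messages=2, max_unconfirmed_bytes=3, block=True
)
assert not consumer.handle.outputs_regex("MESSAGE.*PUSH", timeout=1)
assert consumer.handle.confirm(consumer.uri, "*", block=True) == Client.e_SUCCESS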
+ for anotherConsumer in other_consumers: + _verify_delivery_and_confirm(anotherConsumer.handle, anotherConsumer.uri, ["1"]) - assert ( - fooConsumerAndProducerOnPrimaryProxy.open( - tc.URI_FANOUT_FOO, flags=["read"], block=True - ) - == Client.e_SUCCESS - ) + # consumer is over maxUnconfirmedBytes (3) + # assert no PUSH received within 1 second + assert not consumer.handle.outputs_regex("MESSAGE.*PUSH", timeout=1) + msgs = consumer.handle.list(consumer.uri, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "123" - barConsumerOnPrimaryProxy = proxy1.create_client("barConsumerOnPrimaryProxy") - assert ( - barConsumerOnPrimaryProxy.open( - tc.URI_FANOUT_BAR, flags=["read"], block=True - ) - == Client.e_SUCCESS - ) + assert consumer.handle.confirm(consumer.uri, "*", block=True) == Client.e_SUCCESS - assert ( - fooConsumerAndProducerOnPrimaryProxy.open( - tc.URI_FANOUT, flags=["write", "ack"], block=True - ) - == Client.e_SUCCESS - ) + # onHandleUsable kicks in + _verify_delivery(consumer.handle, consumer.uri, ["1"]) + # not confirming - assert ( - barConsumerOnPrimaryProxy.close(tc.URI_FANOUT_BAR, block=True) - == Client.e_SUCCESS - ) + # verify maxUnconfirmedMessages + assert ( + producer.handle.post(producer.uri, payload=["2"], block=True, wait_ack=True) + == Client.e_SUCCESS + ) - # Replica proxy - proxy2 = next(proxies) + for anotherConsumer in other_consumers: + _verify_delivery_and_confirm(anotherConsumer.handle, anotherConsumer.uri, ["2"]) - producerOnReplicaProxy = proxy2.create_client("producerOnReplicaProxy") - assert ( - producerOnReplicaProxy.open( - tc.URI_FANOUT, flags=["write", "ack"], block=True - ) - == Client.e_SUCCESS - ) + _verify_delivery(consumer.handle, consumer.uri, ["1", "2"]) + # not confirming - barConsumerOnReplicaProxy = proxy2.create_client("barConsumerOnReplicaProxy") - assert ( - barConsumerOnReplicaProxy.open( - tc.URI_FANOUT_BAR, flags=["read"], block=True - ) - == Client.e_SUCCESS - ) + assert ( + producer.handle.post(producer.uri, payload=["3"], block=True, wait_ack=True) + == Client.e_SUCCESS + ) - assert ( - len( - fooConsumerAndProducerOnPrimaryProxy.list(tc.URI_FANOUT_FOO, block=True) - ) - == 0 - ) - assert len(barConsumerOnReplicaProxy.list(tc.URI_FANOUT_BAR, block=True)) == 0 + for anotherConsumer in other_consumers: + _verify_delivery_and_confirm(anotherConsumer.handle, anotherConsumer.uri, ["3"]) - # 2: Route messages and verify - assert ( - fooConsumerAndProducerOnPrimaryProxy.post( - tc.URI_FANOUT, payload=["msg1"], block=True, wait_ack=True - ) - == Client.e_SUCCESS - ) + # consumer is over maxUnconfirmedMessages (2) + # assert no PUSH received within 1 second + assert not consumer.handle.outputs_regex("MESSAGE.*PUSH", timeout=1) + msgs = consumer.handle.list(consumer.uri, block=True) + assert len(msgs) == 2 + assert msgs[0].payload == "1" + assert msgs[1].payload == "2" - self._verify_delivery_and_confirm( - fooConsumerAndProducerOnPrimaryProxy, tc.URI_FANOUT_FOO, ["msg1"] - ) + assert consumer.handle.confirm(consumer.uri, "*", block=True) == Client.e_SUCCESS - self._verify_delivery_and_confirm( - barConsumerOnReplicaProxy, tc.URI_FANOUT_BAR, ["msg1"] - ) + # onHandleUsable kicks in + _verify_delivery(consumer.handle, consumer.uri, ["3"]) - assert ( - producerOnReplicaProxy.post( - tc.URI_FANOUT, payload=["msg2"], block=True, wait_ack=True - ) - == Client.e_SUCCESS - ) - self._verify_delivery_and_confirm( - fooConsumerAndProducerOnPrimaryProxy, tc.URI_FANOUT_FOO, ["msg2"] - ) +def _verify_priority_routing(producers, consumers, 
lowPriorityConsumers): - self._verify_delivery_and_confirm( - barConsumerOnReplicaProxy, tc.URI_FANOUT_BAR, ["msg2"] - ) + # Verify no messages when we start + for consumer in consumers + lowPriorityConsumers: + try: + assert len(consumer.list(tc.URI_PRIORITY, block=True)) == 0 + except RuntimeError: + pass # No messages, that's what we want - # 3: test maxUnconfirmedMessages, maxUnconfirmedBytes + # Route messages and verify + for producer in producers: assert ( - barConsumerOnReplicaProxy.configure( - tc.URI_FANOUT_BAR, - max_unconfirmed_messages=2, - max_unconfirmed_bytes=3, - block=True, - ) + producer.post(tc.URI_PRIORITY, payload=["msg"], block=True, wait_ack=True) == Client.e_SUCCESS ) - self._verify_max_messages_max_bytes_routing( - BmqClient(handle=producerOnReplicaProxy, uri=tc.URI_FANOUT), - BmqClient(handle=barConsumerOnReplicaProxy, uri=tc.URI_FANOUT_BAR), - [ - BmqClient( - handle=fooConsumerAndProducerOnPrimaryProxy, uri=tc.URI_FANOUT_FOO - ) - ], - ) + for consumer in consumers: + _verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) - # 4: Close everything - self._close_clients( - [ - fooConsumerAndProducerOnPrimaryProxy, - fooConsumerAndProducerOnPrimaryProxy, - producerOnReplicaProxy, - barConsumerOnReplicaProxy, - ], - [tc.URI_FANOUT, tc.URI_FANOUT_FOO, tc.URI_FANOUT, tc.URI_FANOUT_BAR], - ) + for consumer in lowPriorityConsumers: + # assert no PUSH received within 1 second + assert not consumer.outputs_regex("MESSAGE.*PUSH", timeout=1) + assert not consumer.list(tc.URI_PRIORITY, block=True) + + +def test_open_queue(cartesian_product_cluster: Cluster): + cluster = cartesian_product_cluster + [consumer] = cluster.open_priority_queues(1, flags=["read"]) + [producer] = cluster.open_priority_queues(1, flags=["write"]) + producer.post(payload=["foo"], succeed=True) + consumer.client.wait_push_event() + msgs = consumer.list(block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "foo" + + +def test_verify_priority(cluster: Cluster): + proxies = cluster.proxy_cycle() + + # 1: Setup producers and consumers + # Proxy in same datacenter as leader/primary + proxy1 = next(proxies) + + producer1 = proxy1.create_client("producer1") + assert ( + producer1.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) + == Client.e_SUCCESS + ) + + consumer1 = proxy1.create_client("consumer1") + assert ( + consumer1.open(tc.URI_PRIORITY, flags=["read"], consumer_priority=2, block=True) + == Client.e_SUCCESS + ) + + # Replica proxy + proxy2 = next(proxies) + + producer2 = proxy2.create_client("producer2") + assert ( + producer2.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) + == Client.e_SUCCESS + ) + + consumer2 = proxy2.create_client("consumer2") + assert ( + consumer2.open(tc.URI_PRIORITY, flags=["read"], consumer_priority=2, block=True) + == Client.e_SUCCESS + ) + + consumer3 = proxy1.create_client("consumer3") + assert ( + consumer3.open(tc.URI_PRIORITY, flags=["read"], consumer_priority=1, block=True) + == Client.e_SUCCESS + ) + + consumer4 = proxy2.create_client("consumer4") + assert ( + consumer4.open(tc.URI_PRIORITY, flags=["read"], consumer_priority=1, block=True) + == Client.e_SUCCESS + ) + + # 2: Route messages and verify + _verify_priority_routing( + [producer1, producer2], [consumer1, consumer2], [consumer3, consumer4] + ) + + # 3: Close everything + _close_clients( + [producer1, consumer1, producer2, consumer2, consumer3, consumer4], + [tc.URI_PRIORITY], + ) + _stop_clients([producer1, consumer1, producer2, consumer2, consumer3, consumer4]) + + # 
4: Repeat the test with reverse order of opening clients (consumers
+    # first).
+    consumer1 = proxy1.create_client("consumer1")
+    assert (
+        consumer1.open(tc.URI_PRIORITY, flags=["read"], consumer_priority=2, block=True)
+        == Client.e_SUCCESS
+    )
+
+    consumer2 = proxy2.create_client("consumer2")
+    assert (
+        consumer2.open(tc.URI_PRIORITY, flags=["read"], consumer_priority=2, block=True)
+        == Client.e_SUCCESS
+    )
+
+    consumer3 = proxy1.create_client("consumer3")
+    assert (
+        consumer3.open(tc.URI_PRIORITY, flags=["read"], consumer_priority=1, block=True)
+        == Client.e_SUCCESS
+    )
+
+    consumer4 = proxy2.create_client("consumer4")
+    assert (
+        consumer4.open(tc.URI_PRIORITY, flags=["read"], consumer_priority=1, block=True)
+        == Client.e_SUCCESS
+    )
+
+    producer2 = proxy2.create_client("producer2")
+    assert (
+        producer2.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True)
+        == Client.e_SUCCESS
+    )
+
+    producer1 = proxy1.create_client("producer1")
+    assert (
+        producer1.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True)
+        == Client.e_SUCCESS
+    )
+
+    # 5: Route messages and verify
+    _verify_priority_routing(
+        [producer1, producer2], [consumer1, consumer2], [consumer3, consumer4]
+    )
+
+    # 6: test maxUnconfirmedMessages, maxUnconfirmedBytes
+    assert (
+        consumer2.configure(
+            tc.URI_PRIORITY,
+            consumer_priority=3,
+            max_unconfirmed_messages=2,
+            max_unconfirmed_bytes=3,
+            block=True,
+        )
+        == Client.e_SUCCESS
+    )
+
+    _verify_max_messages_max_bytes_routing(
+        BmqClient(producer2, tc.URI_PRIORITY),
+        BmqClient(consumer2, tc.URI_PRIORITY),
+        [],
+    )
+
+    # 7: Close everything
+    _close_clients(
+        [producer1, consumer1, producer2, consumer2, consumer3, consumer4],
+        [tc.URI_PRIORITY],
+    )
+    _stop_clients([producer1, consumer1, producer2, consumer2, consumer3, consumer4])
+
+
+def test_verify_fanout(cluster: Cluster):
+    # 1: Setup producers and consumers
+    proxies = cluster.proxy_cycle()
+
+    # Proxy in same datacenter as leader/primary
+    proxy1 = next(proxies)
+
+    fooConsumerAndProducerOnPrimaryProxy = proxy1.create_client(
+        "fooConsumerAndProducerOnPrimaryProxy"
+    )
+
+    # testing {client1 open "foo" for read, client2 open "bar" for read,
+    # client1 open for write} sequence (RDSIBMQ-1008).
+ + assert ( + fooConsumerAndProducerOnPrimaryProxy.open( + tc.URI_FANOUT_FOO, flags=["read"], block=True + ) + == Client.e_SUCCESS + ) + + barConsumerOnPrimaryProxy = proxy1.create_client("barConsumerOnPrimaryProxy") + assert ( + barConsumerOnPrimaryProxy.open(tc.URI_FANOUT_BAR, flags=["read"], block=True) + == Client.e_SUCCESS + ) + + assert ( + fooConsumerAndProducerOnPrimaryProxy.open( + tc.URI_FANOUT, flags=["write", "ack"], block=True + ) + == Client.e_SUCCESS + ) + + assert ( + barConsumerOnPrimaryProxy.close(tc.URI_FANOUT_BAR, block=True) + == Client.e_SUCCESS + ) + + # Replica proxy + proxy2 = next(proxies) + + producerOnReplicaProxy = proxy2.create_client("producerOnReplicaProxy") + assert ( + producerOnReplicaProxy.open(tc.URI_FANOUT, flags=["write", "ack"], block=True) + == Client.e_SUCCESS + ) + + barConsumerOnReplicaProxy = proxy2.create_client("barConsumerOnReplicaProxy") + assert ( + barConsumerOnReplicaProxy.open(tc.URI_FANOUT_BAR, flags=["read"], block=True) + == Client.e_SUCCESS + ) + + assert ( + len(fooConsumerAndProducerOnPrimaryProxy.list(tc.URI_FANOUT_FOO, block=True)) + == 0 + ) + assert len(barConsumerOnReplicaProxy.list(tc.URI_FANOUT_BAR, block=True)) == 0 + + # 2: Route messages and verify + assert ( + fooConsumerAndProducerOnPrimaryProxy.post( + tc.URI_FANOUT, payload=["msg1"], block=True, wait_ack=True + ) + == Client.e_SUCCESS + ) + + _verify_delivery_and_confirm( + fooConsumerAndProducerOnPrimaryProxy, tc.URI_FANOUT_FOO, ["msg1"] + ) + + _verify_delivery_and_confirm(barConsumerOnReplicaProxy, tc.URI_FANOUT_BAR, ["msg1"]) + + assert ( + producerOnReplicaProxy.post( + tc.URI_FANOUT, payload=["msg2"], block=True, wait_ack=True + ) + == Client.e_SUCCESS + ) + + _verify_delivery_and_confirm( + fooConsumerAndProducerOnPrimaryProxy, tc.URI_FANOUT_FOO, ["msg2"] + ) + + _verify_delivery_and_confirm(barConsumerOnReplicaProxy, tc.URI_FANOUT_BAR, ["msg2"]) + + # 3: test maxUnconfirmedMessages, maxUnconfirmedBytes + assert ( + barConsumerOnReplicaProxy.configure( + tc.URI_FANOUT_BAR, + max_unconfirmed_messages=2, + max_unconfirmed_bytes=3, + block=True, + ) + == Client.e_SUCCESS + ) + + _verify_max_messages_max_bytes_routing( + BmqClient(handle=producerOnReplicaProxy, uri=tc.URI_FANOUT), + BmqClient(handle=barConsumerOnReplicaProxy, uri=tc.URI_FANOUT_BAR), + [BmqClient(handle=fooConsumerAndProducerOnPrimaryProxy, uri=tc.URI_FANOUT_FOO)], + ) + + # 4: Close everything + _close_clients( + [ + fooConsumerAndProducerOnPrimaryProxy, + fooConsumerAndProducerOnPrimaryProxy, + producerOnReplicaProxy, + barConsumerOnReplicaProxy, + ], + [tc.URI_FANOUT, tc.URI_FANOUT_FOO, tc.URI_FANOUT, tc.URI_FANOUT_BAR], + ) + + _stop_clients( + [ + fooConsumerAndProducerOnPrimaryProxy, + producerOnReplicaProxy, + barConsumerOnReplicaProxy, + ] + ) + + +def test_verify_broadcast(cluster: Cluster): + # 1: Setup producers and consumers + proxies = cluster.proxy_cycle() + + # Proxy in same datacenter as leader/primary + proxy1 = next(proxies) + + producer1 = proxy1.create_client("producer1") + assert ( + producer1.open(tc.URI_BROADCAST, flags=["write", "ack"], block=True) + == Client.e_SUCCESS + ) + + consumer1 = proxy1.create_client("consumer1") + assert ( + consumer1.open(tc.URI_BROADCAST, flags=["read"], block=True) == Client.e_SUCCESS + ) + + # Replica proxy + proxy2 = next(proxies) + + producer2 = proxy2.create_client("producer2") + assert ( + producer2.open(tc.URI_BROADCAST, flags=["write", "ack"], block=True) + == Client.e_SUCCESS + ) + + consumer2 = proxy2.create_client("consumer2") + 
assert ( + consumer2.open(tc.URI_BROADCAST, flags=["read"], block=True) == Client.e_SUCCESS + ) + + assert len(consumer1.list(tc.URI_BROADCAST, block=True)) == 0 + assert len(consumer2.list(tc.URI_BROADCAST, block=True)) == 0 - self._stop_clients( - [ - fooConsumerAndProducerOnPrimaryProxy, - producerOnReplicaProxy, - barConsumerOnReplicaProxy, - ] - ) + # 2: Route messages and verify + assert ( + producer1.post(tc.URI_BROADCAST, payload=["msg1"], block=True, wait_ack=True) + == Client.e_SUCCESS + ) - def test_verify_broadcast(self, cluster: Cluster): - # 1: Setup producers and consumers - proxies = cluster.proxy_cycle() + _verify_delivery_and_confirm(consumer1, tc.URI_BROADCAST, ["msg1"]) - # Proxy in same datacenter as leader/primary - proxy1 = next(proxies) + _verify_delivery_and_confirm(consumer2, tc.URI_BROADCAST, ["msg1"]) + + assert ( + producer2.post(tc.URI_BROADCAST, payload=["msg2"], block=True, wait_ack=True) + == Client.e_SUCCESS + ) + + _verify_delivery_and_confirm(consumer1, tc.URI_BROADCAST, ["msg2"]) + + _verify_delivery_and_confirm(consumer2, tc.URI_BROADCAST, ["msg2"]) + + # 4: Close everything + _close_clients([producer1, consumer1, producer2, consumer2], [tc.URI_BROADCAST]) + + _stop_clients([producer1, consumer1, producer2, consumer2]) + + +def test_verify_redelivery(cluster: Cluster): + """Drop one consumer having unconfirmed message while there is another + consumer unable to take the message (due to max_unconfirmed_messages + limit). Then start new consumer and make sure it does not crash (DRQS + 156808957) and receives that unconfirmed message. + """ + proxies = cluster.proxy_cycle() - producer1 = proxy1.create_client("producer1") - assert ( - producer1.open(tc.URI_BROADCAST, flags=["write", "ack"], block=True) - == Client.e_SUCCESS - ) + # Proxy in same datacenter as leader/primary + proxy = next(proxies) - consumer1 = proxy1.create_client("consumer1") - assert ( - consumer1.open(tc.URI_BROADCAST, flags=["read"], block=True) - == Client.e_SUCCESS - ) + producer = proxy.create_client("producer1") + producer.open(tc.URI_FANOUT, flags=["write", "ack"], succeed=True) - # Replica proxy - proxy2 = next(proxies) + consumer1 = proxy.create_client("consumer1") + consumer1.open( + tc.URI_FANOUT_FOO, + flags=["read"], + consumer_priority=1, + max_unconfirmed_messages=1, + succeed=True, + ) - producer2 = proxy2.create_client("producer2") - assert ( - producer2.open(tc.URI_BROADCAST, flags=["write", "ack"], block=True) - == Client.e_SUCCESS - ) + consumer2 = proxy.create_client("consumer2") + consumer2.open( + tc.URI_FANOUT_FOO, + flags=["read"], + consumer_priority=1, + max_unconfirmed_messages=1, + succeed=True, + ) - consumer2 = proxy2.create_client("consumer2") - assert ( - consumer2.open(tc.URI_BROADCAST, flags=["read"], block=True) - == Client.e_SUCCESS - ) + producer.post(tc.URI_FANOUT, payload=["1"], succeed=True, wait_ack=True) + producer.post(tc.URI_FANOUT, payload=["2"], succeed=True, wait_ack=True) - assert len(consumer1.list(tc.URI_BROADCAST, block=True)) == 0 - assert len(consumer2.list(tc.URI_BROADCAST, block=True)) == 0 + consumer1.wait_push_event() + before = consumer1.list(tc.URI_FANOUT_FOO, block=True) - # 2: Route messages and verify - assert ( - producer1.post( - tc.URI_BROADCAST, payload=["msg1"], block=True, wait_ack=True - ) - == Client.e_SUCCESS - ) + consumer2.wait_push_event() - self._verify_delivery_and_confirm(consumer1, tc.URI_BROADCAST, ["msg1"]) + consumer1.stop_session(block=True) - self._verify_delivery_and_confirm(consumer2, tc.URI_BROADCAST, 
["msg1"]) + consumer1 = proxy.create_client("consumer1") + consumer1.open( + tc.URI_FANOUT_FOO, + flags=["read"], + consumer_priority=1, + max_unconfirmed_messages=1, + succeed=True, + ) - assert ( - producer2.post( - tc.URI_BROADCAST, payload=["msg2"], block=True, wait_ack=True - ) - == Client.e_SUCCESS - ) + consumer1.wait_push_event() + after = consumer1.list(tc.URI_FANOUT_FOO, block=True) - self._verify_delivery_and_confirm(consumer1, tc.URI_BROADCAST, ["msg2"]) + assert before[0].payload == after[0].payload - self._verify_delivery_and_confirm(consumer2, tc.URI_BROADCAST, ["msg2"]) + _stop_clients([producer, consumer1, consumer2]) - # 4: Close everything - self._close_clients( - [producer1, consumer1, producer2, consumer2], [tc.URI_BROADCAST] - ) - self._stop_clients([producer1, consumer1, producer2, consumer2]) - - def test_verify_redelivery(self, cluster: Cluster): - """Drop one consumer having unconfirmed message while there is another - consumer unable to take the message (due to max_unconfirmed_messages - limit). Then start new consumer and make sure it does not crash (DRQS - 156808957) and receives that unconfirmed message. - """ - proxies = cluster.proxy_cycle() - - # Proxy in same datacenter as leader/primary - proxy = next(proxies) - - producer = proxy.create_client("producer1") - producer.open(tc.URI_FANOUT, flags=["write", "ack"], succeed=True) - - consumer1 = proxy.create_client("consumer1") - consumer1.open( - tc.URI_FANOUT_FOO, - flags=["read"], - consumer_priority=1, - max_unconfirmed_messages=1, - succeed=True, - ) +def test_verify_priority_queue_redelivery(cluster: Cluster): + """Restart consumer having unconfirmed messages while a producer is + still present (queue context is not erased). Make sure the consumer + receives the unconfirmed messages. 
+ """ + proxies = cluster.proxy_cycle() - consumer2 = proxy.create_client("consumer2") - consumer2.open( - tc.URI_FANOUT_FOO, - flags=["read"], - consumer_priority=1, - max_unconfirmed_messages=1, - succeed=True, - ) + # Proxy in same datacenter as leader/primary + proxy = next(proxies) - producer.post(tc.URI_FANOUT, payload=["1"], succeed=True, wait_ack=True) - producer.post(tc.URI_FANOUT, payload=["2"], succeed=True, wait_ack=True) + producer = proxy.create_client("producer") + producer.open(tc.URI_PRIORITY, flags=["write", "ack"], succeed=True) - consumer1.wait_push_event() - before = consumer1.list(tc.URI_FANOUT_FOO, block=True) + consumer = proxy.create_client("consumer") + consumer.open( + tc.URI_PRIORITY, + flags=["read"], + consumer_priority=1, + max_unconfirmed_messages=1, + succeed=True, + ) - consumer2.wait_push_event() + producer.post(tc.URI_PRIORITY, payload=["1"], succeed=True, wait_ack=True) + producer.post(tc.URI_PRIORITY, payload=["2"], succeed=True, wait_ack=True) - consumer1.stop_session(block=True) + consumer.wait_push_event() + before = consumer.list(tc.URI_PRIORITY, block=True) - consumer1 = proxy.create_client("consumer1") - consumer1.open( - tc.URI_FANOUT_FOO, - flags=["read"], - consumer_priority=1, - max_unconfirmed_messages=1, - succeed=True, - ) + consumer.stop_session(block=True) - consumer1.wait_push_event() - after = consumer1.list(tc.URI_FANOUT_FOO, block=True) + consumer = proxy.create_client("consumer") + consumer.open( + tc.URI_PRIORITY, + flags=["read"], + consumer_priority=1, + max_unconfirmed_messages=1, + succeed=True, + ) - assert before[0].payload == after[0].payload + consumer.wait_push_event() + after = consumer.list(tc.URI_PRIORITY, block=True) - self._stop_clients([producer, consumer1, consumer2]) + assert before == after - def test_verify_priority_queue_redelivery(self, cluster: Cluster): - """Restart consumer having unconfirmed messages while a producer is - still present (queue context is not erased). Make sure the consumer - receives the unconfirmed messages. - """ - proxies = cluster.proxy_cycle() + _stop_clients([producer, consumer]) - # Proxy in same datacenter as leader/primary - proxy = next(proxies) - producer = proxy.create_client("producer") - producer.open(tc.URI_PRIORITY, flags=["write", "ack"], succeed=True) +def test_verify_partial_close(standard_cluster: Cluster): + """Drop one of two producers both having unacked message (primary is + suspended. Make sure the remaining producer does not get NACK but gets + ACK when primary resumes. 
+ """ + proxies = standard_cluster.proxy_cycle() - consumer = proxy.create_client("consumer") - consumer.open( - tc.URI_PRIORITY, - flags=["read"], - consumer_priority=1, - max_unconfirmed_messages=1, - succeed=True, - ) + proxy = next(proxies) + proxy = next(proxies) - producer.post(tc.URI_PRIORITY, payload=["1"], succeed=True, wait_ack=True) - producer.post(tc.URI_PRIORITY, payload=["2"], succeed=True, wait_ack=True) + producer1 = proxy.create_client("producer1") + producer1.open(tc.URI_FANOUT, flags=["write", "ack"], succeed=True) - consumer.wait_push_event() - before = consumer.list(tc.URI_PRIORITY, block=True) + producer2 = proxy.create_client("producer2") + producer2.open(tc.URI_FANOUT, flags=["write", "ack"], succeed=True) - consumer.stop_session(block=True) + leader = standard_cluster.last_known_leader + leader.suspend() - consumer = proxy.create_client("consumer") - consumer.open( - tc.URI_PRIORITY, - flags=["read"], - consumer_priority=1, - max_unconfirmed_messages=1, - succeed=True, - ) + producer1.post(tc.URI_FANOUT, payload=["1"], succeed=True, wait_ack=False) + producer2.post(tc.URI_FANOUT, payload=["2"], succeed=True, wait_ack=False) - consumer.wait_push_event() - after = consumer.list(tc.URI_PRIORITY, block=True) + producer2.stop_session(block=True) - assert before == after + leader.resume() - self._stop_clients([producer, consumer]) + producer1.capture(r"ACK #0: \[ type = ACK status = SUCCESS", 2) - def test_verify_partial_close(self, standard_cluster: Cluster): - """Drop one of two producers both having unacked message (primary is - suspended. Make sure the remaining producer does not get NACK but gets - ACK when primary resumes. - """ - proxies = standard_cluster.proxy_cycle() + _stop_clients([producer1, producer2]) - proxy = next(proxies) - proxy = next(proxies) - producer1 = proxy.create_client("producer1") - producer1.open(tc.URI_FANOUT, flags=["write", "ack"], succeed=True) +@start_cluster(True, True, True) +@tweak.cluster.queue_operations.open_timeout_ms(2) +def test_command_timeout(standard_cluster: Cluster): + """Simple test to execute onOpenQueueResponse timeout.""" - producer2 = proxy.create_client("producer2") - producer2.open(tc.URI_FANOUT, flags=["write", "ack"], succeed=True) + # make sure the cluster is healthy and the queue is assigned + # Cannot use proxies as they do not read cluster config - leader = standard_cluster.last_known_leader - leader.suspend() + leader = standard_cluster.last_known_leader + host = standard_cluster.nodes()[0] + if host == leader: + host = standard_cluster.nodes()[1] - producer1.post(tc.URI_FANOUT, payload=["1"], succeed=True, wait_ack=False) - producer2.post(tc.URI_FANOUT, payload=["2"], succeed=True, wait_ack=False) + client = host.create_client("client") + # this may fail due to the short timeout; we just need queue assigned + client.open(tc.URI_FANOUT, flags=["write", "ack"], block=True) - producer2.stop_session(block=True) + leader.suspend() - leader.resume() + result = client.open(tc.URI_FANOUT_FOO, flags=["read"], block=True) + leader.resume() - producer1.capture(r"ACK #0: \[ type = ACK status = SUCCESS", 2) + assert result == Client.e_TIMEOUT - self._stop_clients([producer1, producer2]) - @start_cluster(True, True, True) - @tweak.cluster.queue_operations.open_timeout_ms(2) - def test_command_timeout(self, standard_cluster: Cluster): - """Simple test to execute onOpenQueueResponse timeout.""" +def test_queue_purge_command(standard_cluster: Cluster): + """Ensure that 'queue purge' command is working as expected. 
Post a + message to the queue, then purge the queue, then bring up a consumer. + Ensure that consumer does not receive any message. + """ + proxy = next(standard_cluster.proxy_cycle()) - # make sure the cluster is healthy and the queue is assigned - # Cannot use proxies as they do not read cluster config + # Start a producer and post a message + producer = proxy.create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write", "ack"], succeed=True) + producer.post(tc.URI_FANOUT, ["msg1"], succeed=True, wait_ack=True) - leader = standard_cluster.last_known_leader - host = standard_cluster.nodes()[0] - if host == leader: - host = standard_cluster.nodes()[1] + leader = standard_cluster.last_known_leader - client = host.create_client("client") - # this may fail due to the short timeout; we just need queue assigned - client.open(tc.URI_FANOUT, flags=["write", "ack"], block=True) + # Purge queue, but *only* for 'foo' appId + leader.command(f"DOMAINS DOMAIN {tc.DOMAIN_FANOUT} QUEUE {tc.TEST_QUEUE} PURGE foo") - leader.suspend() + # Open consumers for all appIds and ensure that the one with 'foo' appId + # does not receive the message, while other consumers do. + consumer1 = proxy.create_client("consumer1") + consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - result = client.open(tc.URI_FANOUT_FOO, flags=["read"], block=True) - leader.resume() + consumer2 = proxy.create_client("consumer2") + consumer2.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) - assert result == Client.e_TIMEOUT + consumer3 = proxy.create_client("consumer3") + consumer3.open(tc.URI_FANOUT_BAZ, flags=["read"], succeed=True) - def test_queue_purge_command(self, standard_cluster: Cluster): - """Ensure that 'queue purge' command is working as expected. Post a - message to the queue, then purge the queue, then bring up a consumer. - Ensure that consumer does not receive any message. - """ - proxy = next(standard_cluster.proxy_cycle()) + assert consumer2.wait_push_event() + msgs = consumer2.list(block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg1" - # Start a producer and post a message - producer = proxy.create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write", "ack"], succeed=True) - producer.post(tc.URI_FANOUT, ["msg1"], succeed=True, wait_ack=True) + assert consumer3.wait_push_event() + msgs = consumer3.list(block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg1" - leader = standard_cluster.last_known_leader + assert not consumer1.wait_push_event(timeout=5, quiet=True) + msgs = consumer1.list(block=True) + assert len(msgs) == 0 - # Purge queue, but *only* for 'foo' appId - leader.command( - f"DOMAINS DOMAIN {tc.DOMAIN_FANOUT} QUEUE {tc.TEST_QUEUE} PURGE foo" - ) + consumer2.confirm(tc.URI_FANOUT_BAR, "*", succeed=True) + consumer3.confirm(tc.URI_FANOUT_BAZ, "*", succeed=True) - # Open consumers for all appIds and ensure that the one with 'foo' appId - # does not receive the message, while other consumers do. - consumer1 = proxy.create_client("consumer1") - consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + # Stop all consumers, post another message, then purge entire queue + # (i.e., all appIds), then restart all consumers and ensure that none + # of them got any messages. 
+ consumer1.close(tc.URI_FANOUT_FOO, succeed=True) + consumer2.close(tc.URI_FANOUT_BAR, succeed=True) + consumer3.close(tc.URI_FANOUT_BAZ, succeed=True) - consumer2 = proxy.create_client("consumer2") - consumer2.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) + producer.post(tc.URI_FANOUT, ["msg2"], succeed=True, wait_ack=True) - consumer3 = proxy.create_client("consumer3") - consumer3.open(tc.URI_FANOUT_BAZ, flags=["read"], succeed=True) + leader.command(f"DOMAINS DOMAIN {tc.DOMAIN_FANOUT} QUEUE {tc.TEST_QUEUE} PURGE *") - assert consumer2.wait_push_event() - msgs = consumer2.list(block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg1" + consumer1 = proxy.create_client("consumer1") + consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + consumer2 = proxy.create_client("consumer2") + consumer2.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) + consumer3 = proxy.create_client("consumer3") + consumer3.open(tc.URI_FANOUT_BAZ, flags=["read"], succeed=True) - assert consumer3.wait_push_event() - msgs = consumer3.list(block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg1" + consumers = [consumer1, consumer2, consumer3] - assert not consumer1.wait_push_event(timeout=5, quiet=True) - msgs = consumer1.list(block=True) + for consumer in consumers: + assert not consumer.wait_push_event(timeout=2, quiet=True) + msgs = consumer.list(block=True) assert len(msgs) == 0 - consumer2.confirm(tc.URI_FANOUT_BAR, "*", succeed=True) - consumer3.confirm(tc.URI_FANOUT_BAZ, "*", succeed=True) - # Stop all consumers, post another message, then purge entire queue - # (i.e., all appIds), then restart all consumers and ensure that none - # of them got any messages. - consumer1.close(tc.URI_FANOUT_FOO, succeed=True) - consumer2.close(tc.URI_FANOUT_BAR, succeed=True) - consumer3.close(tc.URI_FANOUT_BAZ, succeed=True) +def test_message_properties(cluster: Cluster): + """Ensure that posting different sequences of MessageProperties works.""" + proxies = cluster.proxy_cycle() - producer.post(tc.URI_FANOUT, ["msg2"], succeed=True, wait_ack=True) + # 1: Setup producers and consumers + # Proxy in same datacenter as leader/primary + proxy1 = next(proxies) - leader.command( - f"DOMAINS DOMAIN {tc.DOMAIN_FANOUT} QUEUE {tc.TEST_QUEUE} PURGE *" - ) + producer1 = proxy1.create_client("producer1") + assert ( + producer1.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) + == Client.e_SUCCESS + ) - consumer1 = proxy.create_client("consumer1") - consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - consumer2 = proxy.create_client("consumer2") - consumer2.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) - consumer3 = proxy.create_client("consumer3") - consumer3.open(tc.URI_FANOUT_BAZ, flags=["read"], succeed=True) + consumer = proxy1.create_client("consumer") + assert ( + consumer.open(tc.URI_PRIORITY, flags=["read"], block=True) == Client.e_SUCCESS + ) - consumers = [consumer1, consumer2, consumer3] + # Replica proxy + proxy2 = next(proxies) - for consumer in consumers: - assert not consumer.wait_push_event(timeout=2, quiet=True) - msgs = consumer.list(block=True) - assert len(msgs) == 0 + producer2 = proxy2.create_client("producer2") + assert ( + producer2.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) + == Client.e_SUCCESS + ) - def test_message_properties(self, cluster: Cluster): - """Ensure that posting different sequences of MessageProperties works.""" - proxies = cluster.proxy_cycle() - - # 1: Setup producers and consumers - # Proxy in same 
datacenter as leader/primary - proxy1 = next(proxies) - - producer1 = proxy1.create_client("producer1") - assert ( - producer1.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) - == Client.e_SUCCESS - ) - - consumer = proxy1.create_client("consumer") - assert ( - consumer.open(tc.URI_PRIORITY, flags=["read"], block=True) - == Client.e_SUCCESS - ) + # 2: Route messages and verify - # Replica proxy - proxy2 = next(proxies) - - producer2 = proxy2.create_client("producer2") - assert ( - producer2.open(tc.URI_PRIORITY, flags=["write", "ack"], block=True) - == Client.e_SUCCESS - ) - - # 2: Route messages and verify - - assert ( - producer1.post( - tc.URI_PRIORITY, - payload=["msg"], - block=True, - wait_ack=True, - messageProperties=[], - ) - == Client.e_SUCCESS + assert ( + producer1.post( + tc.URI_PRIORITY, + payload=["msg"], + block=True, + wait_ack=True, + messageProperties=[], ) + == Client.e_SUCCESS + ) - self._verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) + _verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) - assert ( - producer1.post( - tc.URI_PRIORITY, - payload=["msg"], - block=True, - wait_ack=True, - messageProperties=[ - {"name": "pairs_", "value": "3", "type": "E_INT"}, - {"name": "p1", "value": "1", "type": "E_INT"}, - {"name": "p1_value", "value": "1", "type": "E_INT"}, - {"name": "p3", "value": "1", "type": "E_INT"}, - {"name": "p3_value", "value": "1", "type": "E_INT"}, - {"name": "p4", "value": "1", "type": "E_STRING"}, - {"name": "p4_value", "value": "1", "type": "E_STRING"}, - ], - ) - == Client.e_SUCCESS + assert ( + producer1.post( + tc.URI_PRIORITY, + payload=["msg"], + block=True, + wait_ack=True, + messageProperties=[ + {"name": "pairs_", "value": "3", "type": "E_INT"}, + {"name": "p1", "value": "1", "type": "E_INT"}, + {"name": "p1_value", "value": "1", "type": "E_INT"}, + {"name": "p3", "value": "1", "type": "E_INT"}, + {"name": "p3_value", "value": "1", "type": "E_INT"}, + {"name": "p4", "value": "1", "type": "E_STRING"}, + {"name": "p4_value", "value": "1", "type": "E_STRING"}, + ], ) + == Client.e_SUCCESS + ) - self._verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) + _verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) - assert ( - producer1.post( - tc.URI_PRIORITY, - payload=["msg"], - block=True, - wait_ack=True, - messageProperties=[ - {"name": "pairs_", "value": "4", "type": "E_INT"}, - {"name": "p1", "value": "1", "type": "E_INT"}, - {"name": "p1_value", "value": "1", "type": "E_INT"}, - {"name": "p2", "value": "1", "type": "E_STRING"}, - {"name": "p2_value", "value": "1", "type": "E_STRING"}, - {"name": "p3", "value": "1", "type": "E_INT"}, - {"name": "p3_value", "value": "1", "type": "E_INT"}, - {"name": "p4", "value": "1", "type": "E_STRING"}, - {"name": "p4_value", "value": "1", "type": "E_STRING"}, - ], - ) - == Client.e_SUCCESS + assert ( + producer1.post( + tc.URI_PRIORITY, + payload=["msg"], + block=True, + wait_ack=True, + messageProperties=[ + {"name": "pairs_", "value": "4", "type": "E_INT"}, + {"name": "p1", "value": "1", "type": "E_INT"}, + {"name": "p1_value", "value": "1", "type": "E_INT"}, + {"name": "p2", "value": "1", "type": "E_STRING"}, + {"name": "p2_value", "value": "1", "type": "E_STRING"}, + {"name": "p3", "value": "1", "type": "E_INT"}, + {"name": "p3_value", "value": "1", "type": "E_INT"}, + {"name": "p4", "value": "1", "type": "E_STRING"}, + {"name": "p4_value", "value": "1", "type": "E_STRING"}, + ], ) + == Client.e_SUCCESS + ) - 
self._verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) + _verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) - assert ( - producer1.post( - tc.URI_PRIORITY, - payload=["msg"], - block=True, - wait_ack=True, - messageProperties=[ - {"name": "pairs_", "value": "3", "type": "E_INT"}, - {"name": "p1", "value": "1", "type": "E_INT"}, - {"name": "p1_value", "value": "1", "type": "E_INT"}, - {"name": "p3", "value": "1", "type": "E_INT"}, - {"name": "p3_value", "value": "1", "type": "E_INT"}, - {"name": "p4", "value": "1", "type": "E_STRING"}, - {"name": "p4_value", "value": "1", "type": "E_STRING"}, - ], - ) - == Client.e_SUCCESS + assert ( + producer1.post( + tc.URI_PRIORITY, + payload=["msg"], + block=True, + wait_ack=True, + messageProperties=[ + {"name": "pairs_", "value": "3", "type": "E_INT"}, + {"name": "p1", "value": "1", "type": "E_INT"}, + {"name": "p1_value", "value": "1", "type": "E_INT"}, + {"name": "p3", "value": "1", "type": "E_INT"}, + {"name": "p3_value", "value": "1", "type": "E_INT"}, + {"name": "p4", "value": "1", "type": "E_STRING"}, + {"name": "p4_value", "value": "1", "type": "E_STRING"}, + ], ) + == Client.e_SUCCESS + ) - self._verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) + _verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) - assert ( - producer1.post( - tc.URI_PRIORITY, - payload=["msg"], - block=True, - wait_ack=True, - messageProperties=[ - {"name": "pairs_", "value": "4", "type": "E_INT"}, - {"name": "p1", "value": "1", "type": "E_INT"}, - {"name": "p1_value", "value": "1", "type": "E_INT"}, - {"name": "p2", "value": "1", "type": "E_STRING"}, - {"name": "p2_value", "value": "1", "type": "E_STRING"}, - {"name": "p3", "value": "1", "type": "E_INT"}, - {"name": "p3_value", "value": "1", "type": "E_INT"}, - {"name": "p4", "value": "1", "type": "E_STRING"}, - {"name": "p4_value", "value": "1", "type": "E_STRING"}, - ], - ) - == Client.e_SUCCESS + assert ( + producer1.post( + tc.URI_PRIORITY, + payload=["msg"], + block=True, + wait_ack=True, + messageProperties=[ + {"name": "pairs_", "value": "4", "type": "E_INT"}, + {"name": "p1", "value": "1", "type": "E_INT"}, + {"name": "p1_value", "value": "1", "type": "E_INT"}, + {"name": "p2", "value": "1", "type": "E_STRING"}, + {"name": "p2_value", "value": "1", "type": "E_STRING"}, + {"name": "p3", "value": "1", "type": "E_INT"}, + {"name": "p3_value", "value": "1", "type": "E_INT"}, + {"name": "p4", "value": "1", "type": "E_STRING"}, + {"name": "p4_value", "value": "1", "type": "E_STRING"}, + ], ) + == Client.e_SUCCESS + ) - self._verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) + _verify_delivery_and_confirm(consumer, tc.URI_PRIORITY, ["msg"]) - # 3: Close everything - self._close_clients( - [producer1, consumer, producer2], - [tc.URI_PRIORITY], - ) - self._stop_clients([producer1, consumer, producer2]) + # 3: Close everything + _close_clients( + [producer1, consumer, producer2], + [tc.URI_PRIORITY], + ) + _stop_clients([producer1, consumer, producer2]) diff --git a/src/integration-tests/test_broadcast.py b/src/integration-tests/test_broadcast.py index 1533202885..c9e054aadf 100644 --- a/src/integration-tests/test_broadcast.py +++ b/src/integration-tests/test_broadcast.py @@ -5,382 +5,384 @@ from bmq.dev.it.process.client import Client -class TestBroadcast: - def test_breathing(self, cluster: Cluster): - """ - Verify that broadcast mode works properly for a single producer and a - single consumer. 
- """ +def test_breathing(cluster: Cluster): + """ + Verify that broadcast mode works properly for a single producer and a + single consumer. + """ - proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) + proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) - producer = proxy1.create_client("producer") - consumer = proxy2.create_client("consumer") + producer = proxy1.create_client("producer") + consumer = proxy2.create_client("consumer") - producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) - consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) + producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) + consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) - assert ( - producer.post(tc.URI_BROADCAST, payload=["msg1"], block=True, wait_ack=True) - == Client.e_SUCCESS - ) + assert ( + producer.post(tc.URI_BROADCAST, payload=["msg1"], block=True, wait_ack=True) + == Client.e_SUCCESS + ) + + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg1" + + +def test_multi_consumers(cluster: Cluster): + """ + Verify that broadcast mode works properly for multiple consumers. + """ + + proxies = cluster.proxy_cycle() + producer = next(proxies).create_client("producer") + consumers = [next(proxies).create_client(f"client{i}") for i in range(3)] + producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) + for consumer in consumers: + consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) + + producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) + for consumer in consumers: assert consumer.wait_push_event() msgs = consumer.list(tc.URI_BROADCAST, block=True) assert len(msgs) == 1 assert msgs[0].payload == "msg1" - def test_multi_consumers(self, cluster: Cluster): - """ - Verify that broadcast mode works properly for multiple consumers. - """ - proxies = cluster.proxy_cycle() - producer = next(proxies).create_client("producer") - consumers = [next(proxies).create_client(f"client{i}") for i in range(3)] +def test_multi_producers_consumers(cluster: Cluster): + """ + Verify that broadcast mode works properly for multiple producers and + consumers. + """ + + proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) + po1 = proxy1.create_client("po1") + po2 = proxy1.create_client("po2") + pr1 = proxy2.create_client("pr1") + producers = [po1, po2, pr1] + co1 = proxy1.create_client("CO1") + cr1 = proxy2.create_client("CR1") + cr2 = proxy2.create_client("CR2") + consumers = [co1, cr1, cr2] + for producer in producers: producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) - for consumer in consumers: - consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) + for consumer in consumers: + consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) - producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) + for i, producer in enumerate(producers, 1): + producer.post(tc.URI_BROADCAST, payload=[f"msg{i}"], block=True, wait_ack=True) for consumer in consumers: assert consumer.wait_push_event() msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg1" - - def test_multi_producers_consumers(self, cluster: Cluster): - """ - Verify that broadcast mode works properly for multiple producers and - consumers. 
- """ - - proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) - po1 = proxy1.create_client("po1") - po2 = proxy1.create_client("po2") - pr1 = proxy2.create_client("pr1") - producers = [po1, po2, pr1] - co1 = proxy1.create_client("CO1") - cr1 = proxy2.create_client("CR1") - cr2 = proxy2.create_client("CR2") - consumers = [co1, cr1, cr2] - - for producer in producers: - producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) - for consumer in consumers: - consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) - - for i, producer in enumerate(producers, 1): - producer.post( - tc.URI_BROADCAST, payload=[f"msg{i}"], block=True, wait_ack=True - ) - for consumer in consumers: - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == i - assert msgs[i - 1].payload == f"msg{i}" - - def test_resubscribe(self, cluster: Cluster): - """ - Verify that when a consumer undergoes a re-subscription, messages - posted during non-subscription will not be received. - """ - - proxy = next(cluster.proxy_cycle()) - producer = proxy.create_client("producer") - consumer = proxy.create_client("consumer") - - # Consumer subscribes - producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) - consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) + assert len(msgs) == i + assert msgs[i - 1].payload == f"msg{i}" - producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg1" - consumer.confirm(tc.URI_BROADCAST, "+1", succeed=True) - # Consumer unsubscribes - assert consumer.close(tc.URI_BROADCAST, block=True) == Client.e_SUCCESS +def test_resubscribe(cluster: Cluster): + """ + Verify that when a consumer undergoes a re-subscription, messages + posted during non-subscription will not be received. + """ - producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) + proxy = next(cluster.proxy_cycle()) + producer = proxy.create_client("producer") + consumer = proxy.create_client("consumer") - # Consumer resubscribes - consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) - assert not consumer.list(tc.URI_BROADCAST, block=True) + # Consumer subscribes + producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) + consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) - producer.post(tc.URI_BROADCAST, payload=["msg3"], succeed=True, wait_ack=True) - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg3" + producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg1" + consumer.confirm(tc.URI_BROADCAST, "+1", succeed=True) - def test_add_consumers(self, cluster: Cluster): - """ - Verify that only active consumers receive messages as new consumers are - being added. 
- """ + # Consumer unsubscribes + assert consumer.close(tc.URI_BROADCAST, block=True) == Client.e_SUCCESS - proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) - producer = proxy1.create_client("producer") - co1 = proxy1.create_client("CO1") - cr1 = proxy2.create_client("CR1") + producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) - producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) + # Consumer resubscribes + consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) + assert not consumer.list(tc.URI_BROADCAST, block=True) - # This message should not be received by any consumer - producer.post( - tc.URI_BROADCAST, payload=["null_msg"], succeed=True, wait_ack=True - ) + producer.post(tc.URI_BROADCAST, payload=["msg3"], succeed=True, wait_ack=True) + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg3" - co1.open(tc.URI_BROADCAST, flags=["read"], succeed=True) - assert not co1.list(tc.URI_BROADCAST, block=True) - # This message should only be received by CO1 - producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) +def test_add_consumers(cluster: Cluster): + """ + Verify that only active consumers receive messages as new consumers are + being added. + """ - assert co1.wait_push_event() - msgs = co1.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg1" - co1.confirm(tc.URI_BROADCAST, "+1", succeed=True) + proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) + producer = proxy1.create_client("producer") + co1 = proxy1.create_client("CO1") + cr1 = proxy2.create_client("CR1") - cr1.open(tc.URI_BROADCAST, flags=["read"], succeed=True) - assert not cr1.list(tc.URI_BROADCAST, block=True) + producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) - # This messages should be received by all consumers - producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) + # This message should not be received by any consumer + producer.post(tc.URI_BROADCAST, payload=["null_msg"], succeed=True, wait_ack=True) - for consumer in [co1, cr1]: - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg2" + co1.open(tc.URI_BROADCAST, flags=["read"], succeed=True) + assert not co1.list(tc.URI_BROADCAST, block=True) - def test_dynamic_priorities(self, cluster: Cluster): - """ - Verify that only the highest priority consumers receive messages when - the priorities are dynamically changing - """ + # This message should only be received by CO1 + producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) - proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) - producer = proxy1.create_client("producer") - co1 = proxy1.create_client("CO1") - cr1 = proxy2.create_client("CR1") - cr2 = proxy2.create_client("CR2") + assert co1.wait_push_event() + msgs = co1.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg1" + co1.confirm(tc.URI_BROADCAST, "+1", succeed=True) - producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) - for consumer in [co1, cr1, cr2]: - consumer.open( - tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=2 - ) + cr1.open(tc.URI_BROADCAST, flags=["read"], succeed=True) + assert not cr1.list(tc.URI_BROADCAST, block=True) - producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) - for consumer 
in [co1, cr1, cr2]: - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg1" - consumer.confirm(tc.URI_BROADCAST, "+1", succeed=True) - - # CR1's priority is lowered. It should not receive messages anymore - assert ( - cr1.configure(tc.URI_BROADCAST, block=True, consumer_priority=1) - == Client.e_SUCCESS - ) + # This messages should be received by all consumers + producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) - producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) + for consumer in [co1, cr1]: + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg2" - for consumer in [co1, cr2]: - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg2" - consumer.confirm(tc.URI_BROADCAST, "+1", succeed=True) - assert not cr1.list(tc.URI_BROADCAST, block=True) +def test_dynamic_priorities(cluster: Cluster): + """ + Verify that only the highest priority consumers receive messages when + the priorities are dynamically changing + """ - # CO1 becomes the single highest priority consumer. Only it should - # receive any message - assert ( - co1.configure(tc.URI_BROADCAST, block=True, consumer_priority=99) - == Client.e_SUCCESS - ) + proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) + producer = proxy1.create_client("producer") + co1 = proxy1.create_client("CO1") + cr1 = proxy2.create_client("CR1") + cr2 = proxy2.create_client("CR2") - producer.post(tc.URI_BROADCAST, payload=["msg3"], succeed=True, wait_ack=True) + producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) + for consumer in [co1, cr1, cr2]: + consumer.open( + tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=2 + ) - assert co1.wait_push_event() - msgs = co1.list(tc.URI_BROADCAST, block=True) + producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) + for consumer in [co1, cr1, cr2]: + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) assert len(msgs) == 1 - assert msgs[0].payload == "msg3" - co1.confirm(tc.URI_BROADCAST, "+1", succeed=True) + assert msgs[0].payload == "msg1" + consumer.confirm(tc.URI_BROADCAST, "+1", succeed=True) - for consumer in [cr1, cr2]: - assert not consumer.list(tc.URI_BROADCAST, block=True) + # CR1's priority is lowered. 
It should not receive messages anymore + assert ( + cr1.configure(tc.URI_BROADCAST, block=True, consumer_priority=1) + == Client.e_SUCCESS + ) - # Increase CR1's priority to be the same as CO1 - assert ( - cr1.configure(tc.URI_BROADCAST, block=True, consumer_priority=99) - == Client.e_SUCCESS - ) + producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) - producer.post(tc.URI_BROADCAST, payload=["msg4"], succeed=True, wait_ack=True) + for consumer in [co1, cr2]: + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg2" + consumer.confirm(tc.URI_BROADCAST, "+1", succeed=True) - for consumer in [co1, cr1]: - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg4" + assert not cr1.list(tc.URI_BROADCAST, block=True) - assert not cr2.list(tc.URI_BROADCAST, block=True) + # CO1 becomes the single highest priority consumer. Only it should + # receive any message + assert ( + co1.configure(tc.URI_BROADCAST, block=True, consumer_priority=99) + == Client.e_SUCCESS + ) - def test_priority_failover(self, cluster: Cluster): - """ - Verify that when highest priority consumers unsubscribe gradually, only - the new highest priority consumers might receive messages. - """ + producer.post(tc.URI_BROADCAST, payload=["msg3"], succeed=True, wait_ack=True) - proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) - producer = proxy1.create_client("producer") - co1 = proxy1.create_client("CO1") - cr1 = proxy2.create_client("CR1") - cr2 = proxy2.create_client("CR2") + assert co1.wait_push_event() + msgs = co1.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg3" + co1.confirm(tc.URI_BROADCAST, "+1", succeed=True) - producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) - co1.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=1) - cr1.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=2) - cr2.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=3) + for consumer in [cr1, cr2]: + assert not consumer.list(tc.URI_BROADCAST, block=True) + + # Increase CR1's priority to be the same as CO1 + assert ( + cr1.configure(tc.URI_BROADCAST, block=True, consumer_priority=99) + == Client.e_SUCCESS + ) - # CR2 is highest priority; only it should receive messages - producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) + producer.post(tc.URI_BROADCAST, payload=["msg4"], succeed=True, wait_ack=True) - assert cr2.wait_push_event() - msgs = cr2.list(tc.URI_BROADCAST, block=True) + for consumer in [co1, cr1]: + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) assert len(msgs) == 1 - assert msgs[0].payload == "msg1" - cr2.confirm(tc.URI_BROADCAST, "+1", succeed=True) + assert msgs[0].payload == "msg4" - for consumer in [co1, cr1]: - assert not consumer.list(tc.URI_BROADCAST, block=True) + assert not cr2.list(tc.URI_BROADCAST, block=True) - # CR2 unsubscribes. Only CR1 should receive messages now - cr2.close(tc.URI_BROADCAST, succeed=True) - producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) +def test_priority_failover(cluster: Cluster): + """ + Verify that when highest priority consumers unsubscribe gradually, only + the new highest priority consumers might receive messages. 
+ """ - assert cr1.wait_push_event() - msgs = cr1.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg2" - cr1.confirm(tc.URI_BROADCAST, "+1", succeed=True) + proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) + producer = proxy1.create_client("producer") + co1 = proxy1.create_client("CO1") + cr1 = proxy2.create_client("CR1") + cr2 = proxy2.create_client("CR2") - assert not co1.list(tc.URI_BROADCAST, block=True) + producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) + co1.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=1) + cr1.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=2) + cr2.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=3) - # CR1 unsubscribes. Only CO1 should receive messages now - cr1.close(tc.URI_BROADCAST, succeed=True) + # CR2 is highest priority; only it should receive messages + producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) - producer.post(tc.URI_BROADCAST, payload=["msg3"], succeed=True, wait_ack=True) + assert cr2.wait_push_event() + msgs = cr2.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg1" + cr2.confirm(tc.URI_BROADCAST, "+1", succeed=True) - assert co1.wait_push_event() - msgs = co1.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg3" + for consumer in [co1, cr1]: + assert not consumer.list(tc.URI_BROADCAST, block=True) - def test_add_variable_priority_consumers(self, cluster: Cluster): - """ - Verify that only the highest priority consumers receive messages as new - consumers with variable priority are being added. - """ - - proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) - producer = proxy1.create_client("producer") - co1 = proxy1.create_client("CO1") - co2 = proxy1.create_client("CO2") - co3 = proxy1.create_client("CO3") - cr1 = proxy2.create_client("CR1") - cr2 = proxy2.create_client("CR2") - cr3 = proxy2.create_client("CR3") + # CR2 unsubscribes. Only CR1 should receive messages now + cr2.close(tc.URI_BROADCAST, succeed=True) - producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) + producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) - # Add consumer with priority 2 - co1.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=2) + assert cr1.wait_push_event() + msgs = cr1.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg2" + cr1.confirm(tc.URI_BROADCAST, "+1", succeed=True) - producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) + assert not co1.list(tc.URI_BROADCAST, block=True) - assert co1.wait_push_event() - msgs = co1.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg1" - co1.confirm(tc.URI_BROADCAST, "+1", succeed=True) + # CR1 unsubscribes. 
Only CO1 should receive messages now + cr1.close(tc.URI_BROADCAST, succeed=True) - # Add consumer with priority 1 - co2.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=1) + producer.post(tc.URI_BROADCAST, payload=["msg3"], succeed=True, wait_ack=True) - producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) + assert co1.wait_push_event() + msgs = co1.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg3" - assert co1.wait_push_event() - msgs = co1.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg2" - co1.confirm(tc.URI_BROADCAST, "+1", succeed=True) - assert not co2.list(tc.URI_BROADCAST, block=True) +def test_add_variable_priority_consumers(cluster: Cluster): + """ + Verify that only the highest priority consumers receive messages as new + consumers with variable priority are being added. + """ - # Add consumer with priority 2 - co3.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=2) + proxy1, proxy2 = islice(cluster.proxy_cycle(), 2) + producer = proxy1.create_client("producer") + co1 = proxy1.create_client("CO1") + co2 = proxy1.create_client("CO2") + co3 = proxy1.create_client("CO3") + cr1 = proxy2.create_client("CR1") + cr2 = proxy2.create_client("CR2") + cr3 = proxy2.create_client("CR3") - producer.post(tc.URI_BROADCAST, payload=["msg3"], succeed=True, wait_ack=True) + producer.open(tc.URI_BROADCAST, flags=["write", "ack"], succeed=True) - for consumer in [co1, co3]: - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg3" - consumer.confirm(tc.URI_BROADCAST, "+1", succeed=True) + # Add consumer with priority 2 + co1.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=2) - assert not co2.list(tc.URI_BROADCAST, block=True) + producer.post(tc.URI_BROADCAST, payload=["msg1"], succeed=True, wait_ack=True) - # Add consumer with priority 3 - cr1.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=3) + assert co1.wait_push_event() + msgs = co1.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg1" + co1.confirm(tc.URI_BROADCAST, "+1", succeed=True) - producer.post(tc.URI_BROADCAST, payload=["msg4"], succeed=True, wait_ack=True) + # Add consumer with priority 1 + co2.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=1) - assert cr1.wait_push_event() - msgs = cr1.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg4" - cr1.confirm(tc.URI_BROADCAST, "+1", succeed=True) + producer.post(tc.URI_BROADCAST, payload=["msg2"], succeed=True, wait_ack=True) - for consumer in [co1, co2, co3]: - assert not consumer.list(tc.URI_BROADCAST, block=True) + assert co1.wait_push_event() + msgs = co1.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg2" + co1.confirm(tc.URI_BROADCAST, "+1", succeed=True) - # Add consumer with priority 5 - cr2.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=5) + assert not co2.list(tc.URI_BROADCAST, block=True) - producer.post(tc.URI_BROADCAST, payload=["msg5"], succeed=True, wait_ack=True) + # Add consumer with priority 2 + co3.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=2) - assert cr2.wait_push_event() - msgs = cr2.list(tc.URI_BROADCAST, block=True) + producer.post(tc.URI_BROADCAST, payload=["msg3"], succeed=True, 
wait_ack=True) + + for consumer in [co1, co3]: + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) assert len(msgs) == 1 - assert msgs[0].payload == "msg5" - cr2.confirm(tc.URI_BROADCAST, "+1", succeed=True) + assert msgs[0].payload == "msg3" + consumer.confirm(tc.URI_BROADCAST, "+1", succeed=True) - for consumer in [co1, co2, co3, cr1]: - assert not consumer.list(tc.URI_BROADCAST, block=True) + assert not co2.list(tc.URI_BROADCAST, block=True) - # Add consumer with priority 5 - cr3.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=5) + # Add consumer with priority 3 + cr1.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=3) - producer.post(tc.URI_BROADCAST, payload=["msg6"], succeed=True, wait_ack=True) + producer.post(tc.URI_BROADCAST, payload=["msg4"], succeed=True, wait_ack=True) - for consumer in [cr2, cr3]: - assert consumer.wait_push_event() - msgs = consumer.list(tc.URI_BROADCAST, block=True) - assert len(msgs) == 1 - assert msgs[0].payload == "msg6" + assert cr1.wait_push_event() + msgs = cr1.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg4" + cr1.confirm(tc.URI_BROADCAST, "+1", succeed=True) + + for consumer in [co1, co2, co3]: + assert not consumer.list(tc.URI_BROADCAST, block=True) - for consumer in [co1, co2, co3, cr1]: - assert not consumer.list(tc.URI_BROADCAST, block=True) + # Add consumer with priority 5 + cr2.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=5) + + producer.post(tc.URI_BROADCAST, payload=["msg5"], succeed=True, wait_ack=True) + + assert cr2.wait_push_event() + msgs = cr2.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg5" + cr2.confirm(tc.URI_BROADCAST, "+1", succeed=True) + + for consumer in [co1, co2, co3, cr1]: + assert not consumer.list(tc.URI_BROADCAST, block=True) + + # Add consumer with priority 5 + cr3.open(tc.URI_BROADCAST, flags=["read"], succeed=True, consumer_priority=5) + + producer.post(tc.URI_BROADCAST, payload=["msg6"], succeed=True, wait_ack=True) + + for consumer in [cr2, cr3]: + assert consumer.wait_push_event() + msgs = consumer.list(tc.URI_BROADCAST, block=True) + assert len(msgs) == 1 + assert msgs[0].payload == "msg6" + + for consumer in [co1, co2, co3, cr1]: + assert not consumer.list(tc.URI_BROADCAST, block=True) diff --git a/src/integration-tests/test_compression.py b/src/integration-tests/test_compression.py index 566d19bc4a..c79660226b 100644 --- a/src/integration-tests/test_compression.py +++ b/src/integration-tests/test_compression.py @@ -9,45 +9,44 @@ from bmq.dev.it.util import random_string -class TestCompression: - def test_compression_restart(self, cluster: Cluster): - - # Start a producer and post a message. - proxies = cluster.proxy_cycle() - producer = next(proxies).create_client("producer") - producer.open(tc.URI_PRIORITY_SC, flags=["write", "ack"], succeed=True) - - # Note for compression, we use a much larger payload of length greater - # than 1024 characters. The reason being that internally BMQ SDK skips - # compressing messages which are small sized specifically less than - # 1024 bytes. In this test, we use a randomly generated 5000 character - # string. 
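# ---------------------------------------------------------------------------
# Editor's aside (not part of this diff): the comment above explains that the
# BMQ SDK skips compression for payloads smaller than 1024 bytes, which is why
# the test posts a 5000-character payload. A standalone sketch of that size
# check; the threshold constant and helper name are illustrative, taken from
# the comment rather than from the SDK source.
_ASSUMED_COMPRESSION_MIN_BYTES = 1024

def _would_be_compressed(payload: str) -> bool:
    # Per the comment above, only payloads at or above the threshold are compressed.
    return len(payload.encode()) >= _ASSUMED_COMPRESSION_MIN_BYTES

assert not _would_be_compressed("x" * 100)   # small message: compression skipped
assert _would_be_compressed("x" * 5000)      # payload size used by this test
# ---------------------------------------------------------------------------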
- payload = random_string(len=5000) - producer.post( - tc.URI_PRIORITY_SC, - payload=[payload], - wait_ack=True, - succeed=True, - compression_algorithm_type="ZLIB", - ) - - # Use strong consistency (SC) to ensure that majority nodes in the - # cluster have received the message at the storage layer before - # restarting the cluster. - - cluster.restart_nodes() - # For a standard cluster, states have already been restored as part of - # leader re-election. - if cluster.is_local: - producer.wait_state_restored() - - consumer = next(proxies).create_client("consumer") - consumer.open(tc.URI_PRIORITY_SC, flags=["read"], succeed=True) - consumer.wait_push_event() - msgs = consumer.list(tc.URI_PRIORITY_SC, block=True) - - # we truncate the message to 32 characters in the bmqtool api used by - # the consumer.list function - # TODO: Add support for listing complete messages in bmqtool - assert len(msgs) == 1 - assert msgs[0].payload[:32] == payload[:32] +def test_compression_restart(cluster: Cluster): + + # Start a producer and post a message. + proxies = cluster.proxy_cycle() + producer = next(proxies).create_client("producer") + producer.open(tc.URI_PRIORITY_SC, flags=["write", "ack"], succeed=True) + + # Note for compression, we use a much larger payload of length greater + # than 1024 characters. The reason being that internally BMQ SDK skips + # compressing messages which are small sized specifically less than + # 1024 bytes. In this test, we use a randomly generated 5000 character + # string. + payload = random_string(len=5000) + producer.post( + tc.URI_PRIORITY_SC, + payload=[payload], + wait_ack=True, + succeed=True, + compression_algorithm_type="ZLIB", + ) + + # Use strong consistency (SC) to ensure that majority nodes in the + # cluster have received the message at the storage layer before + # restarting the cluster. + + cluster.restart_nodes() + # For a standard cluster, states have already been restored as part of + # leader re-election. + if cluster.is_local: + producer.wait_state_restored() + + consumer = next(proxies).create_client("consumer") + consumer.open(tc.URI_PRIORITY_SC, flags=["read"], succeed=True) + consumer.wait_push_event() + msgs = consumer.list(tc.URI_PRIORITY_SC, block=True) + + # we truncate the message to 32 characters in the bmqtool api used by + # the consumer.list function + # TODO: Add support for listing complete messages in bmqtool + assert len(msgs) == 1 + assert msgs[0].payload[:32] == payload[:32] diff --git a/src/integration-tests/test_confirm_after_killing_primary.py b/src/integration-tests/test_confirm_after_killing_primary.py index c88d6f0e74..90b5fc47b7 100644 --- a/src/integration-tests/test_confirm_after_killing_primary.py +++ b/src/integration-tests/test_confirm_after_killing_primary.py @@ -1,78 +1,73 @@ +""" +This test case verifies fix for the broker crash when virtual iterator goes +out of sync while processing CONFIRM after converting priority queue to +local. +""" + import bmq.dev.it.testconstants as tc -from bmq.dev.it.fixtures import ( # pylint: disable=unused-import - Cluster, - standard_cluster as cluster, +from bmq.dev.it.fixtures import Cluster +from bmq.dev.it.fixtures import ( + standard_cluster as cluster, # pylint: disable=unused-import ) from bmq.dev.it.process.client import Client from bmq.dev.it.util import wait_until -class TestConfirmAfterKillingPrimary: - """ - This test case verifies fix for the broker crash when virtual iterator goes - out of sync while processing CONFIRM after converting priority queue to - local. 
DRQS 144256387. - """ - - def test_confirm_after_killing_primary(self, cluster: Cluster): - proxies = cluster.proxy_cycle() +def test_confirm_after_killing_primary(cluster: Cluster): + proxies = cluster.proxy_cycle() - # we want proxy connected to a replica - next(proxies) + # we want proxy connected to a replica + next(proxies) - proxy = next(proxies) - consumer = proxy.create_client("consumer") - producer = proxy.create_client("producer") + proxy = next(proxies) + consumer = proxy.create_client("consumer") + producer = proxy.create_client("producer") - producer.open(tc.URI_PRIORITY, flags=["write", "ack"], succeed=True) - consumer.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + producer.open(tc.URI_PRIORITY, flags=["write", "ack"], succeed=True) + consumer.open(tc.URI_PRIORITY, flags=["read"], succeed=True) - producer.post(tc.URI_PRIORITY, payload=["msg1"], wait_ack=True, succeed=True) + producer.post(tc.URI_PRIORITY, payload=["msg1"], wait_ack=True, succeed=True) - consumer.wait_push_event() - assert wait_until( - lambda: len(consumer.list(tc.URI_PRIORITY, block=True)) == 1, 2 - ) - msgs = consumer.list(tc.URI_PRIORITY, block=True) - assert msgs[0].payload == "msg1" + consumer.wait_push_event() + assert wait_until(lambda: len(consumer.list(tc.URI_PRIORITY, block=True)) == 1, 2) + msgs = consumer.list(tc.URI_PRIORITY, block=True) + assert msgs[0].payload == "msg1" - # make the quorum for replica to be 1 so it becomes new primary - replica = cluster.process(proxy.get_active_node()) - for node in cluster.nodes(): - if node == replica: - node.set_quorum(1) - else: - node.set_quorum(99) + # make the quorum for replica to be 1 so it becomes new primary + replica = cluster.process(proxy.get_active_node()) + for node in cluster.nodes(): + if node == replica: + node.set_quorum(1) + else: + node.set_quorum(99) - # kill the primary - replica.drain() - cluster.drain() - leader = cluster.last_known_leader - leader.check_exit_code = False - leader.kill() - leader.wait() + # kill the primary + replica.drain() + cluster.drain() + leader = cluster.last_known_leader + leader.check_exit_code = False + leader.kill() + leader.wait() - # wait for new leader - cluster.wait_leader() - assert cluster.last_known_leader == replica + # wait for new leader + cluster.wait_leader() + assert cluster.last_known_leader == replica - # need to wait for remote queue converted to local - # otherwise CONFIRM/PUT can get rejected if happen in between the - # conversion - assert replica.outputs_substr( - f"Rebuilt internal state of queue engine for queue [{tc.URI_PRIORITY}]", - timeout=5, - ) + # need to wait for remote queue converted to local + # otherwise CONFIRM/PUT can get rejected if happen in between the + # conversion + assert replica.outputs_substr( + f"Rebuilt internal state of queue engine for queue [{tc.URI_PRIORITY}]", + timeout=5, + ) - # confirm - assert consumer.confirm(tc.URI_PRIORITY, "*", block=True) == Client.e_SUCCESS - # post - producer.post(tc.URI_PRIORITY, payload=["msg2"], wait_ack=True, succeed=True) + # confirm + assert consumer.confirm(tc.URI_PRIORITY, "*", block=True) == Client.e_SUCCESS + # post + producer.post(tc.URI_PRIORITY, payload=["msg2"], wait_ack=True, succeed=True) - # verify that replica did not crash - consumer.wait_push_event() - assert wait_until( - lambda: len(consumer.list(tc.URI_PRIORITY, block=True)) == 1, 2 - ) - msgs = consumer.list(tc.URI_PRIORITY, block=True) - assert msgs[0].payload == "msg2" + # verify that replica did not crash + consumer.wait_push_event() + assert 
wait_until(lambda: len(consumer.list(tc.URI_PRIORITY, block=True)) == 1, 2) + msgs = consumer.list(tc.URI_PRIORITY, block=True) + assert msgs[0].payload == "msg2" diff --git a/src/integration-tests/test_fanout_priorities.py b/src/integration-tests/test_fanout_priorities.py index 4a84522a7c..c40302df46 100644 --- a/src/integration-tests/test_fanout_priorities.py +++ b/src/integration-tests/test_fanout_priorities.py @@ -1,101 +1,94 @@ +""" +This suite of test cases exercises functionality of prioritizing fanout +consumers . +""" + + from bmq.dev.it.fixtures import Cluster, cluster # pylint: disable=unused-import from bmq.dev.it.process.client import Client from bmq.dev.it.util import wait_until -class TestFanoutPriorities: - """ - This suite of test cases exercises functionality of prioritizing - fanout consumers . - """ +def test_fanout_priorities(cluster: Cluster): + # create foo, bar, and baz clients on every node. - def test_fanout_priorities(self, cluster: Cluster): - # create foo, bar, and baz clients on every node. + # two of each with priorities 1 and 2 - # two of each with priorities 1 and 2 + producers = [] + apps = ["foo", "bar", "baz"] + proxies = cluster.proxy_cycle() - producers = [] - apps = ["foo", "bar", "baz"] - proxies = cluster.proxy_cycle() + nodes = cluster.nodes() + highPriorityQueues = [] + lowPriorityQueues = [] - nodes = cluster.nodes() - highPriorityQueues = [] - lowPriorityQueues = [] + nodes.append(next(proxies)) + nodes.append(next(proxies)) - nodes.append(next(proxies)) - nodes.append(next(proxies)) + for node in nodes: + client = node.create_client(f"{node.name}Consumer2") + [queues] = client.open_fanout_queues( + 1, flags=["read"], consumer_priority=2, block=True, appids=apps + ) + highPriorityQueues += queues - for node in nodes: - client = node.create_client(f"{node.name}Consumer2") - [queues] = client.open_fanout_queues( - 1, flags=["read"], consumer_priority=2, block=True, appids=apps - ) - highPriorityQueues += queues + client = node.create_client(f"{node.name}Consumer1") + [queues] = client.open_fanout_queues( + 1, flags=["read"], consumer_priority=1, block=True, appids=apps + ) + lowPriorityQueues += queues - client = node.create_client(f"{node.name}Consumer1") - [queues] = client.open_fanout_queues( - 1, flags=["read"], consumer_priority=1, block=True, appids=apps - ) - lowPriorityQueues += queues + [producer] = client.open_fanout_queues(1, flags=["write", "ack"], block=True) + producers.append(producer) - [producer] = client.open_fanout_queues( - 1, flags=["write", "ack"], block=True - ) - producers.append(producer) + # Deliver to high priorities - # Deliver to high priorities + _verify_delivery(producers, "before", highPriorityQueues, lowPriorityQueues) - self._verify_delivery( - producers, "before", highPriorityQueues, lowPriorityQueues - ) + # reverse priorities + for queue in lowPriorityQueues: + assert queue.configure(consumer_priority=2, block=True) == Client.e_SUCCESS - # reverse priorities - for queue in lowPriorityQueues: - assert queue.configure(consumer_priority=2, block=True) == Client.e_SUCCESS + for queue in highPriorityQueues: + assert queue.configure(consumer_priority=1, block=True) == Client.e_SUCCESS - for queue in highPriorityQueues: - assert queue.configure(consumer_priority=1, block=True) == Client.e_SUCCESS + # Deliver to new high priorities - # Deliver to new high priorities + _verify_delivery(producers, "after", lowPriorityQueues, highPriorityQueues) - self._verify_delivery(producers, "after", lowPriorityQueues, 
highPriorityQueues) + # Close everything - # Close everything + for queue in producers: + assert queue.close(block=True) == Client.e_SUCCESS - for queue in producers: - assert queue.close(block=True) == Client.e_SUCCESS + for queue in lowPriorityQueues: + assert queue.close(block=True) == Client.e_SUCCESS - for queue in lowPriorityQueues: - assert queue.close(block=True) == Client.e_SUCCESS + for queue in highPriorityQueues: + assert queue.close(block=True) == Client.e_SUCCESS - for queue in highPriorityQueues: - assert queue.close(block=True) == Client.e_SUCCESS - def _verify_delivery( - self, producers, message, highPriorityQueues, lowPriorityQueues - ): +def _verify_delivery(producers, message, highPriorityQueues, lowPriorityQueues): - for i, producer in enumerate(producers): - # there is one producer on each node - assert ( - producer.post([f"{message}{i}"], block=True, wait_ack=True) - == Client.e_SUCCESS - ) - # this results in one message per each item in highPriorityQueues + for i, producer in enumerate(producers): + # there is one producer on each node + assert ( + producer.post([f"{message}{i}"], block=True, wait_ack=True) + == Client.e_SUCCESS + ) + # this results in one message per each item in highPriorityQueues - # pylint: disable=cell-var-from-loop; passing lambda to 'wait_until' is safe - for queue in highPriorityQueues: - messages = [] - assert wait_until(lambda: len(queue.list(block=True)) == 1, 2, quiet=True) + # pylint: disable=cell-var-from-loop; passing lambda to 'wait_until' is safe + for queue in highPriorityQueues: + messages = [] + assert wait_until(lambda: len(queue.list(block=True)) == 1, 2, quiet=True) - msgs = queue.list(block=True) - assert msgs[0].payload not in messages - messages.append(msgs[0].payload) + msgs = queue.list(block=True) + assert msgs[0].payload not in messages + messages.append(msgs[0].payload) - assert queue.confirm("*", block=True) == Client.e_SUCCESS + assert queue.confirm("*", block=True) == Client.e_SUCCESS - for queue in lowPriorityQueues: - # assert no PUSH received within 0.1 second - assert not wait_until( - lambda: len(queue.list(block=True)) == 1, 0.1, quiet=True - ) + for queue in lowPriorityQueues: + # assert no PUSH received within 0.1 second + assert not wait_until(lambda: len(queue.list(block=True)) == 1, 0.1, quiet=True) diff --git a/src/integration-tests/test_leader_node_delay.py b/src/integration-tests/test_leader_node_delay.py index 951a9cb1d6..050e5d55ee 100644 --- a/src/integration-tests/test_leader_node_delay.py +++ b/src/integration-tests/test_leader_node_delay.py @@ -7,40 +7,39 @@ import bmq.dev.it.testconstants as tc from bmq.dev.it.fixtures import Cluster -from bmq.dev.it.fixtures import ( # pylint: disable=unused-import - standard_cluster as cluster, +from bmq.dev.it.fixtures import ( + standard_cluster as cluster, # pylint: disable=unused-import ) -class TestLeaderNodeDelay: - def test_leader_node_delay(self, cluster: Cluster): - leader = cluster.last_known_leader - followers = [node for node in cluster.nodes() if node is not leader] +def test_leader_node_delay(cluster: Cluster): + leader = cluster.last_known_leader + followers = [node for node in cluster.nodes() if node is not leader] - # 1. Suspend leader node and wait until followers notice: + # 1. 
Suspend leader node and wait until followers notice: - for follower in followers: - # We don't want the follower nodes to elect a leader among - # themselves when we suspend the current leader - follower.set_quorum(100, succeed=True) + for follower in followers: + # We don't want the follower nodes to elect a leader among + # themselves when we suspend the current leader + follower.set_quorum(100, succeed=True) - # make sure the folllower is available - consumer = follower.create_client("consumer") - consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) - consumer.close(tc.URI_BROADCAST, succeed=True) - consumer.exit_gracefully() + # make sure the folllower is available + consumer = follower.create_client("consumer") + consumer.open(tc.URI_BROADCAST, flags=["read"], succeed=True) + consumer.close(tc.URI_BROADCAST, succeed=True) + consumer.exit_gracefully() - leader.suspend() + leader.suspend() - for follower in followers: - assert follower.outputs_substr("new code: LEADER_NO_HEARTBEAT", 120) + for follower in followers: + assert follower.outputs_substr("new code: LEADER_NO_HEARTBEAT", 120) - # 2. Resume leader, then verify each follower node recognizes the - # transition of the leader from passive to active: - leader.resume() + # 2. Resume leader, then verify each follower node recognizes the + # transition of the leader from passive to active: + leader.resume() - for follower in followers: - assert follower.outputs_regex( - "#ELECTOR_INFO: leader.*transitioning from PASSIVE to ACTIVE", - timeout=60, - ) + for follower in followers: + assert follower.outputs_regex( + "#ELECTOR_INFO: leader.*transitioning from PASSIVE to ACTIVE", + timeout=60, + ) diff --git a/src/integration-tests/test_list_messages.py b/src/integration-tests/test_list_messages.py index ebda978e61..adc27140e9 100644 --- a/src/integration-tests/test_list_messages.py +++ b/src/integration-tests/test_list_messages.py @@ -14,139 +14,135 @@ def expected_header(start, count, total, size): ) -class TestListMessages: - def test_list_messages_fanout(self, cluster: Cluster): - - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() - - producer = next(proxies).create_client("producer") - producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) - - for i in range(1, 4): - producer.post( - tc.URI_FANOUT, - ["x" * 10 * i], - wait_ack=True, - succeed=True, - ) - - consumer = next(proxies).create_client("consumer") - - for i, uri in enumerate( - [tc.URI_FANOUT_FOO, tc.URI_FANOUT_BAR, tc.URI_FANOUT_BAZ] - ): - consumer.open(uri, flags=["read"], succeed=True) - - assert wait_until(lambda: len(consumer.list(block=True)) == 9, TIMEOUT) - guids = [msg.guid for msg in consumer.list(tc.URI_FANOUT_FOO, block=True)] - - for i, uri in enumerate( - [tc.URI_FANOUT_FOO, tc.URI_FANOUT_BAR, tc.URI_FANOUT_BAZ] - ): - consumer.confirm(uri, guids[i], succeed=True) - - leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, 3) - assert leader.outputs_substr(expected_header(0, 3, 3, 60), TIMEOUT) - assert leader.outputs_substr("10", TIMEOUT) - assert leader.outputs_substr("20", TIMEOUT) - assert leader.outputs_substr("30", TIMEOUT) - - leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, "UNLIMITED") - assert leader.outputs_substr(expected_header(0, 3, 3, 60), TIMEOUT) - assert leader.outputs_substr("10", TIMEOUT) - assert leader.outputs_substr("20", TIMEOUT) - assert leader.outputs_substr("30", TIMEOUT) - - leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 1, 1) - assert leader.outputs_substr(expected_header(1, 1, 3, 
20), TIMEOUT) - assert leader.outputs_substr("20", TIMEOUT) - - leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, -1, 1) - assert leader.outputs_substr(expected_header(2, 1, 3, 30), TIMEOUT) - assert leader.outputs_substr("30", TIMEOUT) - - leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 1, -1) - assert leader.outputs_substr(expected_header(0, 1, 3, 10), TIMEOUT) - assert leader.outputs_substr("10", TIMEOUT) - - leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, -1, -1) - assert leader.outputs_substr(expected_header(1, 1, 3, 20), TIMEOUT) - assert leader.outputs_substr("20", TIMEOUT) - - leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, "UNLIMITED garbage") - assert leader.outputs_substr("Error processing command", TIMEOUT) - - time.sleep(2) # Allow previous confirmations to complete - for i, appid in enumerate(["foo", "bar", "baz"]): - leader.list_messages( - tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, "UNLIMITED", appid=appid - ) - assert leader.outputs_substr( - expected_header(0, 2, 2, 60 - (i + 1) * 10), TIMEOUT - ) - for j in range(0, 3): - if i != j: - assert leader.outputs_regex(f"{guids[j]}.*{(j + 1) * 10}", TIMEOUT) +def test_list_messages_fanout(cluster: Cluster): - leader.list_messages( - tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, "UNLIMITED", appid="pikachu" - ) - assert leader.outputs_substr("Invalid 'LIST' command: invalid APPID", TIMEOUT) + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() + + producer = next(proxies).create_client("producer") + producer.open(tc.URI_FANOUT, flags=["write,ack"], succeed=True) - def test_list_messages_priority(self, cluster: Cluster): - leader = cluster.last_known_leader - proxies = cluster.proxy_cycle() + for i in range(1, 4): + producer.post( + tc.URI_FANOUT, + ["x" * 10 * i], + wait_ack=True, + succeed=True, + ) - producer = next(proxies).create_client("producer") - producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) + consumer = next(proxies).create_client("consumer") - for i in range(1, 4): - producer.post( - tc.URI_PRIORITY, - ["x" * 10 * i], - wait_ack=True, - succeed=True, - ) + for i, uri in enumerate([tc.URI_FANOUT_FOO, tc.URI_FANOUT_BAR, tc.URI_FANOUT_BAZ]): + consumer.open(uri, flags=["read"], succeed=True) - client = next(proxies).create_client("consumer") - client.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + assert wait_until(lambda: len(consumer.list(block=True)) == 9, TIMEOUT) + guids = [msg.guid for msg in consumer.list(tc.URI_FANOUT_FOO, block=True)] - assert wait_until(lambda: len(client.list(block=True)) == 3, TIMEOUT) - _ = [msg.guid for msg in client.list(uri=tc.URI_PRIORITY, block=True)] + for i, uri in enumerate([tc.URI_FANOUT_FOO, tc.URI_FANOUT_BAR, tc.URI_FANOUT_BAZ]): + consumer.confirm(uri, guids[i], succeed=True) - leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 0, 3) - assert leader.outputs_substr(expected_header(0, 3, 3, 60), TIMEOUT) - assert leader.outputs_substr("10", TIMEOUT) - assert leader.outputs_substr("20", TIMEOUT) - assert leader.outputs_substr("30", TIMEOUT) + leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, 3) + assert leader.outputs_substr(expected_header(0, 3, 3, 60), TIMEOUT) + assert leader.outputs_substr("10", TIMEOUT) + assert leader.outputs_substr("20", TIMEOUT) + assert leader.outputs_substr("30", TIMEOUT) - leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 0, "UNLIMITED") - assert leader.outputs_substr(expected_header(0, 3, 3, 60), TIMEOUT) - assert leader.outputs_substr("10", TIMEOUT) - assert leader.outputs_substr("20", 
TIMEOUT) - assert leader.outputs_substr("30", TIMEOUT) + leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, "UNLIMITED") + assert leader.outputs_substr(expected_header(0, 3, 3, 60), TIMEOUT) + assert leader.outputs_substr("10", TIMEOUT) + assert leader.outputs_substr("20", TIMEOUT) + assert leader.outputs_substr("30", TIMEOUT) - leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 1, 1) - assert leader.outputs_substr(expected_header(1, 1, 3, 20), TIMEOUT) - assert leader.outputs_substr("20", TIMEOUT) + leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 1, 1) + assert leader.outputs_substr(expected_header(1, 1, 3, 20), TIMEOUT) + assert leader.outputs_substr("20", TIMEOUT) - leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, -1, 1) - assert leader.outputs_substr(expected_header(2, 1, 3, 30), TIMEOUT) - assert leader.outputs_substr("30", TIMEOUT) + leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, -1, 1) + assert leader.outputs_substr(expected_header(2, 1, 3, 30), TIMEOUT) + assert leader.outputs_substr("30", TIMEOUT) - leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 1, -1) - assert leader.outputs_substr(expected_header(0, 1, 3, 10), TIMEOUT) - assert leader.outputs_substr("10", TIMEOUT) + leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 1, -1) + assert leader.outputs_substr(expected_header(0, 1, 3, 10), TIMEOUT) + assert leader.outputs_substr("10", TIMEOUT) - leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, -1, -1) - assert leader.outputs_substr(expected_header(1, 1, 3, 20), TIMEOUT) - assert leader.outputs_substr("20", TIMEOUT) + leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, -1, -1) + assert leader.outputs_substr(expected_header(1, 1, 3, 20), TIMEOUT) + assert leader.outputs_substr("20", TIMEOUT) - leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 0, "UNLIMITED garbage") - assert leader.outputs_substr("Error processing command", TIMEOUT) + leader.list_messages(tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, "UNLIMITED garbage") + assert leader.outputs_substr("Error processing command", TIMEOUT) + time.sleep(2) # Allow previous confirmations to complete + for i, appid in enumerate(["foo", "bar", "baz"]): leader.list_messages( - tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 0, "UNLIMITED", appid="pikachu" + tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, "UNLIMITED", appid=appid + ) + assert leader.outputs_substr( + expected_header(0, 2, 2, 60 - (i + 1) * 10), TIMEOUT + ) + for j in range(0, 3): + if i != j: + assert leader.outputs_regex(f"{guids[j]}.*{(j + 1) * 10}", TIMEOUT) + + leader.list_messages( + tc.DOMAIN_FANOUT, tc.TEST_QUEUE, 0, "UNLIMITED", appid="pikachu" + ) + assert leader.outputs_substr("Invalid 'LIST' command: invalid APPID", TIMEOUT) + + +def test_list_messages_priority(cluster: Cluster): + leader = cluster.last_known_leader + proxies = cluster.proxy_cycle() + + producer = next(proxies).create_client("producer") + producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) + + for i in range(1, 4): + producer.post( + tc.URI_PRIORITY, + ["x" * 10 * i], + wait_ack=True, + succeed=True, ) - assert leader.outputs_substr("Invalid 'LIST' command: invalid APPID", TIMEOUT) + + client = next(proxies).create_client("consumer") + client.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + + assert wait_until(lambda: len(client.list(block=True)) == 3, TIMEOUT) + _ = [msg.guid for msg in client.list(uri=tc.URI_PRIORITY, block=True)] + + leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 0, 3) + assert leader.outputs_substr(expected_header(0, 3, 3, 60), 
TIMEOUT) + assert leader.outputs_substr("10", TIMEOUT) + assert leader.outputs_substr("20", TIMEOUT) + assert leader.outputs_substr("30", TIMEOUT) + + leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 0, "UNLIMITED") + assert leader.outputs_substr(expected_header(0, 3, 3, 60), TIMEOUT) + assert leader.outputs_substr("10", TIMEOUT) + assert leader.outputs_substr("20", TIMEOUT) + assert leader.outputs_substr("30", TIMEOUT) + + leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 1, 1) + assert leader.outputs_substr(expected_header(1, 1, 3, 20), TIMEOUT) + assert leader.outputs_substr("20", TIMEOUT) + + leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, -1, 1) + assert leader.outputs_substr(expected_header(2, 1, 3, 30), TIMEOUT) + assert leader.outputs_substr("30", TIMEOUT) + + leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 1, -1) + assert leader.outputs_substr(expected_header(0, 1, 3, 10), TIMEOUT) + assert leader.outputs_substr("10", TIMEOUT) + + leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, -1, -1) + assert leader.outputs_substr(expected_header(1, 1, 3, 20), TIMEOUT) + assert leader.outputs_substr("20", TIMEOUT) + + leader.list_messages(tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 0, "UNLIMITED garbage") + assert leader.outputs_substr("Error processing command", TIMEOUT) + + leader.list_messages( + tc.DOMAIN_PRIORITY, tc.TEST_QUEUE, 0, "UNLIMITED", appid="pikachu" + ) + assert leader.outputs_substr("Invalid 'LIST' command: invalid APPID", TIMEOUT) diff --git a/src/integration-tests/test_maxunconfirmed.py b/src/integration-tests/test_maxunconfirmed.py index 169530295b..ef8437af4e 100644 --- a/src/integration-tests/test_maxunconfirmed.py +++ b/src/integration-tests/test_maxunconfirmed.py @@ -1,7 +1,3 @@ -""" -DRQS 168460165. -""" - import bmq.dev.it.testconstants as tc from bmq.dev.it.fixtures import ( # pylint: disable=unused-import Cluster, diff --git a/src/integration-tests/test_queue_close.py b/src/integration-tests/test_queue_close.py index d4d9c1d4f2..14eaba8622 100644 --- a/src/integration-tests/test_queue_close.py +++ b/src/integration-tests/test_queue_close.py @@ -13,162 +13,164 @@ from bmq.dev.it.process.client import Client -class TestCloseQueue: - def test_close_queue(self, local_cluster: Cluster): - assert local_cluster.is_local +def test_close_queue(local_cluster: Cluster): + assert local_cluster.is_local - # Start a consumer and open a queue - proxies = local_cluster.proxy_cycle() - consumer = next(proxies).create_client("consumer") - consumer.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + # Start a consumer and open a queue + proxies = local_cluster.proxy_cycle() + consumer = next(proxies).create_client("consumer") + consumer.open(tc.URI_PRIORITY, flags=["read"], succeed=True) - # Shutdown the broker - leader = local_cluster.last_known_leader - leader.stop() + # Shutdown the broker + leader = local_cluster.last_known_leader + leader.stop() - # Try to close the queue - consumer.wait_connection_lost() + # Try to close the queue + consumer.wait_connection_lost() - # Pending queue can be closed successfully - assert consumer.close(tc.URI_PRIORITY, block=True) == Client.e_SUCCESS + # Pending queue can be closed successfully + assert consumer.close(tc.URI_PRIORITY, block=True) == Client.e_SUCCESS - @tweak.domain.max_consumers(1) - @start_cluster(False) - def test_close_while_reopening(self, standard_cluster: Cluster): - """ - DRQS 169125974. Closing queue while reopen response is pending should - not result in a dangling handle. 
- """ - cluster = standard_cluster +@tweak.domain.max_consumers(1) +@start_cluster(False) +def test_close_while_reopening(standard_cluster: Cluster): + """ + DRQS 169125974. Closing queue while reopen response is pending should + not result in a dangling handle. + """ - west1 = cluster.start_node("west1") - # make it primary - west1.set_quorum(1) + cluster = standard_cluster - # Two replicas for a total of 3 nodes - east1 = cluster.start_node("east1") - east1.set_quorum(5) + west1 = cluster.start_node("west1") + # make it primary + west1.set_quorum(1) - east2 = cluster.start_node("east2") - east2.set_quorum(5) + # Two replicas for a total of 3 nodes + east1 = cluster.start_node("east1") + east1.set_quorum(5) - east1.wait_status(wait_leader=True, wait_ready=True) + east2 = cluster.start_node("east2") + east2.set_quorum(5) - # west1 is the primary - assert west1 == east1.last_known_leader + east1.wait_status(wait_leader=True, wait_ready=True) - # One proxy connected to the primary - westp = cluster.start_proxy("westp") + # west1 is the primary + assert west1 == east1.last_known_leader - consumer1 = westp.create_client("consumer1") - consumer2 = westp.create_client("consumer2") + # One proxy connected to the primary + westp = cluster.start_proxy("westp") - consumer1.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + consumer1 = westp.create_client("consumer1") + consumer2 = westp.create_client("consumer2") - assert west1 == cluster.process(westp.get_active_node()) + consumer1.open(tc.URI_PRIORITY, flags=["read"], succeed=True) - # Should fail - consumer2.open(tc.URI_PRIORITY, flags=["read"], succeed=False) + assert west1 == cluster.process(westp.get_active_node()) - east1.set_quorum(3) - east2.set_quorum(3) + # Should fail + consumer2.open(tc.URI_PRIORITY, flags=["read"], succeed=False) - # Stop the primary. The proxy will pick new active node and re-issue - # Open request but the new active node (either r1 or r2) will not - # respond because there is no quorum (3) for new primary + east1.set_quorum(3) + east2.set_quorum(3) - west1.exit_gracefully() - # Wait for the subprocess to terminate - west1.wait() + # Stop the primary. The proxy will pick new active node and re-issue + # Open request but the new active node (either r1 or r2) will not + # respond because there is no quorum (3) for new primary - # Now send Close request which the proxy should park - consumer1.close(tc.URI_PRIORITY, block=False) + west1.exit_gracefully() + # Wait for the subprocess to terminate + west1.wait() - # Restore the quorum. Proxy should send the parked Close _after_ - # receiving Reopen response (and after sending Configure request) - west2 = cluster.start_node("west2") - west2.wait_status(wait_leader=True, wait_ready=True) + # Now send Close request which the proxy should park + consumer1.close(tc.URI_PRIORITY, block=False) - # Should succeed now! - consumer2.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + # Restore the quorum. Proxy should send the parked Close _after_ + # receiving Reopen response (and after sending Configure request) + west2 = cluster.start_node("west2") + west2.wait_status(wait_leader=True, wait_ready=True) - consumer3 = westp.create_client("consumer3") - # Should fail - consumer3.open(tc.URI_PRIORITY, flags=["read"], succeed=False) + # Should succeed now! + consumer2.open(tc.URI_PRIORITY, flags=["read"], succeed=True) - def test_close_open(self, standard_cluster: Cluster): - """ - DRQS 169326671. Close, followed by Open with a different subId. 
- """ - proxies = standard_cluster.proxy_cycle() - # pick proxy in datacenter opposite to the primary's - next(proxies) - proxy = next(proxies) - consumer1 = proxy.create_client("consumer1") - consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + consumer3 = westp.create_client("consumer3") + # Should fail + consumer3.open(tc.URI_PRIORITY, flags=["read"], succeed=False) - consumer2 = proxy.create_client("consumer2") - consumer2.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) - leader = standard_cluster.last_known_leader - consumer3 = leader.create_client("consumer3") - consumer3.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) +def test_close_open(standard_cluster: Cluster): + """ + DRQS 169326671. Close, followed by Open with a different subId. + """ + proxies = standard_cluster.proxy_cycle() + # pick proxy in datacenter opposite to the primary's + next(proxies) + proxy = next(proxies) + consumer1 = proxy.create_client("consumer1") + consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - consumer1.close(tc.URI_FANOUT_FOO, succeed=True) - consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + consumer2 = proxy.create_client("consumer2") + consumer2.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) - @tweak.domain.max_consumers(1) - @tweak.cluster.queue_operations.reopen_retry_interval_ms(1234) - def test_close_while_retrying_reopen(self, standard_cluster: Cluster): - """ - DRQS 170043950. Trigger reopen failure causing proxy to retry on - timeout. While waiting, close the queue and make sure, the retry - accounts for that close. - """ + leader = standard_cluster.last_known_leader + consumer3 = leader.create_client("consumer3") + consumer3.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - proxies = standard_cluster.proxy_cycle() - # pick proxy in datacenter opposite to the primary's - next(proxies) - proxy1 = next(proxies) - proxy2 = next(proxies) + consumer1.close(tc.URI_FANOUT_FOO, succeed=True) + consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - producer = proxy1.create_client("producer") - consumer1 = proxy1.create_client("consumer1") - consumer2 = proxy2.create_client("consumer2") - producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) - consumer1.open(tc.URI_PRIORITY, flags=["read"], succeed=True) +@tweak.domain.max_consumers(1) +@tweak.cluster.queue_operations.reopen_retry_interval_ms(1234) +def test_close_while_retrying_reopen(standard_cluster: Cluster): + """ + DRQS 170043950. Trigger reopen failure causing proxy to retry on + timeout. While waiting, close the queue and make sure, the retry + accounts for that close. 
+ """ - active_node = standard_cluster.process(proxy1.get_active_node()) - proxy1.suspend() + proxies = standard_cluster.proxy_cycle() + # pick proxy in datacenter opposite to the primary's + next(proxies) + proxy1 = next(proxies) + proxy2 = next(proxies) - # this is to trigger reopen when proxy1 resumes - active_node.force_stop() + producer = proxy1.create_client("producer") + consumer1 = proxy1.create_client("consumer1") + consumer2 = proxy2.create_client("consumer2") - # this is to make the reopen fail - consumer2.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) + consumer1.open(tc.URI_PRIORITY, flags=["read"], succeed=True) - # trigger reopen - proxy1.resume() + active_node = standard_cluster.process(proxy1.get_active_node()) + proxy1.suspend() - # reopen should fail because of consumer2 - assert proxy1.capture( - r"queue reopen-request failed. .*, error response: \[ rId = \d+ choice = \[ status = \[ category = E_UNKNOWN code = -1 message = \"Client would exceed the limit of 1 consumer\(s\)\" \] \] \]. Attempt number was: 1. Attempting again after 1234 milliseconds", - timeout=10, - ) + # this is to trigger reopen when proxy1 resumes + active_node.force_stop() - # this should stop reopening consumer - consumer1.close(tc.URI_PRIORITY, succeed=True) + # this is to make the reopen fail + consumer2.open(tc.URI_PRIORITY, flags=["read"], succeed=True) - # this is to make (re)open to succeed - consumer2.close(tc.URI_PRIORITY, succeed=True) + # trigger reopen + proxy1.resume() - # next reopen should not have readCount - assert proxy1.capture( - r"Sending request to .* \[request: \[ rId = \d+ choice = \[ openQueue = \[ handleParameters = \[ .* flags = 4 readCount = 0 writeCount = 1 adminCount = 0 \] \] \] \]", - timeout=10, - ) - - # verify new open - consumer1.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + # reopen should fail because of consumer2 + assert proxy1.capture( + r"queue reopen-request failed. .*, error response: \[ rId = \d+ choice = \[ status = \[ category = E_UNKNOWN code = -1 message = \"Client would exceed the limit of 1 consumer\(s\)\" \] \] \]. Attempt number was: 1. Attempting again after 1234 milliseconds", + timeout=10, + ) + + # this should stop reopening consumer + consumer1.close(tc.URI_PRIORITY, succeed=True) + + # this is to make (re)open to succeed + consumer2.close(tc.URI_PRIORITY, succeed=True) + + # next reopen should not have readCount + assert proxy1.capture( + r"Sending request to .* \[request: \[ rId = \d+ choice = \[ openQueue = \[ handleParameters = \[ .* flags = 4 readCount = 0 writeCount = 1 adminCount = 0 \] \] \] \]", + timeout=10, + ) + + # verify new open + consumer1.open(tc.URI_PRIORITY, flags=["read"], succeed=True) diff --git a/src/integration-tests/test_queue_reopen.py b/src/integration-tests/test_queue_reopen.py index 8cf8728c06..02404bd341 100644 --- a/src/integration-tests/test_queue_reopen.py +++ b/src/integration-tests/test_queue_reopen.py @@ -1,5 +1,5 @@ """ -Integration tests for queue reopen scenarios. +Integration tests for queue re-open scenarios. """ import bmq.dev.it.testconstants as tc @@ -10,57 +10,57 @@ from bmq.dev.it.process.client import Client -class TestReopenQueue: - def test_reopen_empty_queue(self, standard_cluster: Cluster): - """ - DRQS 158382093. If queue has no handles by the time cluster state - restores, it should still be notified in order to update its state. 
- """ - proxies = standard_cluster.proxy_cycle() - # pick proxy in datacenter opposite to the primary's - next(proxies) - replica_proxy = next(proxies) +def test_reopen_empty_queue(standard_cluster: Cluster): + """ + If queue has no handles by the time cluster state restores, it should + still be notified in order to update its state. + """ + proxies = standard_cluster.proxy_cycle() + # pick proxy in datacenter opposite to the primary's + next(proxies) + replica_proxy = next(proxies) - # Start a producer and open a queue - producer = replica_proxy.create_client("producer") - producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) + # Start a producer and open a queue + producer = replica_proxy.create_client("producer") + producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) - # If queue open has succeeded, then active_node is known - active_node = standard_cluster.process(replica_proxy.get_active_node()) + # If queue open has succeeded, then active_node is known + active_node = standard_cluster.process(replica_proxy.get_active_node()) - # Close the queue. The replica keeps (stale) RemoteQueue - producer.exit_gracefully() + # Close the queue. The replica keeps (stale) RemoteQueue + producer.exit_gracefully() - # Prevent 'active_node' from becoming new primary - active_node.set_quorum(4) + # Prevent 'active_node' from becoming new primary + active_node.set_quorum(4) - # Shutdown the primary - leader = standard_cluster.last_known_leader - leader.stop() + # Shutdown the primary + leader = standard_cluster.last_known_leader + leader.stop() - # Start a producer and open a queue - producer = replica_proxy.create_client("producer") - producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) + # Start a producer and open a queue + producer = replica_proxy.create_client("producer") + producer.open(tc.URI_PRIORITY, flags=["write,ack"], succeed=True) - # Post should result in successful Ack - assert ( - producer.post(tc.URI_PRIORITY, ["1"], wait_ack=True, succeed=True) - == Client.e_SUCCESS - ) + # Post should result in successful Ack + assert ( + producer.post(tc.URI_PRIORITY, ["1"], wait_ack=True, succeed=True) + == Client.e_SUCCESS + ) - def test_reopen_substream(self, standard_cluster: Cluster): - """ - DRQS 169527537. Make a primary's client reopen the same appId with a - different subId. - """ - leader = standard_cluster.last_known_leader - consumer1 = leader.create_client("consumer1") - consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) +def test_reopen_substream(standard_cluster: Cluster): + """ + DRQS 169527537. Make a primary's client reopen the same appId with a + different subId. 
+ """ - consumer2 = leader.create_client("consumer2") - consumer2.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - consumer2.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) + leader = standard_cluster.last_known_leader + consumer1 = leader.create_client("consumer1") + consumer1.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) - consumer2.close(tc.URI_FANOUT_FOO, succeed=True) - consumer2.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + consumer2 = leader.create_client("consumer2") + consumer2.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) + consumer2.open(tc.URI_FANOUT_BAR, flags=["read"], succeed=True) + + consumer2.close(tc.URI_FANOUT_FOO, succeed=True) + consumer2.open(tc.URI_FANOUT_FOO, flags=["read"], succeed=True) diff --git a/src/integration-tests/test_reopen_queue_failure.py b/src/integration-tests/test_reopen_queue_failure.py index 91af74c7ac..28f12f5901 100644 --- a/src/integration-tests/test_reopen_queue_failure.py +++ b/src/integration-tests/test_reopen_queue_failure.py @@ -1,59 +1,58 @@ +""" +This test case verifies a fix for the broker crash when replica or proxy node +crashes while processing a configure or close queue response after a reopen +queue has failed. All nodes going down gracefully at cluster shutdown verifies +the fix. +""" + import bmq.dev.it.testconstants as tc -from bmq.dev.it.fixtures import ( # pylint: disable=unused-import - Cluster, - standard_cluster as cluster, +from bmq.dev.it.fixtures import Cluster +from bmq.dev.it.fixtures import ( + standard_cluster as cluster, # pylint: disable=unused-import ) -class TestReopenQueueFailure: - """ - This test case verifies a fix for the broker crash when replica or proxy - node crashes while processing a configure or close queue response after a - reopen queue has failed. All nodes going down gracefully at cluster - shutdown verifies the fix. 
- """ - - def test_reopen_queue_failure(self, cluster: Cluster): - proxies = cluster.proxy_cycle() - - # We want proxy connected to a replica - next(proxies) - - proxy = next(proxies) - consumer = proxy.create_client("consumer") - - consumer.open(tc.URI_PRIORITY, flags=["read"], succeed=True) - - # Set the quorum of all non-leader nodes to 99 to prevent them from - # becoming a new leader - leader = cluster.last_known_leader - next_leader = None - for node in cluster.nodes(): - # NOTE: Following assumes 4-node cluster - if node != leader: - node.set_quorum(99) - if node.datacenter == leader.datacenter: - next_leader = node - assert leader != next_leader - - # Kill the leader - cluster.drain() - leader.check_exit_code = False - leader.kill() - leader.wait() - - # Remove routing config on the next leader (to cause reopen - # queue failure) - cluster.work_dir.joinpath( - next_leader.name, "etc", "domains", f"{tc.DOMAIN_PRIORITY}.json" - ).unlink() - next_leader.command("DOMAINS RESOLVER CACHE_CLEAR ALL") - - # Make the quorum for selected node be 1 so it becomes new leader - next_leader.set_quorum(1) - - # Wait for new leader - cluster.wait_leader() - assert cluster.last_known_leader == next_leader - - consumer.stop_session() +def test_reopen_queue_failure(cluster: Cluster): + proxies = cluster.proxy_cycle() + + # We want proxy connected to a replica + next(proxies) + + proxy = next(proxies) + consumer = proxy.create_client("consumer") + + consumer.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + + # Set the quorum of all non-leader nodes to 99 to prevent them from + # becoming a new leader + leader = cluster.last_known_leader + next_leader = None + for node in cluster.nodes(): + # NOTE: Following assumes 4-node cluster + if node != leader: + node.set_quorum(99) + if node.datacenter == leader.datacenter: + next_leader = node + assert leader != next_leader + + # Kill the leader + cluster.drain() + leader.check_exit_code = False + leader.kill() + leader.wait() + + # Remove routing config on the next leader (to cause reopen + # queue failure) + cluster.work_dir.joinpath( + next_leader.name, "etc", "domains", f"{tc.DOMAIN_PRIORITY}.json" + ).unlink() + next_leader.command("DOMAINS RESOLVER CACHE_CLEAR ALL") + + # Make the quorum for selected node be 1 so it becomes new leader + next_leader.set_quorum(1) + + # Wait for new leader + cluster.wait_leader() + assert cluster.last_known_leader == next_leader + + consumer.stop_session() diff --git a/src/integration-tests/test_restart.py b/src/integration-tests/test_restart.py index df2797483c..c78347a9f5 100644 --- a/src/integration-tests/test_restart.py +++ b/src/integration-tests/test_restart.py @@ -13,102 +13,90 @@ from bmq.dev.it.util import attempt, wait_until -class TestRestartCluster: - """This suite of test cases exercises basic functionality of the cluster - while restarting all nodes in the cluster. - """ - - def test_basic(self, cluster: Cluster): - - # Start a producer and post a message. - proxies = cluster.proxy_cycle() - producer = next(proxies).create_client("producer") - producer.open(tc.URI_PRIORITY, flags=["write", "ack"], succeed=True) - producer.post(tc.URI_PRIORITY, payload=["msg1"], wait_ack=True, succeed=True) - - time.sleep(2) - # Before restarting the cluster, ensure that all nodes in the cluster - # have received the message at the storage layer. This is necessary - # in the absence of stronger consistency in storage replication in - # BMQ. 
Presence of message in the storage at each node is checked by - # sending 'STORAGE SUMMARY' command and grepping its output. +def test_basic(cluster: Cluster): + # Start a producer and post a message. + proxies = cluster.proxy_cycle() + producer = next(proxies).create_client("producer") + producer.open(tc.URI_PRIORITY, flags=["write", "ack"], succeed=True) + producer.post(tc.URI_PRIORITY, payload=["msg1"], wait_ack=True, succeed=True) + + time.sleep(2) + # Before restarting the cluster, ensure that all nodes in the cluster + # have received the message at the storage layer. This is necessary + # in the absence of stronger consistency in storage replication in + # BMQ. Presence of message in the storage at each node is checked by + # sending 'STORAGE SUMMARY' command and grepping its output. + for node in cluster.nodes(): + node.command(f"CLUSTERS CLUSTER {node.cluster_name} STORAGE SUMMARY") + + time.sleep(2) + for node in cluster.nodes(): + assert node.outputs_regex( + r"\w{10}\s+0\s+1\s+\d+\s+B\s+" + re.escape(tc.URI_PRIORITY), + timeout=20, + ) + # Above regex is to match line: + # C1E2A44527 0 1 68 B bmq://bmq.test.mmap.priority.~tst/qqq + # where columns are: QueueKey, PartitionId, NumMsgs, NumBytes, + # QueueUri respectively. Since we opened only 1 queue, we know that + # it will be assigned to partitionId 0. + + cluster.restart_nodes() + # For a standard cluster, states have already been restored as part of + # leader re-election. + if cluster.is_local: + producer.wait_state_restored() + + producer.post(tc.URI_PRIORITY, payload=["msg2"], wait_ack=True, succeed=True) + + consumer = next(proxies).create_client("consumer") + consumer.open(tc.URI_PRIORITY, flags=["read"], succeed=True) + consumer.wait_push_event() + assert wait_until(lambda: len(consumer.list(tc.URI_PRIORITY, block=True)) == 2, 2) + + +def test_wrong_domain(cluster: Cluster): + proxies = cluster.proxy_cycle() + producer = next(proxies).create_client("producer") + + assert Client.e_SUCCESS is producer.open(tc.URI_FANOUT, flags=["write"], block=True) + assert Client.e_SUCCESS is not producer.open( + "bmq://domain.does.not.exist/qqq", + flags=["write"], + block=True, + no_except=True, + ) + + +def test_migrate_domain_to_another_cluster(cluster: Cluster): + proxies = cluster.proxy_cycle() + producer = next(proxies).create_client("producer") + + assert Client.e_SUCCESS == producer.open(tc.URI_FANOUT, flags=["write"], block=True) + + # Before changing domain config of each node in the cluster, ensure that + # all nodes in the cluster have observed the previous open-queue event. + # If we don't do this, replicas may receive a queue creation event at + # the storage layer, pick up the domain, and, as a result, fail to + # successfully apply the queue creation event. + @attempt(3, 5) + def wait_replication(): for node in cluster.nodes(): node.command(f"CLUSTERS CLUSTER {node.cluster_name} STORAGE SUMMARY") - time.sleep(2) for node in cluster.nodes(): assert node.outputs_regex( - r"\w{10}\s+0\s+1\s+\d+\s+B\s+" + re.escape(tc.URI_PRIORITY), - timeout=20, + r"\w{10}\s+0\s+0\s+\d+\s+B\s+" + re.escape(tc.URI_FANOUT), + timeout=1, ) # Above regex is to match line: - # C1E2A44527 0 1 68 B bmq://bmq.test.mmap.priority.~tst/qqq + # C1E2A44527 0 0 68 B bmq://bmq.test.mmap.fanout/qqq # where columns are: QueueKey, PartitionId, NumMsgs, NumBytes, # QueueUri respectively. Since we opened only 1 queue, we know that # it will be assigned to partitionId 0. 
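# ---------------------------------------------------------------------------
# Editor's aside (not part of this diff): a standalone check showing how the
# pattern passed to 'outputs_regex' above lines up with the sample
# 'STORAGE SUMMARY' line quoted in the comment. The column spacing in the
# sample is illustrative, and the URI is spelled out instead of tc.URI_FANOUT
# so the snippet runs on its own.
import re

sample = "C1E2A44527    0    0    68 B    bmq://bmq.test.mmap.fanout/qqq"
pattern = r"\w{10}\s+0\s+0\s+\d+\s+B\s+" + re.escape("bmq://bmq.test.mmap.fanout/qqq")
# Columns: QueueKey (10 word chars), PartitionId 0, NumMsgs 0, NumBytes, unit, QueueUri.
assert re.search(pattern, sample) is not None
# ---------------------------------------------------------------------------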
- cluster.restart_nodes() - # For a standard cluster, states have already been restored as part of - # leader re-election. - if cluster.is_local: - producer.wait_state_restored() - - producer.post(tc.URI_PRIORITY, payload=["msg2"], wait_ack=True, succeed=True) - - consumer = next(proxies).create_client("consumer") - consumer.open(tc.URI_PRIORITY, flags=["read"], succeed=True) - consumer.wait_push_event() - assert wait_until( - lambda: len(consumer.list(tc.URI_PRIORITY, block=True)) == 2, 2 - ) - - def test_wrong_domain(self, cluster: Cluster): - proxies = cluster.proxy_cycle() - producer = next(proxies).create_client("producer") - - assert Client.e_SUCCESS is producer.open( - tc.URI_FANOUT, flags=["write"], block=True - ) - assert Client.e_SUCCESS is not producer.open( - "bmq://domain.does.not.exist/qqq", - flags=["write"], - block=True, - no_except=True, - ) - - def test_migrate_domain_to_another_cluster(self, cluster: Cluster): - proxies = cluster.proxy_cycle() - producer = next(proxies).create_client("producer") + cluster.config.domains.clear() + cluster.deploy_domains() + cluster.restart_nodes() - assert Client.e_SUCCESS == producer.open( - tc.URI_FANOUT, flags=["write"], block=True - ) - - # Before changing domain config of each node in the cluster, ensure that - # all nodes in the cluster have observed the previous open-queue event. - # If we don't do this, replicas may receive a queue creation event at - # the storage layer, pick up the domain, and, as a result, fail to - # successfully apply the queue creation event. - @attempt(3, 5) - def wait_replication(): - for node in cluster.nodes(): - node.command(f"CLUSTERS CLUSTER {node.cluster_name} STORAGE SUMMARY") - - for node in cluster.nodes(): - assert node.outputs_regex( - r"\w{10}\s+0\s+0\s+\d+\s+B\s+" + re.escape(tc.URI_FANOUT), - timeout=1, - ) - # Above regex is to match line: - # C1E2A44527 0 0 68 B bmq://bmq.test.mmap.fanout/qqq - # where columns are: QueueKey, PartitionId, NumMsgs, NumBytes, - # QueueUri respectively. Since we opened only 1 queue, we know that - # it will be assigned to partitionId 0. 
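# ---------------------------------------------------------------------------
# Editor's aside (not part of this diff): 'wait_until(predicate, timeout, ...)'
# from bmq.dev.it.util is the polling helper these tests lean on for
# asynchronous assertions (see the test bodies above). The sketch below is NOT
# the real implementation; it only illustrates the assumed semantics: poll the
# predicate until it returns True or roughly 'timeout' seconds elapse.
import time

def wait_until_sketch(predicate, timeout, interval=0.1, quiet=False):
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    if not quiet:
        print(f"condition still false after {timeout}s")
    return False

# Usage mirrors the tests above, e.g.:
#   assert wait_until_sketch(lambda: len(consumer.list(tc.URI_PRIORITY, block=True)) == 2, 2)
# ---------------------------------------------------------------------------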
- - cluster.config.domains.clear() - cluster.deploy_domains() - cluster.restart_nodes() - - assert Client.e_SUCCESS != producer.open( - tc.URI_FANOUT, flags=["write"], block=True - ) + assert Client.e_SUCCESS != producer.open(tc.URI_FANOUT, flags=["write"], block=True) diff --git a/src/integration-tests/test_strong_consistency.py b/src/integration-tests/test_strong_consistency.py index 9b690e3434..144f4bfa17 100644 --- a/src/integration-tests/test_strong_consistency.py +++ b/src/integration-tests/test_strong_consistency.py @@ -1,7 +1,6 @@ import contextlib import bmq.dev.it.testconstants as tc -from bmq.schemas import mqbconf from bmq.dev.it.fixtures import ( # pylint: disable=unused-import Cluster, local_cluster, @@ -9,6 +8,7 @@ tweak, ) from bmq.dev.it.util import wait_until +from bmq.schemas import mqbconf class Suspender: diff --git a/src/integration-tests/test_subscriptions.py b/src/integration-tests/test_subscriptions.py index 219e778214..97fa6b8595 100644 --- a/src/integration-tests/test_subscriptions.py +++ b/src/integration-tests/test_subscriptions.py @@ -53,7 +53,6 @@ import bmq.dev.it.testconstants as tc import pytest -from bmq.schemas import mqbcfg from bmq.dev.it.fixtures import ( # pylint: disable=unused-import Cluster, cluster, @@ -64,6 +63,7 @@ from bmq.dev.it.process.broker import Broker from bmq.dev.it.process.client import Client, ITError, Message from bmq.dev.it.util import wait_until +from bmq.schemas import mqbcfg EMPTY_SUBSCRIPTION = [] EPS = 1e-6
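# ---------------------------------------------------------------------------
# Editor's aside (not part of this diff): several files above import
# 'standard_cluster as cluster  # pylint: disable=unused-import'. Pytest
# resolves fixture arguments against the names bound in the test module, so
# the alias is what lets module-level tests declare a 'cluster' parameter; the
# import only looks unused to linters, hence the suppression. A minimal
# self-contained illustration of the same mechanism, with hypothetical fixture
# and test names:
import pytest

@pytest.fixture
def standard_cluster():
    # Stand-in for the real fixture provided by bmq.dev.it.fixtures.
    return "cluster-handle"

# Bind the fixture under the name the test below will request; this assignment
# looks "unused" to static analysis, just like the aliased imports above.
cluster = standard_cluster

def test_uses_alias(cluster):
    assert cluster == "cluster-handle"
# ---------------------------------------------------------------------------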