From 8ad178851cc8dd01e5387336b58f5d6940aef8a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carl=20Wall=C3=A9n?= Date: Thu, 9 Mar 2023 15:46:17 +0200 Subject: [PATCH] em-odp v3.0.0 Event Machine (EM) on ODP v3.0.0 See changes in CHANGE_NOTES, README and include/event_machine/README_API --- CHANGE_NOTES | 190 +- README | 41 +- config/em-odp.conf | 788 +-- configure.ac | 70 +- doc/Doxyfile | 217 +- include/event_machine.h | 540 ++- include/event_machine/README_API | 187 +- .../event_machine/api/event_machine_event.h | 1760 ++++--- .../api/event_machine_event_group.h | 916 ++-- .../event_machine/api/event_machine_types.h | 1090 ++--- .../helper/event_machine_debug.h | 89 + .../event_machine/platform/env/env_bitmask.h | 606 ++- .../event_machine/platform/env/env_spinlock.h | 148 +- .../event_machine/platform/env/environment.h | 253 +- .../platform/event_machine_config.h | 580 ++- .../platform/event_machine_hooks.h | 751 +-- .../platform/event_machine_hw_specific.h | 558 +-- .../platform/event_machine_hw_types.h | 1215 ++--- .../platform/event_machine_init.h | 610 +-- .../platform/event_machine_odp_ext.h | 466 +- .../platform/event_machine_pool.h | 916 ++-- m4/em_libconfig.m4 | 2 +- programs/common/cm_error_handler.h | 121 +- programs/common/cm_pktio.c | 2630 +++++----- programs/common/cm_pktio.h | 1204 ++--- programs/common/cm_pool_config.h | 113 +- programs/common/cm_setup.c | 3140 ++++++------ programs/common/cm_setup.h | 458 +- programs/example/api-hooks/api_hooks.c | 1280 ++--- programs/example/queue/ordered.c | 1710 ++++--- programs/example/queue/queue_types_ag.c | 3173 ++++++------ programs/example/queue/queue_types_local.c | 3187 ++++++------ programs/example/queue_group/queue_group.c | 2188 ++++----- programs/example/test/test.c | 1162 ++--- programs/packet_io/Makefile.am | 8 +- programs/packet_io/l2fwd.c | 553 +++ programs/packet_io/loopback.c | 1610 +++--- programs/packet_io/loopback_ag.c | 1626 +++---- programs/packet_io/loopback_local.c | 1648 +++---- programs/packet_io/loopback_local_multircv.c | 1680 +++---- programs/packet_io/loopback_multircv.c | 1640 +++---- programs/packet_io/multi_stage.c | 1968 ++++---- programs/packet_io/multi_stage_local.c | 2004 ++++---- programs/performance/Makefile.am | 12 +- programs/performance/atomic_processing_end.c | 1619 ++++--- programs/performance/loop.c | 914 ++-- programs/performance/loop_multircv.c | 934 ++-- programs/performance/loop_refs.c | 464 ++ programs/performance/pairs.c | 1112 +++-- programs/performance/queues.c | 2062 ++++---- programs/performance/queues_local.c | 2132 ++++---- programs/performance/queues_unscheduled.c | 2412 ++++----- programs/performance/scheduling_latency.c | 607 +++ programs/performance/scheduling_latency.h | 119 + programs/performance/send_multi.c | 2602 +++++----- programs/performance/timer_test_periodic.c | 4309 +++++++++-------- robot-tests/README.md | 4 +- robot-tests/common.resource | 25 +- robot-tests/example/emcli.robot | 10 +- robot-tests/example/startup_pools.robot | 178 + .../test-startup-pools-confs/bad_num.conf | 6 + .../default-id-non-default-name.conf | 16 + .../default-name-non-default-id.conf | 16 + .../invalid-align-offset.conf | 17 + .../invalid-name.conf | 15 + .../invalid-pkt-headroom.conf | 19 + .../invalid-user-area.conf | 17 + .../no-align-offset-in-use-value.conf | 16 + .../no-align-offset-in-use.conf | 17 + .../no-align-offset-value.conf | 17 + .../test-startup-pools-confs/no-conf.conf | 4 + .../no-event-type.conf | 7 + .../no-num-subpools.conf | 9 + 
.../test-startup-pools-confs/no-pool-cfg.conf | 5 + .../no-subpools-num.conf | 13 + .../no-subpools-size.conf | 11 + .../test-startup-pools-confs/no-subpools.conf | 10 + .../non-default-pools.conf | 84 + .../num-conf-not-match.conf | 6 + .../num-subpools-not-match.conf | 12 + robot-tests/performance/loop_refs.robot | 29 + scripts/em_odp_check | 6 +- scripts/robot_test.sh | 10 +- scripts/style_check.py | 134 +- src/Makefile.am | 3 + src/add-ons/event_timer/em_timer.c | 118 +- src/add-ons/event_timer/event_machine_timer.c | 2073 ++++---- src/em_atomic_group.c | 912 ++-- src/em_atomic_group.h | 288 +- src/em_atomic_group_types.h | 180 +- src/em_chaining.c | 486 +- src/em_chaining.h | 450 +- src/em_cli.c | 2164 ++++----- src/em_core.c | 320 +- src/em_daemon_eo.c | 435 +- src/em_dispatcher.c | 330 +- src/em_dispatcher.h | 1232 ++--- src/em_dispatcher_types.h | 108 +- src/em_eo.c | 2802 +++++------ src/em_eo.h | 356 +- src/em_eo_types.h | 204 +- src/em_error.h | 222 +- src/em_event.c | 527 +- src/em_event.h | 2944 ++++++----- src/em_event_inline.h | 113 + src/em_event_state.c | 2164 +++++---- src/em_event_state.h | 554 +-- src/em_event_types.h | 596 +-- src/em_hook_types.h | 213 +- src/em_hooks.c | 590 +-- src/em_hooks.h | 268 +- src/em_include.h | 245 +- src/em_info.c | 714 ++- src/em_init.c | 300 +- src/em_init.h | 336 +- src/em_internal_event.c | 1059 ++-- src/em_libconfig.c | 880 ++-- src/em_libconfig.h | 413 +- src/em_libconfig_types.h | 119 +- src/em_mem.h | 500 +- src/em_pool.c | 2657 ++++++---- src/em_pool.h | 309 +- src/em_queue.c | 3363 ++++++------- src/em_queue.h | 596 +-- src/em_queue_inline.h | 208 + src/em_queue_types.h | 550 ++- src/event_machine_atomic_group.c | 941 ++-- src/event_machine_dispatcher.c | 456 +- src/event_machine_eo.c | 2272 ++++----- src/event_machine_event.c | 2636 +++++----- src/event_machine_event_group.c | 1499 +++--- src/event_machine_helper.c | 245 +- src/event_machine_hooks.c | 400 +- src/event_machine_hw_specific.c | 402 +- src/event_machine_init.c | 906 ++-- src/event_machine_odp_ext.c | 561 ++- src/event_machine_pool.c | 588 +-- src/misc/objpool.c | 245 +- 138 files changed, 58144 insertions(+), 51015 deletions(-) create mode 100644 include/event_machine/helper/event_machine_debug.h create mode 100644 programs/packet_io/l2fwd.c create mode 100644 programs/performance/loop_refs.c create mode 100644 programs/performance/scheduling_latency.c create mode 100644 programs/performance/scheduling_latency.h create mode 100644 robot-tests/example/startup_pools.robot create mode 100644 robot-tests/example/test-startup-pools-confs/bad_num.conf create mode 100644 robot-tests/example/test-startup-pools-confs/default-id-non-default-name.conf create mode 100644 robot-tests/example/test-startup-pools-confs/default-name-non-default-id.conf create mode 100644 robot-tests/example/test-startup-pools-confs/invalid-align-offset.conf create mode 100644 robot-tests/example/test-startup-pools-confs/invalid-name.conf create mode 100644 robot-tests/example/test-startup-pools-confs/invalid-pkt-headroom.conf create mode 100644 robot-tests/example/test-startup-pools-confs/invalid-user-area.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-align-offset-in-use-value.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-align-offset-in-use.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-align-offset-value.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-conf.conf create mode 100644 
robot-tests/example/test-startup-pools-confs/no-event-type.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-num-subpools.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-pool-cfg.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-subpools-num.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-subpools-size.conf create mode 100644 robot-tests/example/test-startup-pools-confs/no-subpools.conf create mode 100644 robot-tests/example/test-startup-pools-confs/non-default-pools.conf create mode 100644 robot-tests/example/test-startup-pools-confs/num-conf-not-match.conf create mode 100644 robot-tests/example/test-startup-pools-confs/num-subpools-not-match.conf create mode 100644 robot-tests/performance/loop_refs.robot create mode 100644 src/em_event_inline.h create mode 100644 src/em_queue_inline.h diff --git a/CHANGE_NOTES b/CHANGE_NOTES index eca01792..25e13286 100644 --- a/CHANGE_NOTES +++ b/CHANGE_NOTES @@ -1,4 +1,4 @@ -Copyright (c) 2013-2022, Nokia Solutions and Networks +Copyright (c) 2013-2023, Nokia Solutions and Networks All rights reserved. Redistribution and use in source and binary forms, with or without @@ -52,6 +52,194 @@ Examples: - See em-odp/README for usage and compilation instructions. - See em-odp/include/event_machine/README_API for API changes +-------------------------------------------------------------------------------- +Event Machine (EM) on ODP v3.0.0 +-------------------------------------------------------------------------------- +- Support for EM API v3.0 (em-odp/include), see API changes in + em-odp/include/event_machine/README_API. + +- Event References + See em-odp/include/event_machine/README_API + +- Event Vectors + See em-odp/include/event_machine/README_API + +- EM debug timestamps + See em-odp/include/event_machine/README_API + - Enabled with new configure-script option '--enable-debug-timestamps': + (see configure.ac) + Override EM-define value 'EM_DEBUG_TIMESTAMP_ENABLE' to 0...2 + --enable-debug-timestamps=0...2 Set 'EM_DEBUG_TIMESTAMP_ENABLE' to the + given value: 1=low overhead, 2=strict time + --enable-debug-timestamps<=yes(=1) Set 'EM_DEBUG_TIMESTAMP_ENABLE' to 1 + --enable-debug-timestamps Set 'EM_DEBUG_TIMESTAMP_ENABLE' to 1 + --disable-debug-timestamps or + no option given Use 'EM_DEBUG_TIMESTAMP_ENABLE' value + from source code (0...2, default 0) + +- EM config file options - config/em-odp.conf: + - Config file version number bumped up to "0.0.15", see new options below. + +- EM config file option: event_chaining.order_keep = false/true + Option description: + Events sent with em_send...() to an event chaining queue will be + passed to the user provided 'event_send_device/_multi()' functions. + EM can ensure that calls to these functions are: + 1) made in order (order_keep = true) + or + 2) make no extra effort to keep the order (order_keep = false), + e.g. if the user (or packet output) handles ordering or does not + care about it. + Note: Only meaningful when used together with (parallel-)ordered queues. + Order is implicitly guaranteed when sent during an atomic context and + never guaranteed when sent during a parallel context. + Only set to 'true' if queues of type EM_QUEUE_TYPE_PARALLEL_ORDERED + are used with event chaining, no benefit otherwise. Using 'false' lets EM + save on some resources. + + Note! Currently only event_chaining.order_keep = false is supported. 
+ Trying 'true' gives the following printout: + "Config option event_chaining.order_keep: true(1) currently not supported" + +- Env additions + - Return difference of cpu cycles (cycles2 - cycles1): + uint64_t env_cycles_diff(uint64_t cycles2, uint64_t cycles1) + +- Event State Verification (ESV) changes + The introduction of Event References has caused ESV to change a bit. + The ESV counters are always modified together atomically as one uint64_t, + as before. One of the ESV-counters, 'free_cnt', has been replaced by 'ref_cnt' + that stands for 'reference count', i.e. how many references exist for a + certain event. A newly allocated event with no further references has + 'ref_cnt=1'. When one additional reference is created, the reference count + increases to 'ref_cnt=2' etc. Freeing an event, or a reference, decreases the + reference count. A freed event (back into the event pool) should have + 'ref_cnt=0'. + The 'send_cnt' works as before, sending the event (or e.g. giving it to the + timer) increases the 'send_cnt' while dispatching or dequeueing decreases the + 'send_cnt'. Also the 'evgen' count works as before but has been restricted + to "normal" events only, i.e. 'evgen' will not be taken into account for + references. + + 1) "Normal" ESV Error format: (double-free error) + ----------------------------- + EM ERROR:0x80000010 ESCOPE:0xFF000603 + core:00 ecount:0(0) em_event_state.c:350 esv_error() + ESV: Event:0x27fffd9c7f000 state error -- counts: send:0 ref:-1 evgen:2(3) + Help: OK: 'send <= ref, both >=0' AND 'hdl evgen == evgen'. Err otherwise + prev-state:em_free() core:00: EO:0xA-"myEO1" Q:0xB-"myQ1" u32[0]:(n/a) + => err-state:em_free() core:01: EO:0xC-"myEO2" Q:0xD-"myQ2" u32[0]:0x00000000 + event:0x00027fffd9c7f000: ptr:0x7fffd9c7f000 + + 2) Reference ESV Error format: ("too many frees" error, here two frees) + ------------------------------ + EM ERROR:0x80000010 ESCOPE:0xFF000610 + core:00 ecount:0(0) em_event_state.c:342 esv_error() + ESV: RefEvent:0x27fffd9472000 state error -- counts: send:0 ref:-1 (evgen:4 ignored for refs) + Help: OK: 'send <= ref, both >=0'. Err otherwise + prev-state:n/a (not valid for event references) + => err-state:em_free() core:00: EO:0xC-"myEO2" Q:0xD-"myQ2" u32[0]:0x00000000 + event:0x00027fffd9472000: ptr:0x7fffd9472000 + + 3) Unmark ESV Error format: + --------------------------- + EM ERROR:0x80000010 ESCOPE:0xFF000611 + core:00 ecount:0(0) em_event_state.c:363 esv_error() + ESV: Event:0x27fffd9c7f100 state error - Invalid 'unmark'-API use + prev-state:em_free() core:00: EO:0xA-"myEO1" Q:0xB-"myQ1" u32[0]:(n/a) + => err-state:em_event_unmark_free() core:00: EO:0xC-"myEO2" Q:0xD-"myQ2" u32[0]:0x00000000 + +-------------------------------------------------------------------------------- +Event Machine (EM) on ODP v2.10.0 +-------------------------------------------------------------------------------- +- Support for EM API v2.10 (em-odp/include), see API changes in + em-odp/include/event_machine/README_API. + * EM Idle Hooks + * Startup pools (changes mostly related to the EM config file) + +- EM Idle Hooks + (see include/event_machine/platform/event_machine_hooks.h for details) + The user can provide idle hooks to EM that will be run on each EM core when + there are no events to process ('to_idle' and 'while_idle' hooks). + Further, the 'to_active' hook will be run, if provided, when the core again + starts receiving events. 
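+
+  Illustrative sketch (not part of this patch): taking the idle hooks into
+  use at startup via em_conf_t. The callbacks 'my_to_idle'/'my_to_active'
+  are hypothetical and their prototypes are assumptions - see the
+  em_idle_hook_*_t typedefs in event_machine_hooks.h for the exact
+  signatures. The registration alternatives are detailed below.
+
+      #include <event_machine.h>
+
+      /* Assumed prototype, check em_idle_hook_to_idle_t */
+      static void my_to_idle(uint64_t to_idle_delay_ns)
+      {
+              (void)to_idle_delay_ns;
+              /* e.g. prepare for a low-power wait */
+      }
+
+      /* Assumed prototype, check em_idle_hook_to_active_t */
+      static void my_to_active(void)
+      {
+              /* e.g. resume full-speed processing */
+      }
+
+      em_conf_t conf;
+
+      em_conf_init(&conf);
+      conf.idle_hooks.to_idle_hook = my_to_idle;
+      conf.idle_hooks.to_active_hook = my_to_active;
+      /* ...set the other em_conf_t fields as usual, then: */
+      em_init(&conf);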
+  User idle hook functions can be provided via em_conf_t::idle_hooks{...}
+  when calling em_init() or via hook register functions. EM will call the
+  given hooks in the dispatcher depending on whether there are events to be
+  processed by the core.
+  Note: the 'EM_IDLE_HOOKS_ENABLE' value can be overridden by a command-line
+        option to the 'configure' script, e.g.:
+        $build> ../configure ... --enable-idle-hooks
+        The overridden value will be made available to the application via a
+        pkgconfig set define.
+  Idle hooks available:
+    - em_idle_hook_to_idle_t:
+        The 'to_idle' hook will be called by the EM dispatcher when a core is
+        entering the IDLE state i.e. when the core doesn't get any new events
+        to process.
+    - em_idle_hook_to_active_t:
+        The 'to_active' hook will be called by the EM dispatcher when a core is
+        entering the ACTIVE state i.e. when the core gets events after being
+        idle.
+    - em_idle_hook_while_idle_t:
+        The 'while_idle' hook will be called by the EM dispatcher when a core is
+        already in the IDLE state and stays in it i.e. the core doesn't get any
+        events.
+  Register-functions:
+    em_status_t em_hooks_register_to_idle(em_idle_hook_to_idle_t func);
+    em_status_t em_hooks_unregister_to_idle(em_idle_hook_to_idle_t func);
+    em_status_t em_hooks_register_to_active(em_idle_hook_to_active_t func);
+    em_status_t em_hooks_unregister_to_active(em_idle_hook_to_active_t func);
+    em_status_t em_hooks_register_while_idle(em_idle_hook_while_idle_t func);
+    em_status_t em_hooks_unregister_while_idle(em_idle_hook_while_idle_t func);
+  Alternatively provide the idle-hook functions as arguments to em_init() at
+  start-up: em_init(em_conf_t::idle_hooks.to_idle_hook
+                                         .to_active_hook
+                                         .while_idle_hook);
+
+- Startup pools
+  (see config/em-odp.conf for more documentation and details)
+  - EM config file option: 'startup_pools'
+    Configure EM event-pools via the EM config file. These pools are created
+    during EM startup and destroyed when EM terminates (if still available).
+    When startup pools are used, EM will create them according to the
+    configuration given in the EM config file during em_init().
+    Startup pools are configured via the EM config file option: 'startup_pools'
+    The user should override the EM default config file (config/em-odp.conf) with
+    a separate runtime configuration file containing the desired application pool
+    configurations and provide it to EM via the 'EM_CONFIG_FILE' env variable:
+        $> EM_CONFIG_FILE=/my-em.conf ./my-em-app [args]
+    If no startup pools are given, only the default pool needs to be created. In
+    this case, the default pool configuration is specified in the parameters of
+    em_init(), more specifically, in the struct em_conf_t::default_pool_cfg
+
+    Note that if the default pool is configured via the config file, it overrides
+    the default pool configuration given to em_init().
+    The priority regarding the default pool configuration is as follows:
+        +--------------+   +-------------+   +-----------------------------+
+        | Runtime.conf | > | em-odp.conf | > | em_conf_t::default_pool_cfg |
+        +--------------+   +-------------+   +-----------------------------+
+    The default pool configuration specified in the runtime configuration file
+    overrides the one given in the default configuration file (em-odp.conf),
+    which overrides the one passed as a parameter to em_init().
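+
+  Illustrative sketch (not part of this patch): the lowest-priority
+  alternative, i.e. the default pool configuration passed to em_init() in
+  em_conf_t::default_pool_cfg. The subpool sizes/numbers below are example
+  values only:
+
+      em_conf_t conf;
+      em_pool_cfg_t pool_cfg;
+
+      em_conf_init(&conf);
+      em_pool_cfg_init(&pool_cfg);
+      pool_cfg.event_type = EM_EVENT_TYPE_SW;
+      pool_cfg.num_subpools = 1;
+      pool_cfg.subpool[0].size = 512;   /* example value */
+      pool_cfg.subpool[0].num = 16384;  /* example value */
+      conf.default_pool_cfg = pool_cfg;
+      /* ...set the other em_conf_t fields, then: */
+      em_init(&conf);
+
+  A 'startup_pools' section in the runtime config file, or in em-odp.conf,
+  overrides this configuration as described above.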
+
+  Note that the 'startup_pools'-options are pool specific configurations given
+  directly to em_pool_create(name, pool, pool_cfg), thus overriding the global
+  config file settings such as pool.align_offset, pool.user_area_size and
+  pool.pkt_headroom that might be set in the config file.
+
+- Core: support odp-thread counts larger than 256
+
+- ESV: reduce the number of ESV state transitions for output from EM
+  Event Chaining output and output via EM output queues are both based on
+  calling a user provided function triggered from em_send() - no need to
+  toggle ESV state (between usr2em <-> em2usr) many times. Keep the ownership of
+  the event with the user until the user provided callback.
+
+- Queue: check that the EM queue context is valid
+  EM will verify that the used ODP queue context actually points to an
+  EM queue-elem and not to something else.
+  Only enabled with EM_CHECK_LEVEL=3 (configure option: --enable-check-level=3)
+
 --------------------------------------------------------------------------------
 Event Machine (EM) on ODP v2.9.0
 --------------------------------------------------------------------------------
diff --git a/README b/README
index a988b428..7236b9a6 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-Copyright (c) 2013-2022, Nokia Solutions and Networks
+Copyright (c) 2013-2023, Nokia Solutions and Networks
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -85,6 +85,45 @@ runtime config files:
 
 Stop by pressing Ctrl-C.
 
+===============================================================================
+ODP configuration changes for EM (em-odp)
+===============================================================================
+The ODP configuration file lets the user tweak ODP settings.
+For odp-linux, some settings are better modified when using EM (em-odp), read
+about config file usage here: /config/README
+
+Either change the default values in the config files and recompile or
+override the defaults by providing your own config files at startup:
+> ODP_CONFIG_FILE=my-odp.conf EM_CONFIG_FILE=my-em.conf \
+  ./programs/example/hello/hello -c 0xe -t
+
+odp-linux default configuration file: /config/odp-linux-generic.conf
+Usually EM works better with the following config changes:
+
+- Timer: Use inline timer implementation with EM.
+  timer.inline = 0 -> 1
+
+- Scheduler:
+  - Priority level spread, the optimal value is typically the number of threads
+    using the scheduler:
+    sched_basic.prio_spread = 4 -> 'number of EM cores used'
+
+  - Disable ODP automatically updated schedule groups, EM does not need them.
+    (These options are actually DEPRECATED so prefer using the ODP API function
+     odp_schedule_config() instead)
+    sched_basic.group_enable: {all = 1 -> 0
+                               worker = 1 -> 0
+                               control = 1 -> 0}
+
+  - Ordered queue reorder stash size. ODP might drop events if the
+    stash/queue becomes full, causing EM ESV failure since event(s) dropped
+    outside of EM can't be tracked, thus disable this.
+    sched_basic.order_stash_size = 512 -> 0
+
+- Stash: Disable strict stash size.
+  Rely on stash 'put' return value to determine operation status.
+ stash.strict_size = 1 -> 0 + =============================================================================== CPU Architecture specific options =============================================================================== diff --git a/config/em-odp.conf b/config/em-odp.conf index 28cbe47e..b981db0f 100644 --- a/config/em-odp.conf +++ b/config/em-odp.conf @@ -1,315 +1,473 @@ -# EM runtime configuration options -# -# This template configuration file (em-odp.conf) is hardcoded -# during the configure/build phase into em-odp and the values defined here are -# used at runtime unless overridden by the user with the optional environment -# variable EM_CONFIG_FILE=my-emodp.conf at program startup. -# -# This configuration file MUST include all configuration options. -# -# The environment variable EM_CONFIG_FILE can be used to override default values -# and it doesn't have to include all available options. The missing options are -# replaced with hardcoded default values. -# -# The options defined here are implementation specific and valid option -# values should be checked from the implementation code. - -# Mandatory fields -em_implementation = "em-odp" -config_file_version = "0.0.14" - -# Pool options -pool: { - # Enable EM pool usage statistics collection during runtime (true/false) - # - # Pool usage statistics is updated e.g. during em_alloc() and em_free(), - # thus, if enabled, inducing some overhead. Pool information can be - # obtained by calling em_pool_info() and, if pool usage statistics is - # enabled, will return also pool usage statistics. - statistics_enable = false - - # Default alignment offset in bytes for the event payload start address - # - # Set the event payload alignment offset for events allocated from - # any pool. This is a global setting concerning all pools. - # A similar, but pool-specific option, is 'em_pool_cfg_t::align_offset{}' - # that overrides this global setting for a specific pool when given to - # em_pool_create(..., pool_cfg). - # - # Use this option to globally adjust the payload layout so that a - # specific part of it can be placed at a needed alignment for e.g. - # HW access. - # - # The default EM event payload start address alignment is a power-of-two - # that is at minimum 32 bytes (i.e. 32 B, 64 B, 128 B etc. depending on - # e.g. target cache-line size). - # The 'align_offset' option can be used to fine-tune the start-address - # by a small offset to e.g. make room for a small SW header before the - # rest of the payload that might need a specific alignment for direct - # HW-access. - # Example: setting 'align_offset = 8' makes sure that the payload - # _after_ 8 bytes will be aligned at minimum (2^x) 32 bytes for all - # pools that do not override this value. - # - # start: base - align_offset - # | - # v - # +------------------------------------------+ - # | <----- | Event Payload | - # +------------------------------------------+ - # ^ - # | - # base (min 32B aligned, power-of-two) - align_offset = 0 - - # Default event user area size in bytes for all events from all pools. - # - # The event user area is located within the event metadata (hdr) and - # is not part of the event payload. The event user area can e.g. be - # used to store additional state data related to the event payload - # content. - # - # This is a global setting that can be overridden per pool using - # 'em_pool_cfg_t::user_area{}' with em_pool_create(). 
- # - user_area_size = 0 - - # Default minimum packet headroom in bytes for events allocated from - # EM-pools of type: EM_EVENT_TYPE_PACKET. Ignored for other pool types. - # - # This is a global setting for EM-pools of type EM_EVENT_TYPE_PACKET. - # A similar, but pool-specific option, is 'em_pool_cfg_t::pkt.headroom{}' - # that overrides this global setting for a specific pkt-pool when given - # to em_pool_create(..., pool_cfg). - # - # Use this option to globally set the minimum headroom in bytes for - # events/packets allocated from pkt-pools. Each event/packet will have - # at least this much headroom. - # - # 0: Explicitly set 'No headroom' for the pool. - # - # Max value is determined by the ODP implementation capabilities - # (a value larger than 'max' will lead to setup error) - # - # Note: using 'align_offset > 0' reduces the packet-headroom by the - # same amount, for example: - # "align_offset=8, pkt_headroom=128 ==> headroom= 128-8 = 120 B" - # - pkt_headroom = 128 -} - -queue_group: { - # Create the EM single-core queue groups (true/false) - # - # Select whether EM should create a queue group per EM-core. - # Corresponds to the queue groups with name: - # EM_QUEUE_GROUP_CORE_BASE_NAME + "%d" - # Each created queue group only has one core set in its core mask. - # - # EM earlier relied on these queue groups for internal core specific - # messaging and also allowed applicatioins to use them. Currently EM - # does not internally need these groups but will create them based on - # this option for applications relying on their existence. - create_core_queue_groups = false -} - -# Queue options -queue: { - # Default minimum number of events that a queue can hold. - # - # This value will be used in queue creation (em_queue_create*()) if no - # other size information has been provided via the - # em_queue_conf_t::min_events parameter. - # Setting 'min_events_default = 0' will use the odp-implementation's - # default values (might vary from one odp-implementation to another). - min_events_default = 4096 - - priority: { - # Select the queue priority mapping mode (EM API to ODP) - # - # 0: legacy, map EM prios to ODP min/default/max, 3 prios only - # (old default used before adding the map_mode option) - # 1: map according to ODP runtime number of priorities - # (linear fit to full range of ODP priorities available) - # 2: custom priority mapping - # (use custom_map below) - # - map_mode = 1 - - # Custom priority map (required when map_mode = 2) - # - # This array needs to have EM_QUEUE_PRIO_NUM entries - # (typically 8). First entry is for lowest priority (0). - # The value is ADDED to odp_schedule_min_prio() and then passed - # to ODP, i.e. values are offsets from odp_schedule_min_prio(). - # Values given here must be valid for ODP runtime configuration, - # i.e. value plus odp_schedule_min_prio() must not exceed - # odp_schedule_max_prio(). - # - #custom_map = [0, 0, 1, 3 ,4 ,6 ,7, 7] - } -} - -# Event-Chaining options -event_chaining: { - # Note: - # The user _must_ provide an implementation for the overrideable - # 'event_send_device()' and 'event_send_device_multi()' functions in - # order to use Event-Chaining functionality! - # The functions are declared with '__attribute__((weak))' in the EM-lib - # to allow overriding. - # Linking user code, which includes overrides for those functions, - # against the EM-lib will replace the stubs with the user provided - # variants. 
- - # Number of queues used for ordering event-chaining events - # - # Maintain the order of events sent to an another device from within - # an EM ordered context (i.e. send from within the EO receive function - # when handling an event from a parallel-ordered queue). - # An event-chaining queue has no 'real' queue associated with it, the - # queue-id simply indicates that events sent to the queue should be - # sent out of EM via a user specified function (and perhaps out towards - # another device). Sending events out of EM requires some intervention, - # especially sending during an ordered context needs to maintain the - # event order as determined by the context. To maintain event order for - # event-chaining queues, em-odp uses a set of 'real' queues for order - # tracking - the number of these queues is set by the following option: - # (0 = no ordering) - num_order_queues = 8 -} - -# Event State Verification (ESV) options -# -# Note: Options only considered if ESV is enabled at compile-time -# via the configure option '--enable-esv', see configure.ac -esv: { - # Runtime ESV enable/disable option. - # Allows the user to disable ESV without recompilation when - # 'configure --enable-esv'. - enable = true - - # Store the valid state for the event at runtime each time the internal - # event state changes so that ESV errors can log both the previous - # valid state ('prev-state') and the last erroneous state ('new-state') - # when logging the error: - # - # EM Error log (send after free error): - # 1) esv.store_state = true: - # EM ERROR:0x80000010 ESCOPE:0xFF000201 EO:0x2-"eo1" - # ESV: Event:0x2007fa6822300 state error ... - # prev-state:em_free() core:01: EO:0x2-"eo1" Q:0x3-"Q1" ... - # => new-state:em_send() core:00: EO:0x2-"eo1" Q:0x3-"Q1" ... - # - # 2) esv.store_state = false (note "prev-state:n/a"): - # EM ERROR:0x80000010 ESCOPE:0xFF000201 EO:0x2-"eo1" - # ESV: Event:0x2007fa6822300 state error ... - # prev-state:n/a - # => new-state:em_send() core:00: EO:0x2-"eo1" Q:0x3-"Q1" ... - # - # Disabling (= false) might improve runtime performance at the cost of - # losing debug data when an ESV error occurs. - store_state = true - - # Store the first 32bits of the event payload during each valid - # event-state transition. This allows for a comparison of the payload - # content between the previous valid state and the invalid state. - # Note: The first 32bits of the payload will always be printed in the - # error log for the invalid state regardless of this setting. - # Enabling will impact performace somewhat. - store_payload_first_u32 = false - - # Preallocate all events in a pool during pool creation to set - # an initial ESV-state for each event that can be tracked over - # multiple allocs and frees. - prealloc_pools = true -} - -# EM Command Line Interface options -# -# Example usage: -# Access EM CLI server with telnet client -# $ telnet -# telnet> open 127.0.0.1 55555 # or localhost -# EM-ODP> help -# EM-ODP> em_core_print -cli: { - # Runtime cli enable/disable option. - # By default cli is disabled. To enable cli, set this to true. - enable = false - - # IP address to which the EM CLI server will be bound to. - # - # The IP address can be set to one of the following three types: - # - # 1. "127.0.0.1": will receive data only from the same machine. - # - # 2. Any one of the IP addresses of your machine. Command ifconfig - # can be used to get the IP addresses of your machine. - # - # 3. "0.0.0.0": will receive data sent to any IP address of your - # machine. 
- # - # The last two types of IP address can receive data from any other - # machine in the Internet. Note that this might introduce security - # concerns. - # - ip_addr = "127.0.0.1" # localhost - - # TCP port for the CLI server to receive data. - port = 55555 -} - -dispatch: { - # Poll interval for EM control events (in dispatch rounds) - # - # Rate limit EM control queue polling: - # Poll the EM internal unscheduled control queues for events every - # N:th dispatch round (where N is 'poll_ctrl_interval' here). - # The events polled for are related to EM API create/delete/sync calls - # etc. that need internal communication between the cores. - # - # 1) If 'poll_ctrl_interval = 1': polling is done every dispatch round. - # 2) If 'poll_ctrl_interval = N (>1)': Every N:th dispatch round check - # whether 'poll_ctrl_interval_ns' nanoseconds has passed since the - # previous poll, and if it has passed then poll the ctrl queues. - poll_ctrl_interval = 100 # check need to poll every Nth dispatch round - - # Poll interval for EM control events (in nanoseconds) - # - # Works together with 'poll_ctrl_interval' to limit polling: - # When 'poll_ctrl_interval' is larger than 1, use this option to limit - # the polling rate more exactly in nanoseconds. - # The previous option 'poll_ctrl_interval' is intended to limit the need - # to check absolute time(in ns) and thus maybe save some on performance, - # while this option serves to give more control over the polling rate in - # nanoseconds. - poll_ctrl_interval_ns = 1000000L # poll max every 1ms - - # Core local interval for calling input poll and output drain functions - # (in dispatch rounds) - # - # Rate limit EM poll and drain functions: - # Call the poll and/or drain functions for events every - # N:th dispatch round on a core (where N is 'poll_drain_interval' here). - # - # 1) If 'poll_drain_interval = 1': polling and draining is done every - # dispatch round. - # 2) If 'poll_drain_interval = N (>1)': Every N:th dispatch round check - # whether 'poll_drain_interval_ns' nanoseconds has passed since the - # previous poll/drain, and if it has passed then call the poll/drain - # functions. - poll_drain_interval = 1 - - # Core local interval for calling input poll and output drain functions - # (in nanoseconds) - # - # Works together with 'poll_drain_interval' to limit polling: - # When 'poll_drain_interval' is larger than 1, use this option to limit - # the poll/drain rate more exactly in nanoseconds per core. - # The previous option 'poll_drain_interval' is intended to limit the need - # to check absolute time(in ns) and thus maybe save some on performance, - # while this option serves to give more control over the poll/drain rate - # in nanoseconds. - poll_drain_interval_ns = 1000000L -} +# EM runtime configuration options +# +# This template configuration file (em-odp.conf) is hardcoded +# during the configure/build phase into em-odp and the values defined here are +# used at runtime unless overridden by the user with the optional environment +# variable EM_CONFIG_FILE=my-emodp.conf at program startup. +# +# This configuration file MUST include all configuration options. +# +# The environment variable EM_CONFIG_FILE can be used to override default values +# and it doesn't have to include all available options. The missing options are +# replaced with hardcoded default values. +# +# The options defined here are implementation specific and valid option +# values should be checked from the implementation code. 
+ +# Mandatory fields +em_implementation = "em-odp" +config_file_version = "0.0.15" + +# Pool options +pool: { + # Enable EM pool usage statistics collection during runtime (true/false) + # + # Pool usage statistics is updated e.g. during em_alloc() and em_free(), + # thus, if enabled, inducing some overhead. Pool information can be + # obtained by calling em_pool_info() and, if pool usage statistics is + # enabled, will return also pool usage statistics. + statistics_enable = false + + # Default alignment offset in bytes for the event payload start address + # + # Set the event payload alignment offset for events allocated from + # any pool. This is a global setting concerning all pools. + # A similar, but pool-specific option, is 'em_pool_cfg_t::align_offset{}' + # that overrides this global setting for a specific pool when given to + # em_pool_create(..., pool_cfg). + # + # Use this option to globally adjust the payload layout so that a + # specific part of it can be placed at a needed alignment for e.g. + # HW access. + # + # The default EM event payload start address alignment is a power-of-two + # that is at minimum 32 bytes (i.e. 32 B, 64 B, 128 B etc. depending on + # e.g. target cache-line size). + # The 'align_offset' option can be used to fine-tune the start-address + # by a small offset to e.g. make room for a small SW header before the + # rest of the payload that might need a specific alignment for direct + # HW-access. + # Example: setting 'align_offset = 8' makes sure that the payload + # _after_ 8 bytes will be aligned at minimum (2^x) 32 bytes for all + # pools that do not override this value. + # + # start: base - align_offset + # | + # v + # +------------------------------------------+ + # | <----- | Event Payload | + # +------------------------------------------+ + # ^ + # | + # base (min 32B aligned, power-of-two) + align_offset = 0 + + # Default event user area size in bytes for all events from all pools. + # + # The event user area is located within the event metadata (hdr) and + # is not part of the event payload. The event user area can e.g. be + # used to store additional state data related to the event payload + # content. + # + # This is a global setting that can be overridden per pool using + # 'em_pool_cfg_t::user_area{}' with em_pool_create(). + # + user_area_size = 0 + + # Default minimum packet headroom in bytes for events allocated from + # EM-pools of type: EM_EVENT_TYPE_PACKET. Ignored for other pool types. + # + # This is a global setting for EM-pools of type EM_EVENT_TYPE_PACKET. + # A similar, but pool-specific option, is 'em_pool_cfg_t::pkt.headroom{}' + # that overrides this global setting for a specific pkt-pool when given + # to em_pool_create(..., pool_cfg). + # + # Use this option to globally set the minimum headroom in bytes for + # events/packets allocated from pkt-pools. Each event/packet will have + # at least this much headroom. + # + # 0: Explicitly set 'No headroom' for the pool. + # + # Max value is determined by the ODP implementation capabilities + # (a value larger than 'max' will lead to setup error) + # + # Note: using 'align_offset > 0' reduces the packet-headroom by the + # same amount, for example: + # "align_offset=8, pkt_headroom=128 ==> headroom= 128-8 = 120 B" + # + pkt_headroom = 128 +} + +queue_group: { + # Create the EM single-core queue groups (true/false) + # + # Select whether EM should create a queue group per EM-core. 
+	# Corresponds to the queue groups with name:
+	# EM_QUEUE_GROUP_CORE_BASE_NAME + "%d"
+	# Each created queue group only has one core set in its core mask.
+	#
+	# EM earlier relied on these queue groups for internal core specific
+	# messaging and also allowed applications to use them. Currently EM
+	# does not internally need these groups but will create them based on
+	# this option for applications relying on their existence.
+	create_core_queue_groups = false
+}
+
+# Queue options
+queue: {
+	# Default minimum number of events that a queue can hold.
+	#
+	# This value will be used in queue creation (em_queue_create*()) if no
+	# other size information has been provided via the
+	# em_queue_conf_t::min_events parameter.
+	# Setting 'min_events_default = 0' will use the odp-implementation's
+	# default values (might vary from one odp-implementation to another).
+	min_events_default = 4096
+
+	priority: {
+		# Select the queue priority mapping mode (EM API to ODP)
+		#
+		# 0: legacy, map EM prios to ODP min/default/max, 3 prios only
+		#    (old default used before adding the map_mode option)
+		# 1: map according to ODP runtime number of priorities
+		#    (linear fit to full range of ODP priorities available)
+		# 2: custom priority mapping
+		#    (use custom_map below)
+		#
+		map_mode = 1
+
+		# Custom priority map (required when map_mode = 2)
+		#
+		# This array needs to have EM_QUEUE_PRIO_NUM entries
+		# (typically 8). First entry is for lowest priority (0).
+		# The value is ADDED to odp_schedule_min_prio() and then passed
+		# to ODP, i.e. values are offsets from odp_schedule_min_prio().
+		# Values given here must be valid for ODP runtime configuration,
+		# i.e. value plus odp_schedule_min_prio() must not exceed
+		# odp_schedule_max_prio().
+		#
+		#custom_map = [0, 0, 1, 3, 4, 6, 7, 7]
+	}
+}
+
+# Event-Chaining options
+event_chaining: {
+	# Note:
+	# The user _must_ provide an implementation for the overrideable
+	# 'event_send_device()' and 'event_send_device_multi()' functions in
+	# order to use Event-Chaining functionality!
+	# The functions are declared with '__attribute__((weak))' in the EM-lib
+	# to allow overriding.
+	# Linking user code, which includes overrides for those functions,
+	# against the EM-lib will replace the stubs with the user provided
+	# variants.
+
+	# Order calls to 'event_send_device()' and 'event_send_device_multi()'
+	# when events are sent during an ordered context - true/false.
+	#
+	# Note: Only meaningful when used together with (parallel-)ordered
+	#       queues. Order is implicitly guaranteed when sent during an
+	#       atomic context and never guaranteed when sent during a parallel
+	#       context.
+	#       Only set to 'true' if queues of type EM_QUEUE_TYPE_PARALLEL_ORDERED
+	#       are used with event chaining, no benefit otherwise.
+	#
+	# Events sent with em_send...() to an event chaining queue will be
+	# passed to the user provided 'event_send_device/_multi()' functions.
+	# EM can ensure that calls to these functions are:
+	# 1) made in order (order_keep = true)
+	# or
+	# 2) make no extra effort to keep the order (order_keep = false),
+	#    e.g. if the user (or packet output) handles ordering or does not
+	#    care about it.
+	order_keep = false
+
+	# Number of queues used for ordering event-chaining events
+	#
+	# Note: Ignored if 'order_keep = false' above.
+	#
+	# Maintain the order of events sent to another device from within
+	# an EM ordered context (i.e. send from within the EO receive function
+	# when handling an event from a parallel-ordered queue).
+	# An event-chaining queue has no 'real' queue associated with it, the
+	# queue-id simply indicates that events sent to the queue should be
+	# sent out of EM via a user specified function (and perhaps out towards
+	# another device). Sending events out of EM requires some intervention,
+	# especially sending during an ordered context needs to maintain the
+	# event order as determined by the context. To maintain event order for
+	# event-chaining queues, em-odp uses a set of 'real' queues for order
+	# tracking - the number of these queues is set by the following option:
+	# (0 = no ordering)
+	num_order_queues = 8
+}
+
+# Event State Verification (ESV) options
+#
+# Note: Options only considered if ESV is enabled at compile-time
+#       via the configure option '--enable-esv', see configure.ac
+esv: {
+	# Runtime ESV enable/disable option.
+	# Allows the user to disable ESV without recompilation when
+	# 'configure --enable-esv'.
+	enable = true
+
+	# Store the valid state for the event at runtime each time the internal
+	# event state changes so that ESV errors can log both the previous
+	# valid state ('prev-state') and the last erroneous state ('new-state')
+	# when logging the error:
+	#
+	# EM Error log (send after free error):
+	# 1) esv.store_state = true:
+	#    EM ERROR:0x80000010 ESCOPE:0xFF000201 EO:0x2-"eo1"
+	#    ESV: Event:0x2007fa6822300 state error ...
+	#    prev-state:em_free() core:01: EO:0x2-"eo1" Q:0x3-"Q1" ...
+	#    => new-state:em_send() core:00: EO:0x2-"eo1" Q:0x3-"Q1" ...
+	#
+	# 2) esv.store_state = false (note "prev-state:n/a"):
+	#    EM ERROR:0x80000010 ESCOPE:0xFF000201 EO:0x2-"eo1"
+	#    ESV: Event:0x2007fa6822300 state error ...
+	#    prev-state:n/a
+	#    => new-state:em_send() core:00: EO:0x2-"eo1" Q:0x3-"Q1" ...
+	#
+	# Disabling (= false) might improve runtime performance at the cost of
+	# losing debug data when an ESV error occurs.
+	#
+	# Note: The state will not be stored for event references since they
+	#       share the same state-data and would overwrite each other's state.
+	#
+	store_state = true
+
+	# Store the first 32bits of the event payload during each valid
+	# event-state transition. This allows for a comparison of the payload
+	# content between the previous valid state and the invalid state.
+	# Note: The first 32bits of the payload will always be printed in the
+	#       error log for the invalid state regardless of this setting.
+	#       Enabling will impact performance somewhat.
+	store_payload_first_u32 = false
+
+	# Preallocate all events in a pool during pool creation to set
+	# an initial ESV-state for each event that can be tracked over
+	# multiple allocs and frees.
+	prealloc_pools = true
+}
+
+# EM Command Line Interface options
+#
+# Example usage:
+#	Access EM CLI server with telnet client
+#	$ telnet
+#	telnet> open 127.0.0.1 55555	# or localhost
+#	EM-ODP> help
+#	EM-ODP> em_core_print
+cli: {
+	# Runtime cli enable/disable option.
+	# By default cli is disabled. To enable cli, set this to true.
+	enable = false
+
+	# IP address to which the EM CLI server will be bound.
+	#
+	# The IP address can be set to one of the following three types:
+	#
+	# 1. "127.0.0.1": will receive data only from the same machine.
+	#
+	# 2. Any one of the IP addresses of your machine. Command ifconfig
+	#    can be used to get the IP addresses of your machine.
+	#
+	# 3. "0.0.0.0": will receive data sent to any IP address of your
+	#    machine.
+	#
+	# The last two types of IP address can receive data from any other
+	# machine in the Internet. Note that this might introduce security
+	# concerns.
+	#
+	ip_addr = "127.0.0.1"	# localhost
+
+	# TCP port for the CLI server to receive data.
+	port = 55555
+}
+
+dispatch: {
+	# Poll interval for EM control events (in dispatch rounds)
+	#
+	# Rate limit EM control queue polling:
+	# Poll the EM internal unscheduled control queues for events every
+	# N:th dispatch round (where N is 'poll_ctrl_interval' here).
+	# The events polled for are related to EM API create/delete/sync calls
+	# etc. that need internal communication between the cores.
+	#
+	# 1) If 'poll_ctrl_interval = 1': polling is done every dispatch round.
+	# 2) If 'poll_ctrl_interval = N (>1)': Every N:th dispatch round check
+	#    whether 'poll_ctrl_interval_ns' nanoseconds has passed since the
+	#    previous poll, and if it has passed then poll the ctrl queues.
+	poll_ctrl_interval = 100	# check need to poll every Nth dispatch round
+
+	# Poll interval for EM control events (in nanoseconds)
+	#
+	# Works together with 'poll_ctrl_interval' to limit polling:
+	# When 'poll_ctrl_interval' is larger than 1, use this option to limit
+	# the polling rate more exactly in nanoseconds.
+	# The previous option 'poll_ctrl_interval' is intended to limit the need
+	# to check absolute time (in ns) and thus maybe save some on performance,
+	# while this option serves to give more control over the polling rate in
+	# nanoseconds.
+	poll_ctrl_interval_ns = 1000000L	# poll max every 1ms
+
+	# Core local interval for calling input poll and output drain functions
+	# (in dispatch rounds)
+	#
+	# Rate limit EM poll and drain functions:
+	# Call the poll and/or drain functions for events every
+	# N:th dispatch round on a core (where N is 'poll_drain_interval' here).
+	#
+	# 1) If 'poll_drain_interval = 1': polling and draining is done every
+	#    dispatch round.
+	# 2) If 'poll_drain_interval = N (>1)': Every N:th dispatch round check
+	#    whether 'poll_drain_interval_ns' nanoseconds has passed since the
+	#    previous poll/drain, and if it has passed then call the poll/drain
+	#    functions.
+	poll_drain_interval = 1
+
+	# Core local interval for calling input poll and output drain functions
+	# (in nanoseconds)
+	#
+	# Works together with 'poll_drain_interval' to limit polling:
+	# When 'poll_drain_interval' is larger than 1, use this option to limit
+	# the poll/drain rate more exactly in nanoseconds per core.
+	# The previous option 'poll_drain_interval' is intended to limit the need
+	# to check absolute time (in ns) and thus maybe save some on performance,
+	# while this option serves to give more control over the poll/drain rate
+	# in nanoseconds.
+	poll_drain_interval_ns = 1000000L
+}
+
+# Configure startup pool(s). Optional. When set, EM will create startup pool(s)
+# according to the configuration given here during em_init(). If not given, only
+# the default pool will be created. In this case, the default pool configuration
+# is specified in the parameters of em_init(), more specifically, in the struct
+# em_conf_t::default_pool_cfg.
+#
+# Note that if the default pool is configured here (e.g. either by setting
+# startup_pools.conf[i].name to EM_POOL_DEFAULT_NAME ("default"), by setting
+# startup_pools.conf[i].pool to EM_POOL_DEFAULT (1), or by setting both), it
+# overrides the default pool configuration given in the parameter of
+# em_init(em_conf_t conf). The default pool name or pool ID cannot be combined
+# with non-default pool IDs and names (e.g. setting startup_pools.conf[0].name
+# to a non-default name while setting startup_pools.conf[0].pool to the default
+# pool ID will fail).
+#
+# The priority regarding the default pool configuration is as follows:
+#
+#	+--------------+     +-------------+     +-----------------------------+
+#	| Runtime.conf |  >  | em-odp.conf |  >  | em_conf_t::default_pool_cfg |
+#	+--------------+     +-------------+     +-----------------------------+
+#
+# The default pool configuration specified in the runtime configuration file
+# overrides the one given in the default configuration file (em-odp.conf),
+# which overrides the one passed as a parameter to em_init().
+#
+# Valid values for 'event_type' are ("stringified" versions of EM constants):
+#	"EM_EVENT_TYPE_SW"
+#	"EM_EVENT_TYPE_PACKET"
+#	"EM_EVENT_TYPE_VECTOR"
+#
+# Note that not all fields are mandatory, most of them are optional. Refer to
+# em_pool_cfg_t and em_pool_create() for a more detailed explanation of each
+# option field.
+#
+# Note also that these are pool specific configurations given directly to
+# em_pool_create(name, pool, pool_cfg), thus overriding the global settings
+# such as pool.align_offset, pool.user_area_size, and pool.pkt_headroom set
+# above.
+#
+# The startup pools will be deleted during em_term().
+#
+#startup_pools: {
+#	# Number of startup pools.
+#	# The number of pool configs in the 'conf' below must match this number.
+#	# This number must be within the range: [1, EM_CONFIG_POOLS - 1].
+#	num = 1	# Mandatory
+#
+#	# Pool configurations
+#	conf: ({
+#		# Pool name.
+#		# Using "default" overrides the default pool config given
+#		# to em_init().
+#		name = "default"	# Optional
+#
+#		# Pool ID.
+#		# Setting to 1 (EM_POOL_DEFAULT) overrides the default
+#		# pool config given to em_init().
+#		# The pool ID must be within the range [0, EM_CONFIG_POOLS].
+#		# Note that setting 'pool' to 0 (EM_POOL_UNDEF) has the same
+#		# effect as leaving this out, in which case, EM will decide the
+#		# pool ID.
+#		pool = 1	# Optional
+#
+#		# Pool configurations. Corresponds to em_pool_cfg_t.
+#		pool_cfg: {
+#			# Event type
+#			event_type = "EM_EVENT_TYPE_SW"	# Mandatory
+#
+#			# Alignment offset
+#			# Optional, but when 'align_offset' is used, both
+#			# 'in_use' and 'value' must be given. So either leave
+#			# all out or give the full setting.
+#			# 'in_use = true', 'value = 0' --> set explicitly to 0
+#			align_offset: {
+#				in_use = true
+#				value = 0	# Must be power of 2
+#			}	# Optional
+#
+#			# Event user area
+#			# Optional, but when 'user_area' is used, both 'in_use'
+#			# and 'size' must be given. So either leave all out or
+#			# give the full setting.
+#			# 'in_use = true', 'size = 0' --> set explicitly to 0
+#			user_area: {
+#				in_use = true
+#				size = 0
+#			}	# Optional
+#
+#			# Valid only for EM_EVENT_TYPE_PACKET.
+#			pkt: {
+#				# Pool-specific packet minimum headroom
+#				# Optional, but when 'headroom' is used, both
+#				# 'in_use' and 'value' must be given. So either
+#				# leave all out or give the full setting.
+#				# 'in_use = true', 'value = 0' --> no headroom
+#				headroom: {
+#					in_use = false
+#					value = 0
+#				}
+#			}	# Optional
+#
+#			# Number of subpools.
+#			# The number of subpool settings in 'subpools' below
+#			# must match this number. This number must be within
+#			# the range [1, EM_MAX_SUBPOOLS].
+#			num_subpools = 4	# Mandatory
+#
+#			# Subpool settings.
+# subpools: ({ +# size = 256 # Mandatory +# num = 16384 # Mandatory +# cache_size = 64 # Optional +# }, +# { +# size = 512 # Mandatory +# num = 1024 # Mandatory +# cache_size = 32 # Optional +# }, +# { +# size = 1024 # Mandatory +# num = 1024 # Mandatory +# cache_size = 16 # Optional +# }, +# { +# size = 2048 # Mandatory +# num = 1024 # Mandatory +# cache_size = 8 # Optional +# }) +# } +# }) +#} diff --git a/configure.ac b/configure.ac index 5240ef07..241a7944 100644 --- a/configure.ac +++ b/configure.ac @@ -2,8 +2,8 @@ AC_PREREQ([2.69]) ############################ # Version ############################ -m4_define([em_version_api_major], [2]) -m4_define([em_version_api_minor], [9]) +m4_define([em_version_api_major], [3]) +m4_define([em_version_api_minor], [0]) m4_define([em_version_implementation], [0]) m4_define([em_version_fix], [0]) @@ -285,6 +285,38 @@ AC_ARG_ENABLE([debug-print], # Substitute @EM_DEBUG_PRINT@ into the pkgconfig file libemodp.pc.in AC_SUBST([EM_DEBUG_PRINT]) +######################################################################### +# Override EM-define value 'EM_DEBUG_TIMESTAMP_ENABLE' to 0...2 +# --enable-debug-timestamps=0...2 Set 'EM_DEBUG_TIMESTAMP_ENABLE' to the given +# value (1=low overhead, 2=strict time) +# --enable-debug-timestamps<=yes (=1) Set 'EM_DEBUG_TIMESTAMP_ENABLE' to 1 +# --enable-debug-timestamps Set 'EM_DEBUG_TIMESTAMP_ENABLE' to 1 +# --disable-debug-timestamps or +# no option given Use '#define EM_DEBUG_TIMESTAMP_ENABLE 0...2' from +# source code (default 0) +debug_timestamps_info="no (EM_DEBUG_TIMESTAMP_ENABLE from source code)" +EM_DEBUG_TIMESTAMP_ENABLE="" +AC_ARG_ENABLE([debug-timestamps], + [AS_HELP_STRING([--enable-debug-timestamps[[=VAL]]], + [Override the 'EM_DEBUG_TIMESTAMP_ENABLE' define, valid + values are 0...2, yes(=1) or no value(=1), 2=strict time + [default=disabled, value from source code]])], + [AS_IF([test "$enableval" -ge 0 -a "$enableval" -le 2 2>/dev/null], + [debug_timestamps_info="yes:$enableval (EM_DEBUG_TIMESTAMP_ENABLE=$enableval)" + EM_DEBUG_TIMESTAMP_ENABLE="-DEM_DEBUG_TIMESTAMP_ENABLE=$enableval"], + + [test "x$enableval" = "xyes"], + [debug_timestamps_info="yes:1 (EM_DEBUG_TIMESTAMP_ENABLE=$enableval)" + EM_DEBUG_TIMESTAMP_ENABLE="-DEM_DEBUG_TIMESTAMP_ENABLE=1"], + + [test "x$enableval" != "xno"], + [AC_MSG_ERROR([bad value ${enableval} for --enable-debug-timestamps])] + ) + EM_CPPFLAGS="$EM_CPPFLAGS $EM_DEBUG_TIMESTAMP_ENABLE" + ],[]) +# Substitute @EM_DEBUG_TIMESTAMP_ENABLE@ into the pkgconfig file libemodp.pc.in +AC_SUBST([EM_DEBUG_TIMESTAMP_ENABLE]) + ########################################################################## # Set optional libcli path ########################################################################## @@ -326,6 +358,38 @@ EM_CPPFLAGS="$EM_CPPFLAGS $EM_CLI" LIBS=$OLD_LIBS CPPFLAGS=$OLD_CPPFLAGS +######################################################################### +# Enable EM idle hooks +######################################################################### +# --enable-idle-hooks Set 'EM_IDLE_HOOKS_ENABLE=1' +# --disable-idle-hooks Set 'EM_IDLE_HOOKS_ENABLE=0' +# no option given Use '#define EM_IDLE_HOOKS_ENABLE 0|1' from source code +# +# Note: Using the option --enable-idle-hooks OR --disable-idle-hooks will _override_ +# the source code define value for '#define EM_IDLE_HOOKS_ENABLE 0|1' +# +idle_hooks_info="no (EM_IDLE_HOOKS_ENABLE from source code)" +EM_IDLE_HOOKS_ENABLE="" +AC_ARG_ENABLE([idle-hooks], + [AS_HELP_STRING([--enable-idle-hooks], + [Enable Idle Hooks + 
[default=value from source code '#define EM_IDLE_HOOKS_ENABLE 0|1']])], + [AS_IF(dnl --enable-idle-hooks[=yes]: + [test "x$enableval" = "xyes"], + [idle_hooks_info="$enableval (EM_IDLE_HOOKS_ENABLE=1)" + EM_IDLE_HOOKS_ENABLE="-DEM_IDLE_HOOKS_ENABLE=1"], + dnl --disable-idle-hooks OR --enable-idle-hooks=no: + [test "x$enableval" = "xno"], + [idle_hooks_info="$enableval (EM_IDLE_HOOKS_ENABLE=0)" + EM_IDLE_HOOKS_ENABLE="-DEM_IDLE_HOOKS_ENABLE=0"], + dnl unsupported value given: + [AC_MSG_ERROR([bad value --enable-idle-hooks=${enableval}, use yes/no])] + ) + EM_CPPFLAGS="$EM_CPPFLAGS $EM_IDLE_HOOKS_ENABLE" + ],[]) +# Substitute @EM_IDLE_HOOKS_ENABLE@ into the pkgconfig file libemodp.pc.in +AC_SUBST([EM_IDLE_HOOKS_ENABLE]) + ########################################################################## # Default include setup ########################################################################## @@ -584,5 +648,7 @@ AC_MSG_RESULT([ EM check level: ${check_level_info} EM ESV: ${esv_info} EM debug print: ${debug_print_info} + EM debug timestamps: ${debug_timestamps_info} EM CLI: ${em_cli} + EM idle hooks: ${idle_hooks_info} ]) diff --git a/doc/Doxyfile b/doc/Doxyfile index 7f88af35..afb214ba 100644 --- a/doc/Doxyfile +++ b/doc/Doxyfile @@ -1,4 +1,4 @@ -# Doxyfile 1.8.17 +# Doxyfile 1.9.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -227,6 +227,14 @@ QT_AUTOBRIEF = NO MULTILINE_CPP_IS_BRIEF = NO +# By default Python docstrings are displayed as preformatted text and doxygen's +# special commands cannot be used. By setting PYTHON_DOCSTRING to NO the +# doxygen's special commands can be used and the contents of the docstring +# documentation blocks is shown as doxygen documentation. +# The default value is: YES. + +PYTHON_DOCSTRING = YES + # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. @@ -263,12 +271,6 @@ TAB_SIZE = 8 ALIASES = -# This tag can be used to specify a number of word-keyword mappings (TCL only). -# A mapping has the form "name=value". For example adding "class=itcl::class" -# will allow you to use the command class in the itcl::class meaning. - -TCL_SUBST = - # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all @@ -310,18 +312,21 @@ OPTIMIZE_OUTPUT_SLICE = NO # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, JavaScript, -# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, +# Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, VHDL, # Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: # FortranFree, unknown formatted Fortran: Fortran. In the later case the parser # tries to guess whether the code is fixed or free formatted code, this is the -# default for Fortran type files), VHDL, tcl. For instance to make doxygen treat -# .inc files as Fortran files (default is PHP), and .f files as C (default is -# Fortran), use: inc=Fortran f=C. +# default for Fortran type files). 
For instance to make doxygen treat .inc files
+# as Fortran files (default is PHP), and .f files as C (default is Fortran),
+# use: inc=Fortran f=C.
 #
 # Note: For files without extension you can use no_extension as a placeholder.
 #
 # Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
+# the files are not read by doxygen. When specifying no_extension you should add
+# * to the FILE_PATTERNS.
+#
+# Note see also the list of default file extension mappings.
 
 EXTENSION_MAPPING =
 
@@ -455,6 +460,19 @@ TYPEDEF_HIDES_STRUCT = NO
 
 LOOKUP_CACHE_SIZE = 0
 
+# The NUM_PROC_THREADS specifies the number of threads doxygen is allowed to use
+# during processing. When set to 0 doxygen will base this on the number of
+# cores available in the system. You can set it explicitly to a value larger
+# than 0 to get more control over the balance between CPU load and processing
+# speed. At this moment only the input processing can be done using multiple
+# threads. Since this is still an experimental feature the default is set to 1,
+# which effectively disables parallel processing. Please report any issues you
+# encounter. Generating dot graphs in parallel is controlled by the
+# DOT_NUM_THREADS setting.
+# Minimum value: 0, maximum value: 32, default value: 1.
+
+NUM_PROC_THREADS = 1
+
 #---------------------------------------------------------------------------
 # Build related configuration options
 #---------------------------------------------------------------------------
@@ -518,6 +536,13 @@ EXTRACT_LOCAL_METHODS = NO
 
 EXTRACT_ANON_NSPACES = NO
 
+# If this flag is set to YES, the name of an unnamed parameter in a declaration
+# will be determined by the corresponding definition. By default unnamed
+# parameters remain unnamed in the output.
+# The default value is: YES.
+
+RESOLVE_UNNAMED_PARAMS = YES
+
 # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
 # undocumented members inside documented classes or files. If set to NO these
 # members will be included in the various overviews, but no documentation
@@ -555,11 +580,18 @@ HIDE_IN_BODY_DOCS = NO
 
 INTERNAL_DOCS = NO
 
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES, upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# (including Cygwin) ands Mac users are advised to set this option to NO.
+# With the correct setting of option CASE_SENSE_NAMES doxygen will better be
+# able to match the capabilities of the underlying filesystem. In case the
+# filesystem is case sensitive (i.e. it supports files in the same directory
+# whose names only differ in casing), the option must be set to YES to properly
+# deal with such files in case they appear in the input. For filesystems that
+# are not case sensitive the option should be set to NO to properly deal with
+# output files written for symbols that only differ in casing, such as for two
+# classes, one named CLASS and the other named Class, and to also support
+# references to files without having to specify the exact matching casing. On
+# Windows (including Cygwin) and MacOS, users should typically set this option
+# to NO, whereas on Linux or other Unix flavors it should typically be set to
+# YES.
 # The default value is: system dependent.
CASE_SENSE_NAMES = YES @@ -798,7 +830,10 @@ WARN_IF_DOC_ERROR = YES WARN_NO_PARAMDOC = NO # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when -# a warning is encountered. +# a warning is encountered. If the WARN_AS_ERROR tag is set to FAIL_ON_WARNINGS +# then doxygen will continue running as if WARN_AS_ERROR tag is set to NO, but +# at the end of the doxygen process doxygen will return with a non-zero status. +# Possible values are: NO, YES and FAIL_ON_WARNINGS. # The default value is: NO. WARN_AS_ERROR = NO @@ -836,8 +871,8 @@ INPUT = $(SRCDIR)/include \ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv -# documentation (see: https://www.gnu.org/software/libiconv/) for the list of -# possible encodings. +# documentation (see: +# https://www.gnu.org/software/libiconv/) for the list of possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 @@ -850,13 +885,15 @@ INPUT_ENCODING = UTF-8 # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # +# Note the list of default checked file patterns might differ from the list of +# default file extension mappings. +# # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, # *.m, *.markdown, *.md, *.mm, *.dox (to be provided as doxygen C comment), -# *.doc (to be provided as doxygen C comment), *.txt (to be provided as doxygen -# C comment), *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f, *.for, *.tcl, *.vhd, -# *.vhdl, *.ucf, *.qsf and *.ice. +# *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, *.f18, *.f, *.for, *.vhd, *.vhdl, +# *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.c \ *.cc \ @@ -1117,16 +1154,22 @@ USE_HTAGS = NO VERBATIM_HEADERS = YES # If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the -# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the -# cost of reduced performance. This can be particularly helpful with template -# rich C++ code for which doxygen's built-in parser lacks the necessary type -# information. +# clang parser (see: +# http://clang.llvm.org/) for more accurate parsing at the cost of reduced +# performance. This can be particularly helpful with template rich C++ code for +# which doxygen's built-in parser lacks the necessary type information. # Note: The availability of this option depends on whether or not doxygen was # generated with the -Duse_libclang=ON option for CMake. # The default value is: NO. CLANG_ASSISTED_PARSING = NO +# If clang assisted parsing is enabled and the CLANG_ADD_INC_PATHS tag is set to +# YES then doxygen will add the directory of each input to the include path. +# The default value is: YES. + +CLANG_ADD_INC_PATHS = YES + # If clang assisted parsing is enabled you can provide the compiler with command # line options that you would normally use when invoking the compiler. 
Note that # the include paths will already be set by doxygen for the files and directories @@ -1136,10 +1179,13 @@ CLANG_ASSISTED_PARSING = NO CLANG_OPTIONS = # If clang assisted parsing is enabled you can provide the clang parser with the -# path to the compilation database (see: -# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) used when the files -# were built. This is equivalent to specifying the "-p" option to a clang tool, -# such as clang-check. These options will then be passed to the parser. +# path to the directory containing a file called compile_commands.json. This +# file is the compilation database (see: +# http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html) containing the +# options used when the source files were built. This is equivalent to +# specifying the -p option to a clang tool, such as clang-check. These options +# will then be passed to the parser. Any options specified with CLANG_OPTIONS +# will be added as well. # Note: The availability of this option depends on whether or not doxygen was # generated with the -Duse_libclang=ON option for CMake. @@ -1156,13 +1202,6 @@ CLANG_DATABASE_PATH = ALPHABETICAL_INDEX = YES -# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in -# which the alphabetical index list will be split. -# Minimum value: 1, maximum value: 20, default value: 5. -# This tag requires that the tag ALPHABETICAL_INDEX is set to YES. - -COLS_IN_ALPHA_INDEX = 5 - # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored @@ -1333,10 +1372,11 @@ HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development -# environment (see: https://developer.apple.com/xcode/), introduced with OSX -# 10.5 (Leopard). To create a documentation set, doxygen will generate a -# Makefile in the HTML output directory. Running make will produce the docset in -# that directory and running make install will install the docset in +# environment (see: +# https://developer.apple.com/xcode/), introduced with OSX 10.5 (Leopard). To +# create a documentation set, doxygen will generate a Makefile in the HTML +# output directory. Running make will produce the docset in that directory and +# running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy # genXcode/_index.html for more information. @@ -1378,8 +1418,8 @@ DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop -# (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on -# Windows. +# (see: +# https://www.microsoft.com/en-us/download/details.aspx?id=21138) on Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML @@ -1409,7 +1449,7 @@ CHM_FILE = HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated -# (YES) or that it should be included in the master .chm file (NO). 
+# (YES) or that it should be included in the main .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. @@ -1454,7 +1494,8 @@ QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace -# (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). +# (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1462,8 +1503,8 @@ QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual -# Folders (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- -# folders). +# Folders (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual-folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. @@ -1471,16 +1512,16 @@ QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom -# Filters (see: https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- -# filters). +# Filters (see: +# https://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom-filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = @@ -1492,9 +1533,9 @@ QHP_CUST_FILTER_ATTRS = QHP_SECT_FILTER_ATTRS = -# The QHG_LOCATION tag can be used to specify the location of Qt's -# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the -# generated .qhp file. +# The QHG_LOCATION tag can be used to specify the location (absolute path +# including file name) of Qt's qhelpgenerator. If non-empty doxygen will try to +# run qhelpgenerator on the generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = @@ -1571,6 +1612,17 @@ TREEVIEW_WIDTH = 250 EXT_LINKS_IN_WINDOW = NO +# If the HTML_FORMULA_FORMAT option is set to svg, doxygen will use the pdf2svg +# tool (see https://github.com/dawbarton/pdf2svg) or inkscape (see +# https://inkscape.org) to generate formulas as SVG images instead of PNGs for +# the HTML output. These images will generally look nicer at scaled resolutions. +# Possible values are: png (the default) and svg (looks nicer but requires the +# pdf2svg or inkscape tool). +# The default value is: png. +# This tag requires that the tag GENERATE_HTML is set to YES. + +HTML_FORMULA_FORMAT = png + # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML @@ -1610,7 +1662,7 @@ USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. 
See the MathJax site (see: -# http://docs.mathjax.org/en/latest/output.html) for more details. +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. @@ -1626,7 +1678,7 @@ MATHJAX_FORMAT = HTML-CSS # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from https://www.mathjax.org before deployment. -# The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/. +# The default value is: https://cdn.jsdelivr.net/npm/mathjax@2. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest @@ -1640,7 +1692,8 @@ MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site -# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an +# (see: +# http://docs.mathjax.org/en/v2.7-latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. @@ -1687,7 +1740,8 @@ SERVER_BASED_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: https://xapian.org/). +# Xapian (see: +# https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. @@ -1700,8 +1754,9 @@ EXTERNAL_SEARCH = NO # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library -# Xapian (see: https://xapian.org/). See the section "External Indexing and -# Searching" for details. +# Xapian (see: +# https://xapian.org/). See the section "External Indexing and Searching" for +# details. # This tag requires that the tag SEARCHENGINE is set to YES. SEARCHENGINE_URL = @@ -1865,9 +1920,11 @@ LATEX_EXTRA_FILES = PDF_HYPERLINKS = YES -# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate -# the PDF file directly from the LaTeX files. Set this option to YES, to get a -# higher quality PDF documentation. +# If the USE_PDFLATEX tag is set to YES, doxygen will use the engine as +# specified with LATEX_CMD_NAME to generate the PDF file directly from the LaTeX +# files. Set this option to YES, to get a higher quality PDF documentation. +# +# See also section LATEX_CMD_NAME for selecting the engine. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -2378,10 +2435,32 @@ UML_LOOK = NO # but if the number exceeds 15, the total amount of fields shown is limited to # 10. # Minimum value: 0, maximum value: 100, default value: 10. -# This tag requires that the tag HAVE_DOT is set to YES. +# This tag requires that the tag UML_LOOK is set to YES. UML_LIMIT_NUM_FIELDS = 10 +# If the DOT_UML_DETAILS tag is set to NO, doxygen will show attributes and +# methods without types and arguments in the UML graphs. If the DOT_UML_DETAILS +# tag is set to YES, doxygen will add type and arguments for attributes and +# methods in the UML graphs. If the DOT_UML_DETAILS tag is set to NONE, doxygen +# will not generate fields with class member information in the UML graphs. 
The
+# class diagrams will look similar to the default class diagrams but using UML
+# notation for the relationships.
+# Possible values are: NO, YES and NONE.
+# The default value is: NO.
+# This tag requires that the tag UML_LOOK is set to YES.
+
+DOT_UML_DETAILS = NO
+
+# The DOT_WRAP_THRESHOLD tag can be used to set the maximum number of characters
+# to display on a single line. If the actual line length exceeds this threshold
+# significantly it will be wrapped across multiple lines. Some heuristics are
+# applied to avoid ugly line breaks.
+# Minimum value: 0, maximum value: 1000, default value: 17.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_WRAP_THRESHOLD = 17
+
 # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
 # collaboration graphs will show the relations between templates and their
 # instances.
@@ -2530,7 +2609,7 @@ PLANTUML_INCLUDE_PATH =
 # Minimum value: 0, maximum value: 10000, default value: 50.
 # This tag requires that the tag HAVE_DOT is set to YES.
 
-DOT_GRAPH_MAX_NODES = 50
+DOT_GRAPH_MAX_NODES = 500
 
 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
 # generated by dot. A depth value of 3 means that only nodes reachable from the
@@ -2573,9 +2652,11 @@ DOT_MULTI_TARGETS = NO
 
 GENERATE_LEGEND = YES
 
-# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate
 # files that are used to generate the various graphs.
+#
+# Note: This setting is not only used for dot files but also for msc and
+# plantuml temporary files.
 # The default value is: YES.
-# This tag requires that the tag HAVE_DOT is set to YES.
 
 DOT_CLEANUP = YES
diff --git a/include/event_machine.h b/include/event_machine.h
index 6df8db88..d2ef4262 100644
--- a/include/event_machine.h
+++ b/include/event_machine.h
@@ -1,266 +1,274 @@
-/*
- * Copyright (c) 2012, Nokia Siemens Networks
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- *   notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- *   notice, this list of conditions and the following disclaimer in the
- *   documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its
- *   contributors may be used to endorse or promote products derived
- *   from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ - -#ifndef EVENT_MACHINE_H -#define EVENT_MACHINE_H - -#pragma GCC visibility push(default) - -/** - * @file - * Event Machine API - * - * This file includes all other needed EM headers - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** @mainpage - * - * @section section_1 General - * Event Machine (EM) is a framework and an architectural abstraction of an - * event driven, multicore optimized, processing concept originally developed - * for the networking data plane. It offers an easy programming concept for - * scalable and dynamically load balanced multicore applications with a very - * low overhead run-to-completion principle. - * - * Events, queues and execution objects (EO) along with the scheduler and the - * dispatcher form the main elements of the EM concept. An event is an - * application specific piece of data (like a message or a network packet) - * describing work, something to do. All processing in EM must be triggered by - * an event. Events are sent to asynchronous application specific EM queues. - * A dispatcher loop is run by a single thread on each core in the EM instance - * ("core" is used here to refer to a core or one HW thread on multi-threaded - * cores). The dispatcher on each core interfaces with the scheduler and asks - * for an event to process. The scheduler then evaluates the state of all the - * EM queues and gives the highest priority event available to the requesting - * dispatcher. The dispatcher looks up which EO owns the queue that the event - * came from and finally calls the EO's registered receive function to deliver - * the event for processing. When the event has been handled and the EO's - * receive function returns, it's again time for the dispatcher on that core to - * request another event from the scheduler and deliver it to the corresponding - * EO. The aforedescribed scenario happens in parallel on all cores running the - * EM instance. Events originating from a particular queue might thus be given - * for processing on any core, decided separately for each event by the - * scheduler as the dispatcher on a core requests more work - this is per-event - * dynamic load-balancing. EM contains mechanisms to ensure atomicity and event - * (re-)ordering. - * - * The EM concept has been designed to be highly efficient, operating in a - * run-to-completion manner on each participating core with neither context - * switching nor pre-emption slowing down the event processing loops. - * EM can run on bare metal for best performance or under an operating system - * with special arrangements (e.g. one thread per core with thread affinity). - * - * The concept and the API are intended to allow fairly easy implementations on - * general purpose or networking oriented multicore packet processing SoCs, - * which typically also contain accelerators for packet processing needs. - * Efficient integration with modern HW accelerators has been a major driver of - * the EM concept. - * - * One general principle of the EM API is that the function calls are mostly - * multicore safe. The application still needs to consider parallel processing - * data hazards and race conditions unless explicitly documented in the API for - * the function call in question. For example, one core might ask for a queue - * context while another core changes it, thus the returned context may be - * invalid (valid data, but either the old or the new value is returned). 
Thus - * modifications of shared state or data should be protected by an atomic - * context (if load balancing is used) or otherwise synchronized by the - * application itself. One simple way to achieve atomic processing is to use an - * atomic queue to serialize the EO's incoming events and perform management - * operations in the EO's receive function. This serialization limits the - * throughput of the atomic queue in question to the equivalent throughput of a - * single core, but since normally EM applications use multiple queues, all - * cores should get events to process and the total throughput will be relative - * to the number of cores running the EM instance. - * - * EM_64_BIT or EM_32_BIT (needs to be defined by the build) defines whether - * (most of) the types used in the API are 32 or 64 bits wide. NOTE, that this - * is a major decision, since it may limit value passing between different - * systems using the defined types directly. Using 64-bits may allow for a more - * efficient underlying implementation, as e.g. more data can be coded in - * 64-bit identifiers. - * - * @section section_2 Principles - * - This API attempts to guide towards a portable application architecture, - * but is not defined for portability by re-compilation. Many things are system - * specific giving more possibilities for efficient use of HW resources. - * - EM does not define event content (one exception, see em_alloc()). This is - * a choice made for performance reasons, since most HW devices use proprietary - * descriptors. This API enables the usage of those directly. - * - EM does not define a detailed queue scheduling discipline or an API to set - * it up with (or actually anything to configure a system). The priority value - * in this API is a (mapped) system specific QoS class label only. - * - In general, EM does not implement a full SW platform or a middleware - * solution, it implements a subset - a driver level part. For best - * performance it can be used directly from the applications. - * - * @section section_3 Inter-system communication - * EM does not define how to communicate with another EM instance or another - * system transparently. However, this is a typical need and the current API - * does have ways to achieve almost transparent communication between systems - * ("event chaining"): - * Since the queue identifier is a system specific value, it is easy to encode - * extra information into it in the EM implementation. For instance it could be - * split into two parts, where the lower part is a local queue id or index and - * the higher part, if not zero, points to another system. The implementation - * of em_send() can detect a non-local queue and forward events to the target - * using any transport mechanism available and once at the target instance the - * lower part is used to map to a local queue. For the application nothing - * changes. The problem is the lack of shared memory between those systems. - * The given event can be fully copied, but it should not have any references to - * sender's local memory. Thus it is not fully transparent if the event contains - * references to local memory (e.g. pointers). - * - * @section section_4 Files - * @subsection sub_1 Generic - * - event_machine.h - * - Event Machine API - * The application should include this file only. 
- * - * Files included by event_machine.h: - * - event_machine_version.h (included by event_machine.h) - * - Event Machine version defines, macros and APIs - * - event_machine_types.h (included by event_machine.h) - * - Event Machine basic types - * - event_machine_event.h (included by event_machine.h) - * - event related functionality - * - event_machine_eo.h (included by event_machine.h) - * - EO related functionality - * - event_machine_event_group.h (included by event_machine.h) - * - event group feature for fork-join type of operations using events - * - event_machine_atomic_group.h (included by event_machine.h) - * - functionality for atomic groups of queues (API 1.1) - * - event_machine_queue.h (included by event_machine.h) - * - queue related functionality - * - event_machine_queue_group.h (included by event_machine.h) - * - queue group related functionality - * - event_machine_error.h (included by event_machine.h) - * - error management related functionality - * - event_machine_core.h (included by event_machine.h) - * - core/thread related functionality - * - event_machine_scheduler.h (included by event_machine.h) - * - scheduling related functionality - * - event_machine_dispatcher.h (included by event_machine.h) - * - dispatching related functionality - * - * @subsection sub_2 HW Specific - * - event_machine_config.h (included by event_machine.h) - * - Event Machine constants and configuration options - * - event_machine_hw_config.h (included by event_machine.h) - * - HW specific constants and configuration options - * - event_machine_hw_types.h (included by event_machine.h) - * - HW specific types - * - event_machine_init.h (included by event_machine.h) - * - Event Machine initialization - * - event_machine_pool.h (included by event_machine.h) - * - event pool related functionality - * - event_machine_hw_specific.h (included by event_machine.h) - * - HW specific functions and macros - * - * @subsection sub_3 Helper - * - event_machine_helper.h - * - optional helper routines - * - * @example hello.c - * @example dispatcher_callback.c - * @example error.c - * @example event_group.c - * @example event_group_abort.c - * @example event_group_assign_end.c - * @example event_group_chaining.c - * @example fractal.c - * @example ordered.c - * @example queue_types_ag.c - * @example queue_types_local.c - * @example queue_group.c - * add-ons: - * @example timer_hello.c - * @example timer_test.c - * performance: - * @example atomic_processing_end.c - * @example pairs.c - * @example queue_groups.c - * @example queues.c - * @example queues_unscheduled.c - * @example queues_local.c - * @example send_multi.c - */ - -/* EM version */ -#include - -/* EM config & types */ -#include -#include - -/* HW specific EM config & types */ -#include -#include - -/* EM error management */ -#include -/* EM Execution Object (EO) related functions */ -#include -/* EM Queue functions */ -#include -/* EM Queue Group functions */ -#include -/* EM Core functions*/ -#include -/* EM Event functions */ -#include -/* EM Atomic Group functions */ -#include -/* EM Event Group functions */ -#include -/* EM Scheduler functions */ -#include -/* EM Dispatcher functions */ -#include - -/* EM Event Pool functions */ -#include -/* EM API hooks */ -#include -/* EM initialization and termination */ -#include -/* Other HW/Platform specific functions */ -#include - -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_H */ +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2015, 
Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT_MACHINE_H +#define EVENT_MACHINE_H + +#pragma GCC visibility push(default) + +/** + * @file + * Event Machine API + * + * This file includes all other needed EM headers + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** @mainpage + * + * @section section_1 General + * Event Machine (EM) is a framework and an architectural abstraction of an + * event driven, multicore optimized, processing concept originally developed + * for the networking data plane. It offers an easy programming concept for + * scalable and dynamically load balanced multicore applications with a very + * low overhead run-to-completion principle. + * + * Events, queues and execution objects (EO) along with the scheduler and the + * dispatcher form the main elements of the EM concept. An event is an + * application specific piece of data (like a message or a network packet) + * describing work, something to do. All processing in EM must be triggered by + * an event. Events are sent to asynchronous application specific EM queues. + * A dispatcher loop is run by a single thread on each core in the EM instance + * ("core" is used here to refer to a core or one HW thread on multi-threaded + * cores). The dispatcher on each core interfaces with the scheduler and asks + * for an event to process. The scheduler then evaluates the state of all the + * EM queues and gives the highest priority event available to the requesting + * dispatcher. The dispatcher looks up which EO owns the queue that the event + * came from and finally calls the EO's registered receive function to deliver + * the event for processing. When the event has been handled and the EO's + * receive function returns, it's again time for the dispatcher on that core to + * request another event from the scheduler and deliver it to the corresponding + * EO. The aforedescribed scenario happens in parallel on all cores running the + * EM instance. 
Events originating from a particular queue might thus be given + * for processing on any core, decided separately for each event by the + * scheduler as the dispatcher on a core requests more work - this is per-event + * dynamic load-balancing. EM contains mechanisms to ensure atomicity and event + * (re-)ordering. + * + * The EM concept has been designed to be highly efficient, operating in a + * run-to-completion manner on each participating core with neither context + * switching nor pre-emption slowing down the event processing loops. + * EM can run on bare metal for best performance or under an operating system + * with special arrangements (e.g. one thread per core with thread affinity). + * + * The concept and the API are intended to allow fairly easy implementations on + * general purpose or networking oriented multicore packet processing SoCs, + * which typically also contain accelerators for packet processing needs. + * Efficient integration with modern HW accelerators has been a major driver of + * the EM concept. + * + * One general principle of the EM API is that the function calls are mostly + * multicore safe. The application still needs to consider parallel processing + * data hazards and race conditions unless explicitly documented in the API for + * the function call in question. For example, one core might ask for a queue + * context while another core changes it, thus the returned context may be + * invalid (valid data, but either the old or the new value is returned). Thus + * modifications of shared state or data should be protected by an atomic + * context (if load balancing is used) or otherwise synchronized by the + * application itself. One simple way to achieve atomic processing is to use an + * atomic queue to serialize the EO's incoming events and perform management + * operations in the EO's receive function. This serialization limits the + * throughput of the atomic queue in question to the equivalent throughput of a + * single core, but since normally EM applications use multiple queues, all + * cores should get events to process and the total throughput will be relative + * to the number of cores running the EM instance. + * + * EM_64_BIT or EM_32_BIT (needs to be defined by the build) defines whether + * (most of) the types used in the API are 32 or 64 bits wide. NOTE, that this + * is a major decision, since it may limit value passing between different + * systems using the defined types directly. Using 64-bits may allow for a more + * efficient underlying implementation, as e.g. more data can be coded in + * 64-bit identifiers. + * + * @section section_2 Principles + * - This API attempts to guide towards a portable application architecture, + * but is not defined for portability by re-compilation. Many things are system + * specific giving more possibilities for efficient use of HW resources. + * - EM does not define event content (one exception, see em_alloc()). This is + * a choice made for performance reasons, since most HW devices use proprietary + * descriptors. This API enables the usage of those directly. + * - EM does not define a detailed queue scheduling discipline or an API to set + * it up with (or actually anything to configure a system). The priority value + * in this API is a (mapped) system specific QoS class label only. + * - In general, EM does not implement a full SW platform or a middleware + * solution, it implements a subset - a driver level part. For best + * performance it can be used directly from the applications. 
+ * + * @section section_3 Inter-system communication + * EM does not define how to communicate with another EM instance or another + * system transparently. However, this is a typical need and the current API + * does have ways to achieve almost transparent communication between systems + * ("event chaining"): + * Since the queue identifier is a system specific value, it is easy to encode + * extra information into it in the EM implementation. For instance it could be + * split into two parts, where the lower part is a local queue id or index and + * the higher part, if not zero, points to another system. The implementation + * of em_send() can detect a non-local queue and forward events to the target + * using any transport mechanism available and once at the target instance the + * lower part is used to map to a local queue. For the application nothing + * changes. The problem is the lack of shared memory between those systems. + * The given event can be fully copied, but it should not have any references to + * sender's local memory. Thus it is not fully transparent if the event contains + * references to local memory (e.g. pointers). + * + * @section section_4 Files + * @subsection sub_1 Generic + * - event_machine.h + * - Event Machine API + * The application should include this file only. + * + * Files included by event_machine.h: + * - event_machine_version.h (included by event_machine.h) + * - Event Machine version defines, macros and APIs + * - event_machine_types.h (included by event_machine.h) + * - Event Machine basic types + * - event_machine_event.h (included by event_machine.h) + * - event related functionality + * - event_machine_eo.h (included by event_machine.h) + * - EO related functionality + * - event_machine_event_group.h (included by event_machine.h) + * - event group feature for fork-join type of operations using events + * - event_machine_atomic_group.h (included by event_machine.h) + * - functionality for atomic groups of queues (API 1.1) + * - event_machine_queue.h (included by event_machine.h) + * - queue related functionality + * - event_machine_queue_group.h (included by event_machine.h) + * - queue group related functionality + * - event_machine_error.h (included by event_machine.h) + * - error management related functionality + * - event_machine_core.h (included by event_machine.h) + * - core/thread related functionality + * - event_machine_scheduler.h (included by event_machine.h) + * - scheduling related functionality + * - event_machine_dispatcher.h (included by event_machine.h) + * - dispatching related functionality + * + * @subsection sub_2 HW Specific + * - event_machine_config.h (included by event_machine.h) + * - Event Machine constants and configuration options + * - event_machine_hw_config.h (included by event_machine.h) + * - HW specific constants and configuration options + * - event_machine_hw_types.h (included by event_machine.h) + * - HW specific types + * - event_machine_init.h (included by event_machine.h) + * - Event Machine initialization + * - event_machine_pool.h (included by event_machine.h) + * - event pool related functionality + * - event_machine_hw_specific.h (included by event_machine.h) + * - HW specific functions and macros + * + * @subsection sub_3 Helper + * - event_machine_helper.h + * - optional helper routines + * - event_machine_debug.h + * - optional debug helpers (only for debug use) + * + * @example hello.c + * @example api_hooks.c + * @example dispatcher_callback.c + * @example error.c + * @example event_group.c + * 
@example event_group_abort.c + * @example event_group_assign_end.c + * @example event_group_chaining.c + * @example fractal.c + * @example ordered.c + * @example queue_types_ag.c + * @example queue_types_local.c + * @example queue_group.c + * add-ons: + * @example timer_hello.c + * @example timer_test.c + * performance: + * @example atomic_processing_end.c + * @example loop.c + * @example loop_multircv.c + * @example loop_refs.c + * @example pairs.c + * @example queue_groups.c + * @example queues.c + * @example queues_local.c + * @example queues_unscheduled.c + * @example scheduling_latency.c + * @example send_multi.c + * @example timer_test_periodic.c + */ + +/* EM version */ +#include + +/* EM config & types */ +#include +#include + +/* HW specific EM config & types */ +#include +#include + +/* EM error management */ +#include +/* EM Execution Object (EO) related functions */ +#include +/* EM Queue functions */ +#include +/* EM Queue Group functions */ +#include +/* EM Core functions*/ +#include +/* EM Event functions */ +#include +/* EM Atomic Group functions */ +#include +/* EM Event Group functions */ +#include +/* EM Scheduler functions */ +#include +/* EM Dispatcher functions */ +#include + +/* EM Event Pool functions */ +#include +/* EM API hooks */ +#include +/* EM initialization and termination */ +#include +/* Other HW/Platform specific functions */ +#include + +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_H */ diff --git a/include/event_machine/README_API b/include/event_machine/README_API index 96f91892..906147e5 100644 --- a/include/event_machine/README_API +++ b/include/event_machine/README_API @@ -5,6 +5,183 @@ EM API Release Notes - See em-odp/README for usage and compilation instructions. - See em-odp/CHANGE_NOTES for changed and added features. +------------------------------------------------------------------------------- +API 3.0 (EM_VERSION_API_MAJOR=3, EM_VERSION_API_MINOR=0) +------------------------------------------------------------------------------- + +1. Event References + (see include/event_machine/api/event_machine_event.h for documentation) + EM APIs: + em_event_t em_event_ref(em_event_t event); + bool em_event_has_ref(em_event_t event); + + Normally, each event is associated with one event handle (em_event_t) - each + event allocation produces a new event (and associated payload data) that can + be processed, sent or freed. When the user EO has allocated or received an + event from a queue, the event payload data may be read and written as needed + by the application. + An exception to the above described scenario happens when using event + references. + An event reference is an additional event handle referring to an existing + event. New references are created with the em_event_ref() API call. The + intent of using multiple references is to avoid event copies. + An event that has multiple references shares its data with the other + reference handles and thus the (shared) data must not be modified. + Reading event data from a reference is allowed. Writes to the event data must + only be done when there is a single event handle left, i.e. when + em_event_has_ref(event) returns 'false'. Results are undefined if these + restrictions are not observed. + The event is freed when the last reference, including the original event, + is freed. 
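+
+   A minimal usage sketch in C (the queue handle 'dst_queue' below is a
+   hypothetical queue created elsewhere, shown only for illustration):
+
+     em_event_t event = em_alloc(128, EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
+     /* ... write the payload via em_event_pointer(event) ... */
+
+     em_event_t ref = em_event_ref(event); /* 2nd handle to the same data */
+
+     if (em_send(ref, dst_queue) != EM_OK)
+       em_free(ref); /* failed send: 'ref' is still owned by the app */
+
+     /* Only read the (shared) data via 'event' while other handles exist,
+      * i.e. while em_event_has_ref(event) returns 'true'.
+      */
+     em_free(event); /* the data is freed when the last handle is freed */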
+   It is not allowed to use event references with event groups since assigning
+   an event that has references to an event group would assign all the
+   references to the event group, resulting in undefined behaviour. E.g. using
+   em_send_group()/em_send_group_multi() to send a reference is wrong.
+
+2. Event Vectors
+   (see include/event_machine/api/event_machine_event.h for documentation)
+   EM APIs:
+     void em_event_vector_free(em_event_t vector_event);
+     uint32_t em_event_vector_tbl(em_event_t vector_event, em_event_t **event_tbl/*out*/);
+     uint32_t em_event_vector_size(em_event_t vector_event);
+     void em_event_vector_size_set(em_event_t vector_event, uint32_t size);
+     uint32_t em_event_vector_max_size(em_event_t vector_event);
+     em_status_t em_event_vector_info(em_event_t vector_event,
+                                      em_event_vector_info_t *vector_info/*out*/);
+   Note: Event Vectors always have the major part of their event type set to
+   'EM_EVENT_TYPE_VECTOR'.
+
+   Vector events contain a table of events.
+   All events in the event-table must be of major type EM_EVENT_TYPE_PACKET.
+   Storing events of another type into the event-table is an error and leads to
+   undefined behaviour.
+   Event vector pools are created with em_pool_create() with the pool event-type
+   set to EM_EVENT_TYPE_VECTOR. Event vectors can then be allocated from vector
+   pools by calling em_alloc(..., vector_pool).
+   To free the vector event along with all events it contains, use em_free() or
+   em_free_multi().
+   To free the vector event only, not the events it contains,
+   use em_event_vector_free().
+
+   Extension APIs for 'em-odp' (needed to set up pktio to use vectors)
+   (see include/event_machine/platform/event_machine_odp_ext.h):
+   - Get the ODP schedule group that corresponds to the given EM queue group:
+       odp_schedule_group_t em_odp_qgrp2odp(em_queue_group_t queue_group);
+   - Map the given scheduled ODP pktin event queues to new EM queues:
+       int em_odp_pktin_event_queues2em(const odp_queue_t odp_pktin_evqueues[/*num*/],
+                                        em_queue_t queues[/*out:num*/], int num);
+
+3. Vector Pools
+   (see include/event_machine/platform/event_machine_pool.h for documentation)
+   EM pools for Event Vectors can now be created via the em_pool_create() API.
+   The pool config struct is extended to allow .event_type = EM_EVENT_TYPE_VECTOR.
+
+   Changes to pool creation via the configuration struct 'em_pool_cfg_t':
+   - em_pool_cfg_t::event_type:
+     The event type determines the pool type to create; it now also supports
+     vectors. Using EM_EVENT_TYPE_VECTOR here creates subpools of type
+     'ODP_POOL_VECTOR'. This kind of EM pool can ONLY be used for creating
+     event vectors.
+   - em_pool_cfg_t::subpool[].size:
+     (when .event_type = EM_EVENT_TYPE_VECTOR)
+     Max number of events in a vector from the subpool, i.e.
+     'number of em_event_t:s in the vector's event-table[]'.
+     EM does not initialize the vector.
+     Note! The meaning of .size is slightly different for "normal" event pools
+     vs. vector pools.
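+
+   A consolidated sketch of items 2-4: creating a vector pool and using a
+   vector event. The 'em_pool_cfg_init()' call, the 'num_subpools' field and
+   the em_pool_create() argument order are recalled from
+   event_machine_pool.h and shown as assumptions, as is the packet event
+   'pkt_event' (allocated/received elsewhere):
+
+     em_pool_cfg_t cfg;
+
+     em_pool_cfg_init(&cfg);
+     cfg.event_type = EM_EVENT_TYPE_VECTOR;
+     cfg.num_subpools = 1;
+     cfg.subpool[0].size = 8;   /* max 8 event handles per vector */
+     cfg.subpool[0].num = 1024; /* number of vectors in the subpool */
+
+     em_pool_t vec_pool = em_pool_create("vec-pool", EM_POOL_UNDEF, &cfg);
+
+     /* room for 8 events, see item 4 below for the alloc 'size' semantics */
+     em_event_t vec = em_alloc(8, EM_EVENT_TYPE_VECTOR, vec_pool);
+     em_event_t *ev_tbl;
+
+     (void)em_event_vector_tbl(vec, &ev_tbl /*out*/);
+     ev_tbl[0] = pkt_event; /* must be of major type EM_EVENT_TYPE_PACKET */
+     em_event_vector_size_set(vec, 1);
+
+     em_free(vec); /* frees the vector AND the events it contains */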
+4. Vector allocation and freeing
+   (see include/event_machine/api/event_machine_event.h for documentation)
+   Event vectors can be allocated from vector pools using the existing
+   APIs em_alloc() or em_alloc_multi() by providing
+   (major) type=EM_EVENT_TYPE_VECTOR and pool='vector-pool':
+   - em_event_t em_alloc(uint32_t size, em_event_type_t type, em_pool_t pool);
+   - int em_alloc_multi(em_event_t events[/*out*/], int num,
+                        uint32_t size, em_event_type_t type, em_pool_t pool);
+   Minor change to the alloc APIs for the type and meaning of the 'size' arg:
+          1) Packet & sw-buf: event size in bytes (B), size > 0.
+   *new*  2) Vector: number of event handles that should fit into
+             the vector table of the event, size > 0.
+   The change was needed when introducing EM vector-events into the API:
+   for vectors, the alloc-size indicates the number of events that should
+   fit into the vector, not a byte-size, so uint32_t makes more sense
+   and is also more in line with the types used during pool-creation.
+
+   Freeing a vector can be done with em_free/_multi(), which frees the vector
+   and all events it contains. Alternatively, use em_event_vector_free(), which
+   only frees the vector and not the events in the contained event table.
+
+5. Helper: EM debug timestamps
+   (see include/event_machine/helper/event_machine_debug.h for documentation)
+   Helper APIs:
+     uint64_t em_debug_timestamp(em_debug_tsp_t tsp);
+
+   Use em_debug_timestamp() to retrieve a 'per EM thread' dispatcher timestamp
+   from EM internal timestamp-points (tsp) that can be compared to the current
+   time to determine e.g. scheduling latency or other overheads.
+   Not for normal application use!
+   Disabled by default (returns 0) and can be enabled via the configure option
+   '--enable-debug-timestamps' or by changing the 'EM_DEBUG_TIMESTAMP_ENABLE'
+   define.
+   The type 'em_debug_tsp_t' defines the available internal EM timestamp
+   points, see the type definition.
+
+6. Other
+   Event: rename functions for major and minor event types:
+   Rename em_get_type_major() to em_event_type_major() and
+   em_get_type_minor() to em_event_type_minor().
+   Create defines for the old names for backwards compatibility.
+
+   Event: em_event_get_size() - returns uint32_t instead of size_t to reflect
+   changes in em_alloc() etc.
+   Hooks: em_api_hook_alloc_t - arg 'size' changed to uint32_t instead of size_t
+   to reflect changes in em_alloc() etc.
+
+-------------------------------------------------------------------------------
+API 2.10 (EM_VERSION_API_MAJOR=2, EM_VERSION_API_MINOR=10)
+-------------------------------------------------------------------------------
+Backwards compatible with EM 2.9 API
+
+1. EM Idle Hooks
+   (see include/event_machine/platform/event_machine_hooks.h and
+   CHANGE_NOTES for more documentation and details)
+   EM APIs:
+     em_status_t em_hooks_register_to_idle(em_idle_hook_to_idle_t func);
+     em_status_t em_hooks_unregister_to_idle(em_idle_hook_to_idle_t func);
+     em_status_t em_hooks_register_to_active(em_idle_hook_to_active_t func);
+     em_status_t em_hooks_unregister_to_active(em_idle_hook_to_active_t func);
+     em_status_t em_hooks_register_while_idle(em_idle_hook_while_idle_t func);
+     em_status_t em_hooks_unregister_while_idle(em_idle_hook_while_idle_t func);
+   Up to 'EM_CALLBACKS_MAX' idle hooks of each type can be registered.
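+
+   A registration sketch (note: the 'to_idle' hook argument list below is an
+   assumption made for illustration - check the em_idle_hook_to_idle_t
+   typedef in event_machine_hooks.h for the exact signature):
+
+     static void my_to_idle_hook(uint64_t to_idle_delay_ns)
+     {
+       (void)to_idle_delay_ns;
+       /* core goes idle: e.g. update stats or enter a power-save state */
+     }
+
+     em_status_t stat = em_hooks_register_to_idle(my_to_idle_hook);
+
+     if (stat != EM_OK) {
+       /* registration failed: idle hooks disabled (EM_IDLE_HOOKS_ENABLE=0)
+        * or 'EM_CALLBACKS_MAX' to_idle hooks already registered
+        */
+     }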
+
+   Alternatively, the idle hooks can be provided as arguments to em_init()
+   at start-up:
+     em_init(em_conf_t::idle_hooks.to_idle_hook
+                                  .to_active_hook
+                                  .while_idle_hook);
+   The arg 'em_conf_t conf' of em_init() has new fields in
+   em_conf_t::idle_hooks{}. The usage of em_init() and em_conf_t is backwards
+   compatible with API v2.9 since em_conf_init(&conf) will init the new fields
+   to NULL/0.
+
+   EM Idle hook functions can be registered for tracking the idle state
+   (ACTIVE/IDLE) of EM cores:
+   'To_idle' hooks are called when the core state changes from ACTIVE to IDLE.
+   'To_active' hooks are called when the core state changes from IDLE to ACTIVE.
+   'While_idle' hooks are called when the core is already in the IDLE state and
+   doesn't get any events from scheduled or local queues.
+
+   Idle hook support is only available when EM_IDLE_HOOKS_ENABLE != 0
+   (see the configure option --enable-idle-hooks).
+
+2. Startup Pools in config file
+   (see config/em-odp.conf and CHANGE_NOTES for more documentation and details)
+   Configure EM event-pools via the EM config file. These pools will be created
+   during EM startup. The changes are mostly related to the EM config file but
+   impact how the em_conf_t::default_pool_cfg given to em_init() is used:
+   - if the default pool configuration is also given in the config file
+     through the 'startup_pools' option, it will override the default pool
+     configuration given via em_conf_t::default_pool_cfg to em_init().
+
 -------------------------------------------------------------------------------
 API 2.9 (EM_VERSION_API_MAJOR=2, EM_VERSION_API_MINOR=9)
 -------------------------------------------------------------------------------
@@ -237,10 +414,10 @@ types and functions requiring small changes to older code.
 - New Error return codes for em_tmo_set_abs() and em_tmo_set_periodic()
   related to timing errors:
-  EM_OK          success (event taken)
-  EM_ERR_TOONEAR failure, tick value is past or too close to current time
-  EM_ERR_TOOFAR  failure, tick value exceeds timer capability (too far ahead)
-  (other_codes)  other failure
+  EM_OK success (event taken)
+  EM_ERR_TOONEAR failure, tick value is past or too close to current time
+  EM_ERR_TOOFAR failure, tick value exceeds timer capability (too far ahead)
+  (other_codes) other failure
   New Error return codes for em_tmo_ack():
   EM_OK success
   EM_ERR_CANCELED timer has been cancelled
@@ -322,7 +499,7 @@ types requiring small changes to older code.
   Inquire timer capabilities for a specific resolution or maximum timeout.
 - em_tmo_set_periodic(): Allow an absolute start time for periodic timeouts
-  with the new API em_tmo_set_periodic(..., em_timer_tick_t start_abs, ...)
+  with the new API em_tmo_set_periodic(..., em_timer_tick_t start_abs, ...)
   Note: Prefer the new em_tmo_set_periodic() for activating/setting periodic
   timeouts over em_tmo_set_rel(). Use em_tmo_set_abs() or em_tmo_set_rel()
   for oneshot timeouts.
diff --git a/include/event_machine/api/event_machine_event.h b/include/event_machine/api/event_machine_event.h
index eadd43c2..52589ff2 100644
--- a/include/event_machine/api/event_machine_event.h
+++ b/include/event_machine/api/event_machine_event.h
@@ -1,727 +1,1033 @@
-/*
- * Copyright (c) 2015-2021, Nokia Solutions and Networks
- * All rights reserved.
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_EVENT_H_ -#define EVENT_MACHINE_EVENT_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup em_event Events - * Operations on an event. - * @{ - * - * All application processing is driven by events in the Event Machine. An event - * describes a piece of work or data to be processed. The structure of an event - * is implementation and event type specific: it may be a directly accessible - * buffer of memory, a descriptor containing a list of buffer pointers, - * a descriptor of a packet buffer etc. - * - * Applications use the event type to interpret the event structure. - * - * Events follow message passing semantics: an event has to be allocated using - * the provided API (em_alloc()) or received through queues by an EO callback - * function after which the event is owned by the application. Event ownership - * is transferred back to the system by using em_send() or em_free(). - * An event not owned by the application should not be touched. - * - * Since em_event_t may not carry a direct pointer value to the event structure, - * em_event_pointer() must be used to translate an event to an event structure - * pointer (for maintaining portability). - * - * Additionally, an event may contain a user area separate from the event - * payload. The size of the event user area is set when creating the event pool - * from which the event is allocated. The user area is a fixed size (per pool) - * data area into which event related state data can be stored without having - * to access and change the payload. Note that the size of the event user area - * can be zero(0), depending on event pool configuration. - * Note that the user area content is not initialized by EM, neither em_alloc() - * nor em_free() will touch it and thus it might contain old user data set the - * last time the area was used during a previous allocation of the same event. - * Since the user area is not part of the event payload, it will not be - * transmitted as part of a packet etc. 
- * A user area ID can further be used to identify the user area contents. - * The event user area ID is stored outside of the user area itself and is thus - * always available, even if the size of the user area data is set to zero(0). - * See em_pool_create(), em_event_uarea_get(), em_event_uarea_id_get/set() and - * em_event_uarea_info() for more information on the event user area and its - * associated ID. - * - * em_event_t is defined in event_machine_types.h - * - * @see em_event_pointer() - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include <event_machine/api/event_machine_types.h> -#include <event_machine/platform/event_machine_hw_types.h> - -/** - * Allocate an event. - * - * The memory address of the allocated event is system specific and can depend - * on the given pool, event size and type. The returned event (handle) may refer - * to a memory buffer or a HW specific descriptor, i.e. the event structure is - * system specific. - * - * Use em_event_pointer() to convert an event (handle) to a pointer to the - * event payload. EM does not initialize the payload data. - * - * EM_EVENT_TYPE_SW with minor type '0' is reserved for direct portability - - * it is always guaranteed to produce an event with contiguous payload that can - * directly be used by the application up to the given size (no HW specific - * descriptors etc. are visible). This event payload will be 64-bit aligned - * by default (unless explicitly configured otherwise). - * - * EM_POOL_DEFAULT can be used as a pool handle if there's no need to use a - * specific event pool (up to the size- or event limits of that pool). - * - * Additionally it is guaranteed, that two separate buffers never share a cache - * line (to avoid false sharing). - * - * @param size Event size in octets (size > 0) - * @param type Event type to allocate - * @param pool Event pool handle - * - * @return The allocated event or EM_EVENT_UNDEF on error. - * - * @see em_free(), em_send(), em_event_pointer(), em_receive_func_t(), - * em_event_clone() - */ -em_event_t em_alloc(size_t size, em_event_type_t type, em_pool_t pool); - -/** - * Allocate multiple events. - * - * Similar to em_alloc(), but allows allocation of multiple events, with same - * properties, with one function call. - * The em_alloc_multi() API function will try to allocate the requested number - * ('num') of events but may fail to do so, e.g. if the pool has run out of - * events, and will return the actual number of events that were successfully - * allocated from the given pool. - * - * @param[out] events Output event array, events are allocated and filled by - * em_alloc_multi(). The given array must fit 'num' events. - * @param num Number of events to allocate and write into 'events[]' - * @param size Event size in octets (size > 0) - * @param type Event type to allocate - * @param pool Event pool handle - * - * @return Number of events actually allocated from the pool (0 ... num) and - * written into the output array 'events[]'. - */ -int em_alloc_multi(em_event_t events[/*out*/], int num, - size_t size, em_event_type_t type, em_pool_t pool); - -/** - * Free an event. - * - * The em_free() function transfers ownership of the event back to the system - * and the application must not touch the event (or related memory buffers) - * after calling it. - * - * It is assumed that the implementation can detect the event pool that - * the event was originally allocated from. - * - * The application must only free events it owns. For example, the sender must - * not free an event after sending it.
- * - * @param event Event to be freed - * - * @see em_alloc(), em_receive_func_t() - */ -void em_free(em_event_t event); - -/** - * Free multiple events. - * - * Similar to em_free(), but allows freeing of multiple events with one - * function call. - * - * @param[in] events Array of events to be freed - * @param num The number of events in the array 'events[]' - */ -void em_free_multi(const em_event_t events[], int num); - -/** - * Send an event to a queue. - * - * The event must have been allocated with em_alloc(), or received via an EO - * receive-function. The sender must not touch the event after calling em_send() - * as the ownership has been transferred to the system or possibly to the next - * receiver. If the return status is *not* EM_OK, the ownership has not been - * transferred and the application is still responsible for the event (e.g. may - * free it). - * - * EM does not currently define guaranteed event delivery, i.e. EM_OK return - * value only means the event was accepted for delivery. It could still be lost - * during delivery (e.g. due to a removed queue or system congestion, etc). - * - * @param event Event to be sent - * @param queue Destination queue - * - * @return EM_OK if successful (accepted for delivery). - * - * @see em_alloc() - */ -em_status_t em_send(em_event_t event, em_queue_t queue); - -/** - * Send multiple events to a queue. - * - * As em_send, but multiple events can be sent with one call for potential - * performance gain. - * The function returns the number of events actually sent. A return value equal - * to the given 'num' means that all events were sent. A return value less than - * 'num' means that only the first 'num' events were sent and the rest must be - * handled by the application. - * - * @param events Array of events to send - * @param num Number of events. - * The array 'events[]' must contain 'num' entries. - * @param queue Destination queue - * - * @return number of events successfully sent (equal to num if all successful) - * - * @see em_send() - */ -int em_send_multi(const em_event_t events[], int num, em_queue_t queue); - -/** - * Get a pointer to the event structure - * - * Returns a pointer to the event structure or NULL. The event structure is - * implementation and event type specific. It may be a directly accessible - * buffer of memory, a descriptor containing a list of buffer pointers, - * a descriptor of a packet buffer, etc. - * - * @param event Event from receive/alloc - * - * @return Event pointer or NULL - */ -void * -em_event_pointer(em_event_t event); - -/** - * Returns the size of the given event - * - * The event content is not defined by the OpenEM API, thus this returns an - * event type specific value (the exception and a defined case is - * EM_EVENT_TYPE_SW + minor 0, in which case the usable size of the allocated - * contiguous memory buffer is returned). - * - * @param event Event handle - * - * @return Event type specific value typically payload size (bytes). - */ -size_t em_event_get_size(em_event_t event); - -/** - * @brief Returns the EM event-pool the event was allocated from. - * - * The EM event-pool for the given event can only be obtained if the event has - * been allocated from a pool created with em_pool_create(). For other pools, - * e.g. external (to EM) pktio pools, EM_POOL_UNDEF is returned. - * - * @param event Event handle - * - * @return The EM event-pool handle or EM_POOL_UNDEF if no EM pool is found. 
- * EM_POOL_UNDEF is returned also for a valid event that has been - * allocated from a pool external to EM (no error is reported). - */ -em_pool_t em_event_get_pool(em_event_t event); - -/** - * Set the event type of an event - * - * This will not create a new event but the existing event might be modified. - * The operation may fail if the new type is not compatible with the old one. - * As event content is not defined by the OpenEM API the compatibility is - * system specific. - * - * @param event Event handle - * @param newtype New type for the event - * - * @return EM_OK on success - * - * @see em_alloc() - */ -em_status_t em_event_set_type(em_event_t event, em_event_type_t newtype); - -/** - * Get the event type of an event - * - * Returns the type of the given event. - * - * @param event Event handle - * - * @return event type, EM_EVENT_TYPE_UNDEF on error - */ -em_event_type_t em_event_get_type(em_event_t event); - -/** - * Get the event types of multiple events - * - * Writes the event type of each given event into an output type-array and - * returns the number of entries written. - * Note, if 'events[num]' are all of the same type then 'types[num]' will - * contain 'num' same entries. - * - * @param events Event handles: events[num] - * @param[out] types Event types (output array): types[num] - * (types[i] is the type of events[i]) - * @param num Number of events and output types. - * The array 'events[]' must contain 'num' entries and the - * output array 'types[]' must have room for 'num' entries. - * - * @return Number of event types (0...num) written into 'types[]'. - * The return value (always >=0) is usually 'num' and thus '<num' is - * only seen in error scenarios when the type of event[i] could not be - * obtained. The function stops and returns on the first error. - */ -int em_event_get_type_multi(const em_event_t events[], int num, - em_event_type_t types[/*out*/]); - -/** - * Get the number of events that have the same event type. - * - * Returns the number of consecutive events from the start of the array - * 'events[]' that share the same event type; that type is output via - * 'same_type'. - * - * @param events Event handles: events[num] - * @param num Number of events. - * The array 'events[]' must contain 'num' entries. - * @param[out] same_type Event type pointer for output - * - * @return Number of events that share the same event type (always - * >=0), includes and starts from events[0]. - * The return value is usually '>=1' and thus '0' is only seen in - * error scenarios when the type of the first event could not be - * obtained or if the given 'num=0'. - * The function stops and returns on the first error. - */ -int em_event_same_type_multi(const em_event_t events[], int num, - em_event_type_t *same_type /*out*/); - -/** - * Mark the event as "sent". - * - * Indicates a user-given promise to EM that the event will later appear into - * 'queue' by some means other than an explicit user call to em_send...(). - * Calling em_event_mark_send() transfers event ownership away from the user, - * and thus the event must not be used or touched by the user anymore (the only - * exception is (hw) error recovery where the "sent" state can be cancelled by - * using em_event_unmark_send() - dangerous!). - * - * Example use case: - * A user provided output-callback function associated with a queue of type - * 'EM_QUEUE_TYPE_OUTPUT' can use this API when configuring a HW-device to - * deliver the event back into EM. The HW will eventually "send" the event and - * it will "somehow" again appear into EM for the user to process. - * - * EM will, after this API-call, treat the event as "sent" and any further API - * operations or usage might lead to EM errors (depending on the error-check - * level), e.g. em_send/free/tmo_set/ack(event) etc. is forbidden after - * em_event_mark_send(event). - * - * @note Registered API-callback hooks for em_send...() (em_api_hook_send_t) - * will NOT be called. - * @note Marking an event "sent" with an event group (corresponding to - * em_send_group()) is currrently NOT supported. - * - * @param event Event to be marked as "sent" - * @param queue Destination queue (must be scheduled, i.e.
atomic, - * parallel or parallel-ordered) - * - * @return EM_OK if successful - * - * @see em_send(), em_event_unmark_send() - */ -em_status_t em_event_mark_send(em_event_t event, em_queue_t queue); - -/** - * Unmark an event previously marked as "sent" (i.e mark as "unsent") - * - * @note This is for recovery situations only and can potenially crash the - * application if used incorrectly! - * - * Revert an event's "sent" state, as set by em_event_mark_send(), back to the - * state before the mark-send function call. - * Any further usage of the event after em_event_mark_send(), by EM or - * the user, will result in error when calling em_event_unmark_send() since the - * state has become unrecoverable. - * => the only allowed EM API call after em_event_mark_send() is - * em_event_unmark_send() if it is certain that the event, due to some - * external error, will never be sent into EM again otherwise. - * Calling em_event_unmark_send() transfers event ownership back to the user - * again. - * - * @note Unmark-send and unmark-free are the only valid cases of using an event - * that the user no longer owns - all other such uses leads to fatal error - * - * @code - * em_status_t err; - * hw_err_t hw_err; - * - * // 'event' owned by the user - * err = em_event_mark_send(event, queue); - * if (err != EM_OK) - * return err; // NOK - * // 'event' no longer owned by the user - don't touch! - * - * hw_err = config_hw_to_send_event(...hw-cfg..., event, queue); - * if (hw_err) { - * // hw config error - the event can be recovered if it is - * // certain that the hw won't send that same event. - * // note: the user doesn't own the event here and actually - * // uses an obsolete event handle to recover the event. - * err = em_event_unmark_send(event); - * if (err != EM_OK) - * return err; // NOK - * // 'event' recovered, again owned by the user - * em_free(event); - * } - * @endcode - * - * @param event Event previously marked as "sent" with em_event_mark_send(), - * any other case will be invalid! - * - * @return EM_OK if successful - * - * @see em_send(), em_event_mark_send() - */ -em_status_t em_event_unmark_send(em_event_t event); - -/** - * @brief Mark the event as "free". - * - * Indicates a user-given promise to EM that the event will be freed back into - * the pool it was allocated from e.g. by HW or device drivers (external to EM). - * Calling em_event_mark_free() transfers event ownership away from the user, - * and thus the event must not be used or touched by the user anymore. - * - * Example use case: - * A user provided output-callback function associated with a queue of type - * 'EM_QUEUE_TYPE_OUTPUT' can use this API when configuring a HW-device or - * device-driver to free the event (outside of EM) after transmission. - * - * EM will, after this API-call, treat the event as "freed" and any further API - * operations or usage might lead to EM errors (depending on the error-check - * level), e.g. em_send/free/tmo_set/ack(event) etc. is forbidden after - * em_event_mark_free(event). - * - * @note Registered API-callback hooks for em_free/_multi() (em_api_hook_free_t) - * will NOT be called. - * - * @param event Event to be marked as "free" - * - * @see em_free(), em_event_unmark_free() - */ -void em_event_mark_free(em_event_t event); - -/** - * @brief Unmark an event previously marked as "free" - * (i.e mark as "allocated" again). - * - * @note This is for recovery situations only and can potenially crash the - * application if used incorrectly! 
Unmarking the free-state of an event - * that has already been freed will lead to fatal error. - * - * Revert an event's "free" state, as set by em_event_mark_free(), back to the - * state before the mark-free function call. - * Any further usage of the event after em_event_mark_free(), by EM or the user, - * will result in error when calling em_event_unmark_free() since the state has - * become unrecoverable. - * => the only allowed EM API call after em_event_mark_free() (for a certain - * event) is em_event_unmark_free() when it is certain that the event, due to - * some external error, will not be freed otherwise and must be recovered - * back into the EM-domain so that calling em_free() by the user is possible. - * Calling em_event_unmark_free() transfers event ownership back to the user - * again. - * - * @note Unmark-send and unmark-free are the only valid cases of using an event - * that the user no longer owns - all other such uses leads to fatal error - * - * @code - * em_status_t err; - * hw_err_t hw_err; - * - * // 'event' owned by the user - * em_event_mark_free(event); - * // 'event' no longer owned by the user - don't touch! - * - * hw_err = config_hw_to_transmit_event(...hw-cfg..., event); - * if (hw_err) { - * // hw config error - the event can be recovered if it is - * // certain that the hw won't free that same event. - * // note: the user doesn't own the event here and actually - * // uses an obsolete event handle to recover the event. - * em_event_unmark_free(event); - * // 'event' recovered, again owned by the user - * em_free(event); - * } - * @endcode - * - * @param event Event previously marked as "free" with - * em_event_mark_free/_multi(), any other usecase is invalid! - * - * @see em_free(), em_event_mark_free() - */ -void em_event_unmark_free(em_event_t event); - -/** - * @brief Mark multiple events as "free". - * - * Similar to em_event_mark_free(), but allows the marking of multiple events - * as "free" with one function call. - * - * @note Registered API-callback hooks for em_free/_multi() (em_api_hook_free_t) - * will NOT be called. - * - * @param[in] events Array of events to be marked as "free" - * @param num The number of events in the array 'events[]' - */ -void em_event_mark_free_multi(const em_event_t events[], int num); - -/** - * @brief Unmark multiple events previously marked as "free". - * - * @note This is for recovery situations only and can potenially crash the - * application if used incorrectly! - * - * Similar to em_event_unmark_free(), but allows to do the "free"-unmarking of - * multiple events with one function call. - * - * @param[in] events Events previously marked as "free" with - * em_event_mark_free/_multi(), any other usecase is invalid! - * @param num The number of events in the array 'events[]' - */ -void em_event_unmark_free_multi(const em_event_t events[], int num); - -/** - * @brief Clone an event. - * - * Allocate a new event with identical payload to the given event. - * - * @note Event metadata, internal headers and state are _NOT_ cloned - * (e.g. the event-group of a cloned event is EM_EVENT_GROUP_UNDEF etc). - * - * @param event Event to be cloned, must be a valid event. - * @param pool Optional event pool to allocate the cloned event from. - * Use 'EM_POOL_UNDEF' to clone from the same pool as 'event' - * was allocated from. - * The event-type of 'event' must be suitable for allocation - * from 'pool' (e.g. 
EM_EVENT_TYPE_PACKET can not be - * allocated from a pool supporting only EM_EVENT_TYPE_SW) - * - * @return The cloned event or EM_EVENT_UNDEF on error. - * - * @see em_alloc(), em_free() - */ -em_event_t em_event_clone(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/); - -/** - * @brief Get a pointer to the event user area, optionally along with its size. - * - * The event user area is a fixed sized area located within the event metadata - * (i.e. outside of the event payload) that can be used to store application - * specific event related data without the need to adjust the payload. - * The event user area is configured during EM event pool creation and thus the - * size of the user area is set per pool. - * - * Note that the user area content is not initialized by EM, neither em_alloc() - * nor em_free() will touch it and thus it might contain old user data set the - * last time the area was used during a previous allocation of the same event. - * Since the user area is not part of the event payload, it will not be - * transmitted as part of a packet etc. - * - * @param event Event handle to get the user area of - * @param[out] size Optional output arg into which the user area size is - * stored. Use 'size=NULL' if no size information is needed. - * - * @return a pointer to the event user area - * @retval NULL on error or if the event contains no user area - * - * @see em_pool_create() for pool specific configuration and - * the EM runtime config file em-odp.conf for the default value: - * 'pool.user_area_size'. - * @see em_event_uarea_info() if both user area ptr and ID is needed - */ -void *em_event_uarea_get(em_event_t event, size_t *size/*out*/); - -/** - * @brief Get the event user area ID along with information if it has been set - * - * The event user area can be associated with an optional ID that e.g. can be - * used to identify the contents of the actual user area data. The ID is stored - * outside of the actual user area data and is available for use even if the - * user area size has been set to zero(0) for the pool the event was allocated - * from. - * - * This function is used to determine whether the user area ID has been set - * earlier and to retrieve the ID in the case it has been set. - * EM will initialize 'ID isset = false' when allocating a new event (indicating - * that the ID is not set). Use em_event_uarea_id_set() to set the ID. - * - * @param event Event handle to get the user area ID and "set"-status of - * @param[out] isset Optional output arg: has the ID been set previously? - * At least one of 'isset' and 'id' must be given (or both). - * @param[out] id Optional output arg into which the user area ID is - * stored if it has been set before. The output arg 'isset' - * should be used to determine whether 'id' has been set. - * Note: 'id' will not be touched if the ID has not been set - * earlier (i.e. when 'isset' is 'false'). - * At least one of 'isset' and 'id' must be given (or both). - * - * @return EM_OK if successful - * - * @see em_event_uarea_id_set(), em_event_uarea_get() - * @see em_event_uarea_info() if both user area ptr and ID is needed - */ -em_status_t em_event_uarea_id_get(em_event_t event, bool *isset /*out*/, - uint16_t *id /*out*/); - -/** - * @brief Set the event user area ID - * - * The event user area can be associated with an optional ID that e.g. can be - * used to identify the contents of the actual user area data. 
The ID is stored - * outside of the actual user area data and is available for use even if the - * user area size has been set to 0 for the pool the event was allocated from. - * - * This function is used to set the event user area ID for the given event. - * The 'set' operation overwrites any ID stored earlier. - * Use em_event_uarea_id_get() to check whether an ID has been set earlier and - * to retrieve the ID. - * - * @param event Event handle for which to set the user area ID - * @param id The user area ID to set - * - * @return EM_OK if successful - * - * @see em_event_uarea_id_get(), em_event_uarea_get(), em_event_uarea_info() - */ -em_status_t em_event_uarea_id_set(em_event_t event, uint16_t id); - -/** - * @brief Event user area information filled by em_event_uarea_info() - * - * Output structure for obtaining information about an event's user area. - * Information related to the user area will be filled into this struct by - * the em_event_uarea_info() API function. - * - * A user area is only present if the EM pool the event was allocated from - * was created with user area size > 0, see em_pool_cfg_t and em_pool_create(). - * The user area ID can always be used (set/get), even when the size of the - * user area is zero(0). - * - * @see em_event_uarea_info(), em_event_uarea_id_set() - */ -typedef struct { - /** Pointer to the event user area, NULL if event has no user area */ - void *uarea; - /** Size of the event user area, zero(0) if event has no user area */ - size_t size; - - /** Event user area ID (ID can be set/get even when no uarea present) */ - struct { - /** Boolean: has the ID been set previously? true/false */ - bool isset; - /** Value of the user area ID, if (and only if) set before. - * Only inspect '.id.value' when '.id.isset=true' indicating - * that ID has been set earlier by em_event_uarea_id_set(). - */ - uint16_t value; - } id; -} em_event_uarea_info_t; - -/** - * @brief Get the event user area information for a given event. - * - * Obtain information about the event user area for a certain given event. - * Information containing the user area pointer, size, as well as the ID is - * output via the 'uarea_info' struct. - * This API function combines the functionality of em_event_uarea_get() and - * em_event_uarea_id_get() for use cases where both the user area pointer as - * well as the ID is needed. Calling one API function instead of two might be - * faster due to a fewer checks and internal conversions. - * - * The event user area is a fixed sized area located within the event metadata - * (i.e. outside of the event payload) that can be used to store application - * specific event related data without the need to adjust the payload. - * The event user area is configured during EM event pool creation and thus the - * size of the user area is set per pool. - * - * Note that the user area content is not initialized by EM, neither em_alloc() - * nor em_free() will touch it and thus it might contain old user data set the - * last time the area was used during a previous allocation of the same event. - * Since the user area is not part of the event payload, it will not be - * transmitted as part of a packet etc. - * - * The event user area can be associated with an optional ID that can be used to - * identify the contents of the actual user area data. The ID is stored - * outside of the actual user area data and is available for use even if the - * user area size has been set to zero(0) for the pool the event was allocated - * from. 
EM will initialize 'uarea_info.id.isset = false' when allocating - * a new event (indicating that the ID is not set). - * - * @param event Event handle to get the user area information of. - * @param[out] uarea_info Output struct into which the user area information - * is stored. - * - * @return EM status code incidating success or failure of the operation. - * @retval EM_OK Operation successful. - * @retval Other Operation FAILED and no valid user area info could - * be obtained, 'uarea_info' is all NULL/zero(0) in this case. - * - * @see em_pool_create() for pool specific configuration and - * the EM runtime config file em-odp.conf for the default value: - * 'pool.user_area_size'. - * @see em_event_uarea_get(), em_event_uarea_id_get() - */ -em_status_t em_event_uarea_info(em_event_t event, - em_event_uarea_info_t *uarea_info /*out*/); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_EVENT_H_ */ +/* + * Copyright (c) 2015-2023, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT_MACHINE_EVENT_H_ +#define EVENT_MACHINE_EVENT_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * @defgroup em_event Events + * Operations on an event. + * @{ + * + * All application processing is driven by events in the Event Machine. An event + * describes a piece of work or data to be processed. The structure of an event + * is implementation and event type specific: it may be a directly accessible + * buffer of memory, packet headers and data, a vector or user specified content + * etc. + * + * Applications use the event type to interpret the event structure. The event + * type consists of a major and a minor part: the major part specifies the + * actual type or structure of the event (sw buf, packet, vector etc.) while the + * minor part is user specific and can be used to distinguish between different + * use cases of the event. 
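+ *
+ * As an illustrative sketch (not part of the formal API description): an EO
+ * receive-function could branch on the major type of a received 'event' as
+ * follows:
+ * @code
+ *	em_event_type_t type = em_event_get_type(event);
+ *
+ *	switch (em_event_type_major(type)) {
+ *	case EM_EVENT_TYPE_SW:
+ *	case EM_EVENT_TYPE_PACKET:
+ *		// contiguous data - access it via em_event_pointer()
+ *		break;
+ *	case EM_EVENT_TYPE_VECTOR:
+ *		// table of events - access it via em_event_vector_tbl()
+ *		break;
+ *	default:
+ *		break;
+ *	}
+ * @endcode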
+ + * Events follow message passing semantics: an event has to be allocated using + * the provided API (em_alloc()) or received through queues by an EO callback + * function after which the event is owned by the application. Event ownership + * is transferred back to the system by using em_send() or em_free(). + * An event not owned by the application must not be touched. + * + * The event handle, of type em_event_t, is not a direct pointer to the event + * structure, hence EM API functions must be used to get access to the contained + * data: for events of (major) type sw buffer or packet use em_event_pointer() + * while for vector events the contained array of event handles must be accessed + * with em_event_vector_tbl() instead. Use the (major part of the) event type to + * distinguish between vectors and other types of events. + * + * ### Event References + * Normally, each event is associated with one event handle (em_event_t) - each + * event allocation produces a new event (and associated payload data) that can + * be processed, sent or freed. When the user EO has allocated or received an + * event from a queue, the event payload data may be read and written as needed + * by the application. + * An exception to the above described scenario happens when using event + * references. + * An event reference is an additional event handle referring to an existing + * event. New references are created with the em_event_ref() API call. The + * intent of using multiple references is to avoid event copies. + * An event that has multiple references shares its data with the other + * reference handles and thus the (shared) data must not be modified. + * Reading event data from a reference is allowed. Writes to the event data must + * only be done when there is a single event handle left, i.e. when + * em_event_has_ref(event) returns 'false'. Results are undefined if these + * restrictions are not observed. + * The event is freed when the last reference, including the original event, + * is freed. + * It is not allowed to use event references with event groups since assigning + * an event that has references to an event group would assign all the + * references to the event group resulting in undefined behaviour. E.g. using + * em_send_group()/em_send_group_multi() to send a reference is wrong. + * + * ### Event User Area + * Additionally, an event may contain a user area separate from the event + * payload. The size of the event user area is set when creating the event pool + * from which the event is allocated. The user area is a fixed size (per pool) + * data area into which event related state data can be stored without having + * to access and change the payload. Note that the size of the event user area + * can be zero(0), depending on event pool configuration. + * Note also that the user area content is not initialized by EM, neither + * em_alloc() nor em_free() will touch it and thus it might contain old user + * data set the last time the area was used during a previous allocation of the + * same event. Since the user area is not part of the event payload, it will not + * be transmitted as part of a packet etc. + * A user area ID can further be used to identify the user area contents. + * The event user area ID is stored outside of the user area itself and is thus + * always available, even if the size of the user area data is set to zero(0).
+ + * See em_pool_create(), em_event_uarea_get(), em_event_uarea_id_get/set() and + * em_event_uarea_info() for more information on the event user area and its + * associated ID. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include <event_machine/api/event_machine_types.h> +#include <event_machine/platform/event_machine_hw_types.h> + +/** + * Allocate an event. + * + * The memory address of the allocated event is system specific and can depend + * on the given pool, event size and type. The returned event (handle) may refer + * to a memory buffer, packet or vector etc., i.e. the event structure is event + * type specific. + * + * Use em_event_pointer(), or for vectors em_event_vector_tbl(), to convert an + * event (handle) to a pointer to the event payload or access the vector table. + * EM does not initialize the payload data. + * + * EM_EVENT_TYPE_SW with minor type '0' is reserved for direct portability - + * it is always guaranteed to produce an event with contiguous payload that can + * directly be used by the application up to the given size (no HW specific + * descriptors etc. are visible). This event payload will be 64-bit aligned + * by default (unless explicitly configured otherwise). + * + * EM_POOL_DEFAULT can be used as a pool handle if there's no need to use a + * specific event pool (up to the size- or event limits of that pool). + * + * Additionally it is guaranteed that two separate buffers never share a cache + * line (to avoid false sharing). + * + * @param size 1) Packet & sw-buf: event size in bytes (B), size > 0. + * 2) Vector: number of event handles that should fit into + * the vector table of the event, size > 0. + * @param type Event type to allocate. The event major-type must be + * supported by given 'pool'. Vector events must be + * allocated with major type EM_EVENT_TYPE_VECTOR from a + * pool created to support vectors. + * @param pool Event pool handle. The pool must have been created to + * support events of type 'em_event_type_major(type)' + * + * @return The allocated event or EM_EVENT_UNDEF on error. + * + * @see em_free(), em_send(), em_event_pointer(), em_receive_func_t, + * em_event_clone() etc. + * @see additionally for vector events: em_event_vector_tbl(), + * em_event_vector_free() etc. + */ +em_event_t em_alloc(uint32_t size, em_event_type_t type, em_pool_t pool); + +/** + * Allocate multiple events. + * + * Similar to em_alloc(), but allows allocation of multiple events, with same + * properties, with one function call. + * The em_alloc_multi() API function will try to allocate the requested number + * ('num') of events but may fail to do so, e.g. if the pool has run out of + * events, and will return the actual number of events that were successfully + * allocated from the given pool. + * + * @param[out] events Output event array, events are allocated and filled by + * em_alloc_multi(). The given array must fit 'num' events. + * @param num Number of events to allocate and write into 'events[]' + * @param size 1) Packet & sw-buf: event size in bytes (B), size > 0. + * 2) Vector: number of event handles that should fit into + * the vector table of the event, size > 0. + * @param type Event type to allocate. The event major-type must be + * supported by given 'pool'. Vector events must be + * allocated with major type EM_EVENT_TYPE_VECTOR from a + * pool created to support vectors. + * @param pool Event pool handle. The pool must have been created to + * support events of type 'em_event_type_major(type)' + * + * @return Number of events actually allocated from the pool (0 ... num) and + * written into the output array 'events[]'.
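+ *
+ * A minimal usage sketch (illustrative only; assumes 'pool' is a valid handle
+ * to a pool created with em_pool_create() that supports EM_EVENT_TYPE_SW):
+ * @code
+ *	em_event_t events[32];
+ *	int num = em_alloc_multi(events, 32, 256, EM_EVENT_TYPE_SW, pool);
+ *
+ *	// 'num' may be less than the requested 32 if the pool is running low
+ *	for (int i = 0; i < num; i++) {
+ *		void *data = em_event_pointer(events[i]);
+ *		// ... fill at most 256 bytes via 'data' ...
+ *	}
+ * @endcode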
+ */ +int em_alloc_multi(em_event_t events[/*out*/], int num, + uint32_t size, em_event_type_t type, em_pool_t pool); + +/** + * Free an event. + * + * The em_free() function transfers ownership of the event back to the system + * and the application must not touch the event (or related memory buffers) + * after calling it. + * + * It is assumed that the implementation can detect the event pool that + * the event was originally allocated from. + * + * The application must only free events it owns. For example, the sender must + * not free an event after sending it. + * + * @note Freeing a vector event (of type EM_EVENT_TYPE_VECTOR) with this API + * will also free the events contained in the vector's event-table. + * To free only the vector event itself, use em_event_vector_free(). + * + * @param event Event to be freed + * + * @see em_alloc(), em_free_multi(), em_event_vector_free() + */ +void em_free(em_event_t event); + +/** + * Free multiple events. + * + * Similar to em_free(), but allows freeing of multiple events with one + * function call. + * + * @note Freeing vector events (of type EM_EVENT_TYPE_VECTOR) with this API + * will also free the events contained in the vectors' event-table. + * To free only the vector event itself, use em_event_vector_free(). + * + * @param[in] events Array of events to be freed + * @param num The number of events in the array 'events[]' + */ +void em_free_multi(const em_event_t events[], int num); + +/** + * Send an event to a queue. + * + * The event must have been allocated with em_alloc(), or received via an EO + * receive-function. The sender must not touch the event after calling em_send() + * as the ownership has been transferred to the system or possibly to the next + * receiver. If the return status is *not* EM_OK, the ownership has not been + * transferred and the application is still responsible for the event (e.g. may + * free it). + * + * EM does not currently define guaranteed event delivery, i.e. EM_OK return + * value only means the event was accepted for delivery. It could still be lost + * during delivery (e.g. due to a removed queue or system congestion, etc). + * + * @param event Event to be sent + * @param queue Destination queue + * + * @return EM_OK if successful (accepted for delivery). + * + * @see em_alloc() + */ +em_status_t em_send(em_event_t event, em_queue_t queue); + +/** + * Send multiple events to a queue. + * + * As em_send(), but multiple events can be sent with one call for potential + * performance gain. + * The function returns the number of events actually sent. A return value equal + * to the given 'num' means that all events were sent. A return value less than + * 'num' means that only the first (returned number of) events were sent and the + * rest must be handled by the application. + * + * @param events Array of events to send + * @param num Number of events. + * The array 'events[]' must contain 'num' entries. + * @param queue Destination queue + * + * @return number of events successfully sent (equal to num if all successful) + * + * @see em_send() + */ +int em_send_multi(const em_event_t events[], int num, em_queue_t queue); + +/** + * Get a pointer to the event structure/data. + * + * Returns a pointer to the event structure or NULL. The event structure is + * implementation and event type specific. It may be a directly accessible + * buffer of memory, packet headers and data or user specified content etc. + * Use em_event_get_type() and em_event_type_major() to determine the type + * of the event.
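+ *
+ * E.g. (sketch only; 'event' is assumed to be a valid sw-buf or packet event
+ * owned by the caller):
+ * @code
+ *	void *data = em_event_pointer(event);
+ *
+ *	if (data == NULL)
+ *		return; // unsupported event type or other error
+ *	// ... read/write the payload via 'data' ...
+ * @endcode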
+ * + * @note em_event_pointer() should NOT be used with events of (major) type + * EM_EVENT_TYPE_VECTOR - usage with vectors returns NULL and an error + * is reported. + * Instead, when dealing with event vectors, use em_event_vector_tbl() to + * get access to the vector table. + * + * @param event Event handle + * + * @return Event structure/data pointer + * @retval NULL on unsupported event type or other error + * + * @see em_event_vector_tbl() when dealing with vector events. + */ +void *em_event_pointer(em_event_t event); + +/** + * Returns the event payload size in bytes (B) of the given event + * + * Returns the event type specific payload size of the event. For events of + * (major) type sw buf or packet the size is the available buffer/payload size + * in bytes (B). + * + * @note Do not use this API function for vector events, instead use + * em_event_vector_size(), em_event_vector_max_size() or + * em_event_vector_info(). Use the event type to distinguish between + * vectors and other types of events. + * + * @param event Event handle + * + * @return Event type specific payload size in bytes. + */ +uint32_t em_event_get_size(em_event_t event); + +/** + * @brief Returns the EM event-pool the event was allocated from. + * + * The EM event-pool for the given event can only be obtained if the event has + * been allocated from a pool created with em_pool_create(). For other pools, + * e.g. external (to EM) pktio pools, EM_POOL_UNDEF is returned. + * + * @param event Event handle + * + * @return The EM event-pool handle or EM_POOL_UNDEF if no EM pool is found. + * EM_POOL_UNDEF is returned also for a valid event that has been + * allocated from a pool external to EM (no error is reported). + */ +em_pool_t em_event_get_pool(em_event_t event); + +/** + * Set the event type of an event + * + * The operation may fail if (the major part of) the new type is not compatible + * with the old one. + * EM does not check the compatibility of the new vs. old event type for all + * cases, thus the user must take care not to incorrectly update the type. + * + * @note Vector events must always have their major type set to + * EM_EVENT_TYPE_VECTOR or EM will not recognize them as vectors. + * + * @param event Event handle + * @param newtype New type for the event + * + * @return EM_OK on success + * + * @see em_alloc(), em_event_get_type/_multi(), + * em_event_type_major(), em_event_type_minor(), + * em_receive_func_t(..., em_event_type_t type, ...) + */ +em_status_t em_event_set_type(em_event_t event, em_event_type_t newtype); + +/** + * Get the event type of an event + * + * Returns the type of the given event. The type has been set by em_alloc...(), + * em_event_set_type() or e.g. packet input. + * + * @param event Event handle + * + * @return event type, EM_EVENT_TYPE_UNDEF on error + * + * @see em_alloc(), em_event_set_type(), + * em_event_type_major(), em_event_type_minor(), + * em_receive_func_t(..., em_event_type_t type, ...) + */ +em_event_type_t em_event_get_type(em_event_t event); + +/** + * Get the event types of multiple events + * + * Writes the event type of each given event into an output type-array and + * returns the number of entries written. + * Note, if 'events[num]' are all of the same type then 'types[num]' will + * contain 'num' same entries. + * + * @param events Event handles: events[num] + * @param[out] types Event types (output array): types[num] + * (types[i] is the type of events[i]) + * @param num Number of events and output types. 
+ * The array 'events[]' must contain 'num' entries and the + * output array 'types[]' must have room for 'num' entries. + * + * @return Number of event types (0...num) written into 'types[]'. + * The return value (always >=0) is usually 'num' and thus '<num' is + * only seen in error scenarios when the type of event[i] could not be + * obtained. The function stops and returns on the first error. + */ +int em_event_get_type_multi(const em_event_t events[], int num, + em_event_type_t types[/*out*/]); + +/** + * Get the number of events that have the same event type. + * + * Returns the number of consecutive events from the start of the array + * 'events[]' that share the same event type; that type is output via + * 'same_type'. + * + * @param events Event handles: events[num] + * @param num Number of events. + * The array 'events[]' must contain 'num' entries. + * @param[out] same_type Event type pointer for output + * + * @return Number of events that share the same event type (always + * >=0), includes and starts from events[0]. + * The return value is usually '>=1' and thus '0' is only seen in + * error scenarios when the type of the first event could not be + * obtained or if the given 'num=0'. + * The function stops and returns on the first error. + */ +int em_event_same_type_multi(const em_event_t events[], int num, + em_event_type_t *same_type /*out*/); + +/** + * Mark the event as "sent". + * + * Indicates a user-given promise to EM that the event will later appear into + * 'queue' by some means other than an explicit user call to em_send...(). + * Calling em_event_mark_send() transfers event ownership away from the user, + * and thus the event must not be used or touched by the user anymore (the only + * exception is (hw) error recovery where the "sent" state can be cancelled by + * using em_event_unmark_send() - dangerous!). + * + * Example use case: + * A user provided output-callback function associated with a queue of type + * 'EM_QUEUE_TYPE_OUTPUT' can use this API when configuring a HW-device to + * deliver the event back into EM. The HW will eventually "send" the event and + * it will "somehow" again appear into EM for the user to process. + * + * EM will, after this API-call, treat the event as "sent" and any further API + * operations or usage might lead to EM errors (depending on the error-check + * level), e.g. em_send/free/tmo_set/ack(event) etc. is forbidden after + * em_event_mark_send(event). + * + * @note Registered API-callback hooks for em_send...() (em_api_hook_send_t) + * will NOT be called. + * @note Marking an event "sent" with an event group (corresponding to + * em_send_group()) is currently NOT supported. + * + * @param event Event to be marked as "sent" + * @param queue Destination queue (must be scheduled, i.e. atomic, + * parallel or parallel-ordered) + * + * @return EM_OK if successful + * + * @see em_send(), em_event_unmark_send() + */ +em_status_t em_event_mark_send(em_event_t event, em_queue_t queue); + +/** + * Unmark an event previously marked as "sent" (i.e. mark as "unsent") + * + * @note This is for recovery situations only and can potentially crash the + * application if used incorrectly! + * + * Revert an event's "sent" state, as set by em_event_mark_send(), back to the + * state before the mark-send function call. + * Any further usage of the event after em_event_mark_send(), by EM or + * the user, will result in error when calling em_event_unmark_send() since the + * state has become unrecoverable. + * => the only allowed EM API call after em_event_mark_send() is + * em_event_unmark_send() if it is certain that the event, due to some + * external error, will never be sent into EM again otherwise. + * Calling em_event_unmark_send() transfers event ownership back to the user + * again. + * + * @note Unmark-send and unmark-free are the only valid cases of using an event + * that the user no longer owns - all other such uses lead to fatal error + * + * @code + * em_status_t err; + * hw_err_t hw_err; + * + * // 'event' owned by the user + * err = em_event_mark_send(event, queue); + * if (err != EM_OK) + * return err; // NOK + * // 'event' no longer owned by the user - don't touch!
+ + * hw_err = config_hw_to_send_event(...hw-cfg..., event, queue); + * if (hw_err) { + * // hw config error - the event can be recovered if it is + * // certain that the hw won't send that same event. + * // note: the user doesn't own the event here and actually + * // uses an obsolete event handle to recover the event. + * err = em_event_unmark_send(event); + * if (err != EM_OK) + * return err; // NOK + * // 'event' recovered, again owned by the user + * em_free(event); + * } + * @endcode + * + * @param event Event previously marked as "sent" with em_event_mark_send(), + * any other case will be invalid! + * + * @return EM_OK if successful + * + * @see em_send(), em_event_mark_send() + */ +em_status_t em_event_unmark_send(em_event_t event); + +/** + * @brief Mark the event as "free". + * + * Indicates a user-given promise to EM that the event will be freed back into + * the pool it was allocated from e.g. by HW or device drivers (external to EM). + * Calling em_event_mark_free() transfers event ownership away from the user, + * and thus the event must not be used or touched by the user anymore. + * + * Example use case: + * A user provided output-callback function associated with a queue of type + * 'EM_QUEUE_TYPE_OUTPUT' can use this API when configuring a HW-device or + * device-driver to free the event (outside of EM) after transmission. + * + * EM will, after this API-call, treat the event as "freed" and any further API + * operations or usage might lead to EM errors (depending on the error-check + * level), e.g. em_send/free/tmo_set/ack(event) etc. is forbidden after + * em_event_mark_free(event). + * + * @note Registered API-callback hooks for em_free/_multi() (em_api_hook_free_t) + * will NOT be called. + * + * @param event Event to be marked as "free" + * + * @see em_free(), em_event_unmark_free() + */ +void em_event_mark_free(em_event_t event); + +/** + * @brief Unmark an event previously marked as "free" + * (i.e. mark as "allocated" again). + * + * @note This is for recovery situations only and can potentially crash the + * application if used incorrectly! Unmarking the free-state of an event + * that has already been freed will lead to fatal error. + * + * Revert an event's "free" state, as set by em_event_mark_free(), back to the + * state before the mark-free function call. + * Any further usage of the event after em_event_mark_free(), by EM or the user, + * will result in error when calling em_event_unmark_free() since the state has + * become unrecoverable. + * => the only allowed EM API call after em_event_mark_free() (for a certain + * event) is em_event_unmark_free() when it is certain that the event, due to + * some external error, will not be freed otherwise and must be recovered + * back into the EM-domain so that calling em_free() by the user is possible. + * Calling em_event_unmark_free() transfers event ownership back to the user + * again. + * + * @note Unmark-send and unmark-free are the only valid cases of using an event + * that the user no longer owns - all other such uses lead to fatal error + * + * @code + * em_status_t err; + * hw_err_t hw_err; + * + * // 'event' owned by the user + * em_event_mark_free(event); + * // 'event' no longer owned by the user - don't touch! + * + * hw_err = config_hw_to_transmit_event(...hw-cfg..., event); + * if (hw_err) { + * // hw config error - the event can be recovered if it is + * // certain that the hw won't free that same event.
+ + * // note: the user doesn't own the event here and actually + * // uses an obsolete event handle to recover the event. + * em_event_unmark_free(event); + * // 'event' recovered, again owned by the user + * em_free(event); + * } + * @endcode + * + * @param event Event previously marked as "free" with + * em_event_mark_free/_multi(), any other use case is invalid! + * + * @see em_free(), em_event_mark_free() + */ +void em_event_unmark_free(em_event_t event); + +/** + * @brief Mark multiple events as "free". + * + * Similar to em_event_mark_free(), but allows the marking of multiple events + * as "free" with one function call. + * + * @note Registered API-callback hooks for em_free/_multi() (em_api_hook_free_t) + * will NOT be called. + * + * @param[in] events Array of events to be marked as "free" + * @param num The number of events in the array 'events[]' + */ +void em_event_mark_free_multi(const em_event_t events[], int num); + +/** + * @brief Unmark multiple events previously marked as "free". + * + * @note This is for recovery situations only and can potentially crash the + * application if used incorrectly! + * + * Similar to em_event_unmark_free(), but allows the "free"-unmarking of + * multiple events with one function call. + * + * @param[in] events Events previously marked as "free" with + * em_event_mark_free/_multi(), any other use case is invalid! + * @param num The number of events in the array 'events[]' + */ +void em_event_unmark_free_multi(const em_event_t events[], int num); + +/** + * @brief Clone an event. + * + * Allocate a new event with identical payload to the given event. + * + * @note Event metadata, internal headers and state are _NOT_ cloned + * (e.g. the event-group of a cloned event is EM_EVENT_GROUP_UNDEF etc). + * + * @param event Event to be cloned, must be a valid event. + * @param pool Optional event pool to allocate the cloned event from. + * Use 'EM_POOL_UNDEF' to clone from the same pool as 'event' + * was allocated from. + * The event-type of 'event' must be suitable for allocation + * from 'pool' (e.g. EM_EVENT_TYPE_PACKET can not be + * allocated from a pool supporting only EM_EVENT_TYPE_SW) + * + * @return The cloned event or EM_EVENT_UNDEF on error. + * + * @see em_alloc(), em_free() + */ +em_event_t em_event_clone(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/); + +/** + * @brief Get a pointer to the event user area, optionally along with its size. + * + * The event user area is a fixed sized area located within the event metadata + * (i.e. outside of the event payload) that can be used to store application + * specific event related data without the need to adjust the payload. + * The event user area is configured during EM event pool creation and thus the + * size of the user area is set per pool. + * + * Note that the user area content is not initialized by EM, neither em_alloc() + * nor em_free() will touch it and thus it might contain old user data set the + * last time the area was used during a previous allocation of the same event. + * Since the user area is not part of the event payload, it will not be + * transmitted as part of a packet etc. + * + * @param event Event handle to get the user area of + * @param[out] size Optional output arg into which the user area size is + * stored. Use 'size=NULL' if no size information is needed.
+ * + * @return a pointer to the event user area + * @retval NULL on error or if the event contains no user area + * + * @see em_pool_create() for pool specific configuration and + * the EM runtime config file em-odp.conf for the default value: + * 'pool.user_area_size'. + * @see em_event_uarea_info() if both user area ptr and ID is needed + */ +void *em_event_uarea_get(em_event_t event, size_t *size/*out*/); + +/** + * @brief Get the event user area ID along with information if it has been set + * + * The event user area can be associated with an optional ID that e.g. can be + * used to identify the contents of the actual user area data. The ID is stored + * outside of the actual user area data and is available for use even if the + * user area size has been set to zero(0) for the pool the event was allocated + * from. + * + * This function is used to determine whether the user area ID has been set + * earlier and to retrieve the ID in the case it has been set. + * EM will initialize 'ID isset = false' when allocating a new event (indicating + * that the ID is not set). Use em_event_uarea_id_set() to set the ID. + * + * @param event Event handle to get the user area ID and "set"-status of + * @param[out] isset Optional output arg: has the ID been set previously? + * At least one of 'isset' and 'id' must be given (or both). + * @param[out] id Optional output arg into which the user area ID is + * stored if it has been set before. The output arg 'isset' + * should be used to determine whether 'id' has been set. + * Note: 'id' will not be touched if the ID has not been set + * earlier (i.e. when 'isset' is 'false'). + * At least one of 'isset' and 'id' must be given (or both). + * + * @return EM_OK if successful + * + * @see em_event_uarea_id_set(), em_event_uarea_get() + * @see em_event_uarea_info() if both user area ptr and ID is needed + */ +em_status_t em_event_uarea_id_get(em_event_t event, bool *isset /*out*/, + uint16_t *id /*out*/); + +/** + * @brief Set the event user area ID + * + * The event user area can be associated with an optional ID that e.g. can be + * used to identify the contents of the actual user area data. The ID is stored + * outside of the actual user area data and is available for use even if the + * user area size has been set to 0 for the pool the event was allocated from. + * + * This function is used to set the event user area ID for the given event. + * The 'set' operation overwrites any ID stored earlier. + * Use em_event_uarea_id_get() to check whether an ID has been set earlier and + * to retrieve the ID. + * + * @param event Event handle for which to set the user area ID + * @param id The user area ID to set + * + * @return EM_OK if successful + * + * @see em_event_uarea_id_get(), em_event_uarea_get(), em_event_uarea_info() + */ +em_status_t em_event_uarea_id_set(em_event_t event, uint16_t id); + +/** + * @brief Event user area information filled by em_event_uarea_info() + * + * Output structure for obtaining information about an event's user area. + * Information related to the user area will be filled into this struct by + * the em_event_uarea_info() API function. + * + * A user area is only present if the EM pool the event was allocated from + * was created with user area size > 0, see em_pool_cfg_t and em_pool_create(). + * The user area ID can always be used (set/get), even when the size of the + * user area is zero(0). 
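+ *
+ * Illustrative use of the user area and its ID (a sketch; assumes the event's
+ * pool was created with a user area size > 0, and the ID value 0x42 is an
+ * arbitrary application-chosen example):
+ * @code
+ *	size_t uarea_size = 0;
+ *	void *uarea = em_event_uarea_get(event, &uarea_size);
+ *
+ *	if (uarea != NULL) {
+ *		// ... store app state, at most 'uarea_size' bytes ...
+ *		(void)em_event_uarea_id_set(event, 0x42);
+ *	}
+ * @endcode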
+ + * @see em_event_uarea_info(), em_event_uarea_id_set() + */ +typedef struct { + /** Pointer to the event user area, NULL if event has no user area */ + void *uarea; + /** Size of the event user area, zero(0) if event has no user area */ + size_t size; + + /** Event user area ID (ID can be set/get even when no uarea present) */ + struct { + /** Boolean: has the ID been set previously? true/false */ + bool isset; + /** Value of the user area ID, if (and only if) set before. + * Only inspect '.id.value' when '.id.isset=true' indicating + * that ID has been set earlier by em_event_uarea_id_set(). + */ + uint16_t value; + } id; +} em_event_uarea_info_t; + +/** + * @brief Get the event user area information for a given event. + * + * Obtain information about the event user area for a certain given event. + * Information containing the user area pointer, size, as well as the ID is + * output via the 'uarea_info' struct. + * This API function combines the functionality of em_event_uarea_get() and + * em_event_uarea_id_get() for use cases where both the user area pointer as + * well as the ID is needed. Calling one API function instead of two might be + * faster due to fewer checks and internal conversions. + * + * The event user area is a fixed sized area located within the event metadata + * (i.e. outside of the event payload) that can be used to store application + * specific event related data without the need to adjust the payload. + * The event user area is configured during EM event pool creation and thus the + * size of the user area is set per pool. + * + * Note that the user area content is not initialized by EM, neither em_alloc() + * nor em_free() will touch it and thus it might contain old user data set the + * last time the area was used during a previous allocation of the same event. + * Since the user area is not part of the event payload, it will not be + * transmitted as part of a packet etc. + * + * The event user area can be associated with an optional ID that can be used to + * identify the contents of the actual user area data. The ID is stored + * outside of the actual user area data and is available for use even if the + * user area size has been set to zero(0) for the pool the event was allocated + * from. EM will initialize 'uarea_info.id.isset = false' when allocating + * a new event (indicating that the ID is not set). + * + * @param event Event handle to get the user area information of. + * @param[out] uarea_info Output struct into which the user area information + * is stored. + * + * @return EM status code indicating success or failure of the operation. + * @retval EM_OK Operation successful. + * @retval Other Operation FAILED and no valid user area info could + * be obtained, 'uarea_info' is all NULL/zero(0) in this case. + * + * @see em_pool_create() for pool specific configuration and + * the EM runtime config file em-odp.conf for the default value: + * 'pool.user_area_size'. + * @see em_event_uarea_get(), em_event_uarea_id_get() + */ +em_status_t em_event_uarea_info(em_event_t event, + em_event_uarea_info_t *uarea_info /*out*/); +/* + * Event References + */ + +/** + * Create a reference to an event + * + * A reference is an additional event handle referring to an existing event. + * As long as an event has multiple references, none of them (including 'event') + * should be used to modify the event. Reading event data from a reference is + * allowed. Writes to the event data must only be done when there is a + * single event handle left, i.e.
+ * Results are undefined if these restrictions are not observed.
+ *
+ * The event is freed when the last reference, including the original event,
+ * is freed.
+ *
+ * Currently only references to events of (major) type EM_EVENT_TYPE_PACKET
+ * can be created.
+ *
+ * It is not allowed to use event references with event groups since assigning
+ * an event that has references to an event group would assign all the
+ * references to the event group resulting in undefined behaviour. E.g. using
+ * em_send_group()/em_send_group_multi() to send a reference is wrong.
+ *
+ * @param event  Event handle for which a reference is to be created.
+ *
+ * @return Reference to the event
+ * @retval EM_EVENT_UNDEF on failure
+ */
+em_event_t em_event_ref(em_event_t event);
+
+/**
+ * Test if an event has references
+ *
+ * An event that has multiple references shares data with other events and thus
+ * the (shared) data must not be modified.
+ *
+ * New references are created with the em_event_ref() API call. The intent of
+ * multiple references is to avoid event copies. When a reference is created,
+ * this function returns 'true' for both events (i.e. for the original event
+ * and the new reference).
+ *
+ * @param event  Event handle
+ *
+ * @retval false  This event has no references
+ * @retval true   The event has multiple references
+ */
+bool em_event_has_ref(em_event_t event);
+
+/*
+ * Event Vectors
+ * Event (major) Type: EM_EVENT_TYPE_VECTOR
+ *
+ * Vector events contain a table of events.
+ * All events in the event-table must be of major type EM_EVENT_TYPE_PACKET.
+ * Storing events of another type into the event-table is an error and leads to
+ * undefined behaviour.
+ * Event vector pools are created with em_pool_create(), with the pool
+ * event-type set to EM_EVENT_TYPE_VECTOR. Event vectors can then be allocated
+ * from vector pools by calling em_alloc(..., vector_pool).
+ * To free the vector event along with all events it contains, use em_free() or
+ * em_free_multi().
+ * To free the vector event only, not the events it contains,
+ * use em_event_vector_free().
+ */
+
+/**
+ * @brief Free the vector event only, not the events it contains.
+ *
+ * Frees only the vector event itself and not the events it contains in its
+ * vector-table.
+ * To free the vector event along with all events it contains,
+ * use em_free() or em_free_multi().
+ *
+ * @param vector_event  Vector event handle
+ */
+void em_event_vector_free(em_event_t vector_event);
+
+/**
+ * @brief Get the event vector table from an event of (major) type
+ *        EM_EVENT_TYPE_VECTOR.
+ *
+ * The event vector table is an array of event handles (em_event_t) stored in
+ * a contiguous memory location for events with major event type set to
+ * EM_EVENT_TYPE_VECTOR.
+ * On return, this function outputs the event table pointer of the given
+ * vector event via the output argument 'event_tbl'.
+ *
+ * All events in the event-table must be of major type EM_EVENT_TYPE_PACKET.
+ * Storing events of another type into the event-table is an error and leads to
+ * undefined behaviour.
+ *
+ * @param      vector_event  Vector event handle
+ * @param[out] event_tbl     Pointer into which the event table (ptr) is written
+ *
+ * @return Number of event handles available (set) in the vector event.
+ *
+ * @note A newly allocated vector has an empty event-table and thus the returned
+ *       size is zero until updated with em_event_vector_size_set().
+ *
+ * @code
+ *	em_event_type_t event_type = em_event_get_type(vector_event);
+ *
+ *	if (em_event_type_major(event_type) == EM_EVENT_TYPE_VECTOR) {
+ *		em_event_t *event_tbl;
+ *		uint32_t num = em_event_vector_tbl(vector_event, &event_tbl);
+ *
+ *		if (!num)
+ *			return;
+ *
+ *		for (uint32_t i = 0; i < num; i++) {
+ *			event = event_tbl[i];
+ *			... process 'event' ...
+ *		}
+ *	}
+ * @endcode
+ *
+ * @see em_event_vector_size_set()
+ */
+uint32_t em_event_vector_tbl(em_event_t vector_event, em_event_t **event_tbl/*out*/);
+
+/**
+ * @brief Number of event handles available (set) in a vector.
+ *
+ * Returns the number of available events in the vector.
+ * Note that the returned 'size' indicates how many valid events are stored in
+ * the vector's event-table.
+ * The 'size' neither indicates the alloc-size (size given to em_alloc()) nor
+ * the max-size of the event-table; use em_event_vector_max_size() or
+ * em_event_vector_info() to obtain that information.
+ *
+ * All events in the event-table must be of major type EM_EVENT_TYPE_PACKET.
+ * Storing events of another type into the event-table is an error and leads to
+ * undefined behaviour.
+ *
+ * @param vector_event  Vector event handle
+ *
+ * @return The number of event handles available (set) in the vector
+ *
+ * @note A newly allocated vector has an empty event-table and thus the returned
+ *       size is zero until updated with em_event_vector_size_set().
+ */
+uint32_t em_event_vector_size(em_event_t vector_event);
+
+/**
+ * @brief Set the number of event handles stored in a vector
+ *
+ * Update the number of event handles stored in a vector.
+ * This function shall be used to set the number of events available in the
+ * given vector when the application itself is producing (or updating) the
+ * event vector. Only valid event handles can be stored into the vector's
+ * event-table.
+ *
+ * All events in the event-table must be of major type EM_EVENT_TYPE_PACKET.
+ * Storing events of another type into the event-table is an error and leads to
+ * undefined behaviour.
+ *
+ * @param vector_event  Vector event handle
+ * @param size          Number of event handles in the vector
+ *
+ * @note The maximum number of event handles the vector can hold is defined by
+ *       em_pool_cfg_t::subpool[i].size or can be obtained for a specific vector
+ *       with em_event_vector_max_size(vector_event).
+ *
+ * @note All handles in the vector table (0 ... size - 1) need to be valid
+ *       event handles.
+ */
+void em_event_vector_size_set(em_event_t vector_event, uint32_t size);
+
+/**
+ * @brief Maximum number of event handles that can be stored in a vector.
+ *
+ * Returns the maximum number of events that can be stored into the event-table
+ * of the given vector event, i.e. the max-size that can be used with
+ * em_event_vector_size_set().
+ * The max-size might be larger than the size requested during allocation and
+ * instead reflects the vector-size used when creating the EM event vector pool.
+ *
+ * All events in the event-table must be of major type EM_EVENT_TYPE_PACKET.
+ * Storing events of another type into the event-table is an error and leads to
+ * undefined behaviour.
+ *
+ * @param vector_event  Vector event handle
+ *
+ * @return The maximum number of event handles that can be stored in the vector
+ * @retval >0 on success
+ * @retval 0 on failure or if EM can't retrieve the max size (non-EM pool).
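+ *
+ * A minimal vector-production sketch (illustrative only: assumes
+ * 'vector_pool' is a pool of event-type EM_EVENT_TYPE_VECTOR and 'pkt_ev[]'
+ * holds valid packet events, error handling omitted):
+ * @code
+ *	em_event_t vec = em_alloc(4, EM_EVENT_TYPE_VECTOR, vector_pool);
+ *	em_event_t *tbl;
+ *	uint32_t max = em_event_vector_max_size(vec);
+ *	uint32_t num = max < 4 ? max : 4;
+ *
+ *	(void)em_event_vector_tbl(vec, &tbl);
+ *	for (uint32_t i = 0; i < num; i++)
+ *		tbl[i] = pkt_ev[i]; // must be EM_EVENT_TYPE_PACKET events
+ *	em_event_vector_size_set(vec, num);
+ * @endcode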
+ */
+uint32_t em_event_vector_max_size(em_event_t vector_event);
+
+/**
+ * @brief Vector event information filled by em_event_vector_info()
+ *
+ * Output structure for obtaining information about a vector event.
+ * Information related to the vector event will be filled into this struct by
+ * the em_event_vector_info() API function.
+ */
+typedef struct {
+	/** Pointer to the vector's event-table */
+	em_event_t *event_tbl;
+	/** Number of available/set valid events in the vector's event-table */
+	uint32_t size;
+	/**
+	 * Max number of events that can fit into the vector's event-table,
+	 * 0 on failure or if EM can't retrieve the max size (non-EM pool).
+	 */
+	uint32_t max_size;
+} em_event_vector_info_t;
+
+/**
+ * @brief Retrieve information about the given vector event.
+ *
+ * Vector event information is output via the struct 'vector_info'.
+ * Combines the APIs em_event_vector_tbl(), em_event_vector_size() and
+ * em_event_vector_max_size() into one API call.
+ *
+ * All events in the event-table must be of major type EM_EVENT_TYPE_PACKET.
+ * Storing events of another type into the event-table is an error and leads to
+ * undefined behaviour.
+ *
+ * @param      vector_event  Vector event handle to get the information of.
+ * @param[out] vector_info   Output struct into which the vector information
+ *                           is stored.
+ *
+ * @return EM status code indicating success or failure of the operation.
+ * @retval EM_OK    Operation successful.
+ * @retval Other    Operation FAILED and no valid vector info could
+ *                  be obtained, 'vector_info' is all NULL/zero(0) in this case.
+ */
+em_status_t em_event_vector_info(em_event_t vector_event,
+                                 em_event_vector_info_t *vector_info /*out*/);
+
+/**
+ * @}
+ */
+#ifdef __cplusplus
+}
+#endif
+
+#pragma GCC visibility pop
+#endif /* EVENT_MACHINE_EVENT_H_ */
diff --git a/include/event_machine/api/event_machine_event_group.h b/include/event_machine/api/event_machine_event_group.h
index 8916e95c..a206efb0 100644
--- a/include/event_machine/api/event_machine_event_group.h
+++ b/include/event_machine/api/event_machine_event_group.h
@@ -1,451 +1,465 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_EVENT_GROUP_H_ -#define EVENT_MACHINE_EVENT_GROUP_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup em_event_group Event group - * Event Machine fork-join helper. - * @{ - * - * An event group can be used to trigger a join of parallel operations in the - * form of notification events. The number of parallel operations needs to be - * known in advance by the event group creator, but the separate event handlers - * don't necessarily need to know anything about the other related events. - * An event group is functionally a shared atomic counter decremented when each - * related event has been handled (EO-receive() returns). The notification - * events are automatically sent once the count reaches zero. - * - * There are two separate main usage patterns: - * - * Sender originated (original): - * ---------------------------- - * 1. an event group is allocated with em_event_group_create(). - * - * 2. the number of parallel events and the notifications are set with - * em_event_group_apply(). - * - * 3. the (parallel) events are sent normally but using em_send_group() instead - * of em_send(). This tags the event with the given event group. - * - * 4. once received by a core the tag is used to switch core specific current - * event group to the one in the tag. The receiver EO handles the event - * normally (does not see any difference). - * - * 5. as the receive function returns the count of the current event group is - * decremented. If the count reaches zero (last event) the related - * notification event(s) are sent automatically and can trigger the next - * operation for the application. - * - * 6. the sequence can continue from step 2 for a new set of events if the - * event group is to be reused. - * - * Receiver originated (API 1.2): - * ----------------------------- - * 1. an event group is created with em_event_group_create(). - * - * 2. the number of parallel events and the notifications are set with - * em_event_group_apply(). - * - * 3. during the processing of any received event that is not already tagged to - * belong to an event group, em_event_group_assign() can be used to set the - * current event group (a core local value). The rest is then equivalent to - * as if the event was originally sent to an event group. - * - * 4. as the receive function returns the count of the current event group is - * decremented. If the count reaches zero (last event) the related - * notification event(s) are sent automatically and can trigger the next - * operation for the application. - * - * 5. the sequence can continue from step 2 for a new set of events if the - * event group is to be reused. - * - * - * From an application (EO) point of view, an event group can get activated - * either by entering the EO receive with an event tagged to an event group or - * by explicitly calling em_event_group_assign. The current event group is core - * local and only one event group can be active (current) at a time. 
- * Assigning a received event that already is tagged to an event group, e.g. - * sent with em_send_group(), is not allowed unless the event group is - * deactivated first with em_event_group_processing_end(). - * The current event group gets deactivated by exiting the EO receive function - * or by explicitly calling em_event_group_processing_end(). Deactivation means - * the count of the event group is decremented and if the count reaches zero - * the notification events are sent. - * The current event group is local to a core (dispatcher) and exists only - * within the EO receive function. - * - * Note, that event groups may only work with events that are to be handled by - * an EO, i.e. SW events. - * - * OpenEM implementation should internally use a generation count or other - * technique to make sure that em_event_group_abort() can stop a problem - * propagation, i.e. after a group is aborted (and applied a new count) any - * potential delayed event(s) from the previous cycle will not cause the new - * count to be decremented. - * The same should be valid for excess group events, i.e. when sending more - * than the applied count. - * To make it possible for the application to properly handle such problems, - * the implementation should pre-check incoming events and call error handler - * before giving the event to an EO. This makes it possible for the application - * to choose whether to drop those events (at the error handler) or let them be - * processed. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include - -/** - * Create a new event group for fork-join. - * - * The amount of simultaneous event groups can be limited. - * - * @return The new event group or EM_EVENT_GROUP_UNDEF if no event group is - * available. - * - * @see em_event_group_delete(), em_event_group_apply() - */ -em_event_group_t em_event_group_create(void); - -/** - * Delete (unallocate) an event group. - * - * An event group must not be deleted before it has been completed - * (count reached zero) or aborted. A created but never applied event group - * can be deleted. - * - * @param event_group Event group to delete - * - * @return EM_OK if successful. - * - * @see em_event_group_create(), em_event_group_abort() - */ -em_status_t em_event_group_delete(em_event_group_t event_group); - -/** - * Apply event group configuration. - * - * This function sets the event count and notification parameters for the event - * group. After it returns, events sent or assigned to the event group are - * counted against the current count value. Notification events are sent when - * all (counted) events have been processed (count is decremented at EO receive - * return or by calling em_event_group_processing_end()). A new apply call is - * needed to re-use the event group for another cycle (with a new count and - * notifications). - * - * Notification events can optionally be sent to/tagged with another event - * group but not with the same event group that triggered the notifications, - * see em_notif_t for more. - * - * @attention em_event_group_apply() can only be used on a newly created event - * group or when the previous cycle is completed or successfully aborted. - * Application can use em_event_group_is_ready() to detect whether apply is - * allowed but would normally use a notification to setup a new cycle - * (implementation must make sure that when any of the notifications is - * received the group is ready for new apply). - * - * Apply should only be called once per group cycle. 
- * - * @param event_group Event group - * @param count Number of events in the group (positive integer) - * @param num_notif Number of notification events to send - * @param notif_tbl Table of notifications (events and target queues) - * - * @return EM_OK if successful. - * - * @see em_event_group_create(), em_send_group(), em_event_group_is_ready(), - * em_notif_t - */ -em_status_t em_event_group_apply(em_event_group_t event_group, int count, - int num_notif, const em_notif_t notif_tbl[]); - -/** - * Increment the current event group count. - * - * Increments the event count of the currently active event group (received or - * assigned event). Enables sending new events into the current event group. - * The event count cannot be decremented and this will fail if there is no - * current event group. - * - * @param count Number of events to add to the event group (positive integer) - * - * @return EM_OK if successful. - * - * @see em_send_group(), em_event_group_apply() - */ -em_status_t em_event_group_increment(int count); - -/** - * Checks if the event group is ready for 'apply'. - * - * Returns EM_TRUE (1) if the given event group is ready, i.e. the user can do - * em_event_group_apply() again. A better alternative to this is to use a - * related notification event to re-use the event group (apply can always be - * used when handling a notification event from the event group). - * - * An event group that has been applied a count but no events sent is not - * considered 'ready for apply'. If a change is needed the group has to be - * aborted and then re-applied. - * - * Return value EM_TRUE does not guarantee all notifications are received nor - * handled, but the event group count has reached zero and the event group - * is ready for a new apply. - * - * @param event_group Event group - * - * @return EM_TRUE if the given event group is ready for apply - * - * @see em_event_group_create(), em_event_group_apply() - */ -int em_event_group_is_ready(em_event_group_t event_group); - -/** - * Return the currently active event group. - * - * Returns the current event group or EM_EVENT_GROUP_UNDEF if an event group is - * not active (i.e. never activated or deactivated using - * em_event_group_processing_end()). - * - * Can only be used within an EO receive function. - * - * @return Current event group or EM_EVENT_GROUP_UNDEF - * - * @see em_event_group_create() - */ -em_event_group_t em_event_group_current(void); - -/** - * Send event associated with/tagged to an event group. - * - * Any valid event and destination queue parameters can be used. The event - * group indicates which event group the event is tagged to. The event group - * has to first be created and applied a count. - * One should always send the correct amount of events to an event group, i.e. - * matching the applied count. - * - * Event group is not supported with unscheduled queues. - * - * @param event Event to send - * @param queue Destination queue - * @param event_group Event group - * - * @return EM_OK if successful. - * - * @see em_send(), em_event_group_create(), em_event_group_apply(), - * em_event_group_increment() - */ -em_status_t em_send_group(em_event_t event, em_queue_t queue, - em_event_group_t event_group); - -/** - * Send multiple events associated with/tagged to an event group. - * - * This is like em_send_group, but multiple events can be sent with one call - * for potential performance gain. - * The call returns the number of events actually sent. 
A return value equal to - * 'num' means that all events were sent. A value less than 'num' means the - * events at the end of the given event list were not sent and must be handled - * by the application. - * The function will not modify the given list of events. - * - * Event group is not supported with unscheduled queues. - * - * @param events List of events to send (i.e. ptr to array of events) - * @param num Number of events - * @param queue Destination queue - * @param event_group Event group - * - * @return number of events successfully sent (equal to num if all successful) - * - * @see em_send_group() - */ -int em_send_group_multi(const em_event_t events[], int num, em_queue_t queue, - em_event_group_t event_group); - -/** - * Signal early end of processing of the current event group - * - * This is an optional call that can be used to move the implicit event group - * handling (decrementing the count) from exiting event receive function to the - * point of this call - the current event group count is decremented - * immediately and if it reaches zero the notifications are also sent. In that - * case the group will be ready for a new apply after this returns. - * - * This impacts the current event group the same way whether it was activated - * by receiving a tagged event or EO called em_event_group_assign(). - * - * This call does not change potential atomicity or ordering for the current - * event and is a no-operation if called while an event group is not active - * (no current group). - * - * Can only be used within the EO receive function. - */ -void em_event_group_processing_end(void); - -/** - * Assign core local current event group. - * - * The assign functionality can be used to set the core local current event - * group. The event group handling after the assign call is identical to - * the handling of an event group that was originally set by sending an event - * tagged to that event group, i.e. the core local current event group - * is active and will be operated on in a normal way. - * Assign will fail if there already is an active current event group, i.e. - * only one event group can be active at a time (per core). - * - * This needs to be used with care, i.e. match the amount of events applied - * and assigned. - * - * @param event_group An applied event group to assign to - * - * @return EM_OK if assignment was successful - */ -em_status_t em_event_group_assign(em_event_group_t event_group); - -/** - * Abort the ongoing event group. - * - * This is a recovery operation to abort an ongoing event group in case it does - * not get completed. This will reset the group back to a state ready for - * a new apply. Note, that there is a potential race as the group could get - * completed on another thread while executing this (e.g. a delayed event is - * finally received and processed). Implementation will synchronize internal - * state changes, but this call may succeed or fail depending on timing so - * abort should be done with care for recovery purpose only. - * - * Notification events related to the ongoing (to be aborted) cycle can be - * managed as follows - * 1) save possible related notifications using em_event_group_get_notif() - * 2) call em_event_group_abort() - * 3) IF em_event_group_abort() returns EM_OK the operation was successfully - * completed meaning the earlier notifications will not be sent thus the - * saved notifications can be freed or re-used. 
Otherwise the call was made - * too late and the saved notifications must not be touched as they are to - * be sent. - * - * This means the synchronization point is em_event_group_abort(), not - * em_event_group_get_notif() which might return notifications that will still - * be sent. - * - * @attention Related notification events will not be automatically freed in - * any case and must be handled by the application. - * - * @param event_group Event group to abort and reset - * - * @return EM_OK if the call was made early enough to cleanly abort, i.e. - * before the last event was processed. EM_OK also means the - * notifications will not be sent. - */ -em_status_t em_event_group_abort(em_event_group_t event_group); - -/** - * Return notification events currently related to an applied event group. - * - * This returns the current notifications or none (0) if they were already sent - * (event group completed). - * - * @attention This is not a synchronization point, which means - * em_event_group_get_notif() could return notifications which - * are just going to be sent and thus should not be touched. - * - * @param event_group Event group - * @param max_notif Maximum number of notifications to return - * @param[out] notif_tbl Table for notifications to fill - * - * @return Number of returned notifications - * - * @see em_event_group_apply(), em_event_group_abort() - */ -int em_event_group_get_notif(em_event_group_t event_group, - int max_notif, em_notif_t notif_tbl[]); - -/** - * Initialize event group iteration and return the first event group handle. - * - * Can be used to initialize the iteration to retrieve all created event groups - * for debugging or management purposes. Use em_event_group_get_next() after - * this call until it returns EM_EVENT_GROUP_UNDEF. - * A new call to em_event_group_get_first() resets the iteration, which is - * maintained per core (thread). The operation should be completed in one go - * before returning from the EO's event receive function (or start/stop). - * - * The number of event groups (output arg 'num') may not match the amount of - * event groups actually returned by iterating using em_event_group_get_next() - * if event groups are added or removed in parallel by another core. The order - * of the returned event group handles is undefined. - * - * @code - * unsigned int num; - * em_event_group_t eg = em_event_group_get_first(&num); - * while (eg != EM_EVENT_GROUP_UNDEF) { - * eg = em_event_group_get_next(); - * } - * @endcode - * - * @param[out] num Pointer to an unsigned int to store the amount of - * event groups into - * @return The first event group handle or EM_EVENT_GROUP_UNDEF if none exist - * - * @see em_event_group_get_next() - **/ -em_event_group_t -em_event_group_get_first(unsigned int *num); - -/** - * Return the next event group handle. - * - * Continues the event group iteration started by em_event_group_get_first() and - * returns the next event group handle. - * - * @return The next event group handle or EM_EVENT_GROUP_UNDEF if the event - * group iteration is completed (i.e. no more event groups available). - * - * @see em_event_group_get_first() - **/ -em_event_group_t -em_event_group_get_next(void); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_EVENT_GROUP_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT_MACHINE_EVENT_GROUP_H_ +#define EVENT_MACHINE_EVENT_GROUP_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * @defgroup em_event_group Event group + * Event Machine fork-join helper. + * @{ + * + * An event group can be used to trigger a join of parallel operations in the + * form of notification events. The number of parallel operations needs to be + * known in advance by the event group creator, but the separate event handlers + * don't necessarily need to know anything about the other related events. + * An event group is functionally a shared atomic counter decremented when each + * related event has been handled (EO-receive() returns). The notification + * events are automatically sent once the count reaches zero. + * + * There are two separate main usage patterns: + * + * Sender originated (original): + * ---------------------------- + * 1. an event group is allocated with em_event_group_create(). + * + * 2. the number of parallel events and the notifications are set with + * em_event_group_apply(). + * + * 3. the (parallel) events are sent normally but using em_send_group() instead + * of em_send(). This tags the event with the given event group. + * + * 4. once received by a core the tag is used to switch core specific current + * event group to the one in the tag. The receiver EO handles the event + * normally (does not see any difference). + * + * 5. as the receive function returns the count of the current event group is + * decremented. If the count reaches zero (last event) the related + * notification event(s) are sent automatically and can trigger the next + * operation for the application. + * + * 6. the sequence can continue from step 2 for a new set of events if the + * event group is to be reused. + * + * Receiver originated (API 1.2): + * ----------------------------- + * 1. an event group is created with em_event_group_create(). + * + * 2. the number of parallel events and the notifications are set with + * em_event_group_apply(). + * + * 3. 
during the processing of any received event that is not already tagged to
+ *    belong to an event group, em_event_group_assign() can be used to set the
+ *    current event group (a core local value). The rest then works as if the
+ *    event had originally been sent to an event group.
+ *
+ * 4. as the receive function returns the count of the current event group is
+ *    decremented. If the count reaches zero (last event) the related
+ *    notification event(s) are sent automatically and can trigger the next
+ *    operation for the application.
+ *
+ * 5. the sequence can continue from step 2 for a new set of events if the
+ *    event group is to be reused.
+ *
+ * From an application (EO) point of view, an event group can get activated
+ * either by entering the EO receive with an event tagged to an event group or
+ * by explicitly calling em_event_group_assign(). The current event group is
+ * core local and only one event group can be active (current) at a time.
+ * Assigning a received event that already is tagged to an event group, e.g.
+ * sent with em_send_group(), is not allowed unless the event group is
+ * deactivated first with em_event_group_processing_end().
+ * The current event group gets deactivated by exiting the EO receive function
+ * or by explicitly calling em_event_group_processing_end(). Deactivation means
+ * the count of the event group is decremented and if the count reaches zero
+ * the notification events are sent.
+ * The current event group is local to a core (dispatcher) and exists only
+ * within the EO receive function.
+ *
+ * Note that event groups may only work with events that are to be handled by
+ * an EO, i.e. SW events.
+ *
+ * The OpenEM implementation should internally use a generation count or other
+ * technique to make sure that em_event_group_abort() can stop a problem
+ * propagation, i.e. after a group is aborted (and applied a new count) any
+ * potential delayed event(s) from the previous cycle will not cause the new
+ * count to be decremented.
+ * The same should be valid for excess group events, i.e. when sending more
+ * than the applied count.
+ * To make it possible for the application to properly handle such problems,
+ * the implementation should pre-check incoming events and call the error
+ * handler before giving the event to an EO. This makes it possible for the
+ * application to choose whether to drop those events (at the error handler) or
+ * let them be processed.
+ *
+ * It is not allowed to use event references with event groups since assigning
+ * an event that has references to an event group would assign all the
+ * references to the event group resulting in undefined behaviour. E.g. using
+ * em_send_group()/em_send_group_multi() to send a reference is wrong.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+#include
+
+/**
+ * Create a new event group for fork-join.
+ *
+ * The number of simultaneous event groups can be limited.
+ *
+ * @return The new event group or EM_EVENT_GROUP_UNDEF if no event group is
+ *         available.
+ *
+ * @see em_event_group_delete(), em_event_group_apply()
+ */
+em_event_group_t em_event_group_create(void);
+
+/**
+ * Delete (deallocate) an event group.
+ *
+ * An event group must not be deleted before it has been completed
+ * (count reached zero) or aborted. A created but never applied event group
+ * can be deleted.
+ *
+ * @param event_group  Event group to delete
+ *
+ * @return EM_OK if successful.
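+ *
+ * A minimal lifecycle sketch (illustrative only: 'notif_ev', 'notif_q',
+ * 'ev1', 'ev2' and 'work_q' are hypothetical, error handling omitted):
+ * @code
+ *	em_event_group_t eg = em_event_group_create();
+ *	em_notif_t notif = {.event = notif_ev, .queue = notif_q,
+ *			    .egroup = EM_EVENT_GROUP_UNDEF};
+ *
+ *	(void)em_event_group_apply(eg, 2, 1, &notif);
+ *	(void)em_send_group(ev1, work_q, eg);
+ *	(void)em_send_group(ev2, work_q, eg);
+ *	...
+ *	... once completed, e.g. when handling 'notif_ev' ...
+ *	(void)em_event_group_delete(eg);
+ * @endcode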
+ *
+ * @see em_event_group_create(), em_event_group_abort()
+ */
+em_status_t em_event_group_delete(em_event_group_t event_group);
+
+/**
+ * Apply event group configuration.
+ *
+ * This function sets the event count and notification parameters for the event
+ * group. After it returns, events sent or assigned to the event group are
+ * counted against the current count value. Notification events are sent when
+ * all (counted) events have been processed (count is decremented at EO receive
+ * return or by calling em_event_group_processing_end()). A new apply call is
+ * needed to re-use the event group for another cycle (with a new count and
+ * notifications).
+ *
+ * Notification events can optionally be sent to/tagged with another event
+ * group but not with the same event group that triggered the notifications,
+ * see em_notif_t for more.
+ *
+ * @attention em_event_group_apply() can only be used on a newly created event
+ * group or when the previous cycle is completed or successfully aborted.
+ * The application can use em_event_group_is_ready() to detect whether apply is
+ * allowed but would normally use a notification to set up a new cycle
+ * (implementation must make sure that when any of the notifications is
+ * received the group is ready for a new apply).
+ *
+ * Apply should only be called once per group cycle.
+ *
+ * @param event_group  Event group
+ * @param count        Number of events in the group (positive integer)
+ * @param num_notif    Number of notification events to send
+ * @param notif_tbl    Table of notifications (events and target queues)
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_event_group_create(), em_send_group(), em_event_group_is_ready(),
+ *      em_notif_t
+ */
+em_status_t em_event_group_apply(em_event_group_t event_group, int count,
+                                 int num_notif, const em_notif_t notif_tbl[]);
+
+/**
+ * Increment the current event group count.
+ *
+ * Increments the event count of the currently active event group (received or
+ * assigned event). Enables sending new events into the current event group.
+ * The event count cannot be decremented and this will fail if there is no
+ * current event group.
+ *
+ * @param count  Number of events to add to the event group (positive integer)
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_send_group(), em_event_group_apply()
+ */
+em_status_t em_event_group_increment(int count);
+
+/**
+ * Checks if the event group is ready for 'apply'.
+ *
+ * Returns EM_TRUE (1) if the given event group is ready, i.e. the user can do
+ * em_event_group_apply() again. A better alternative to this is to use a
+ * related notification event to re-use the event group (apply can always be
+ * used when handling a notification event from the event group).
+ *
+ * An event group that has been applied a count, but for which no events have
+ * been sent, is not considered 'ready for apply'. If a change is needed the
+ * group has to be aborted and then re-applied.
+ *
+ * Return value EM_TRUE does not guarantee that all notifications have been
+ * received or handled, but the event group count has reached zero and the
+ * event group is ready for a new apply.
+ *
+ * @param event_group  Event group
+ *
+ * @return EM_TRUE if the given event group is ready for apply
+ *
+ * @see em_event_group_create(), em_event_group_apply()
+ */
+int em_event_group_is_ready(em_event_group_t event_group);
+
+/**
+ * Return the currently active event group.
+ *
+ * Returns the current event group or EM_EVENT_GROUP_UNDEF if an event group is
+ * not active (i.e. never activated or deactivated using
+ * em_event_group_processing_end()).
+ *
+ * Can only be used within an EO receive function.
+ *
+ * @return Current event group or EM_EVENT_GROUP_UNDEF
+ *
+ * @see em_event_group_create()
+ */
+em_event_group_t em_event_group_current(void);
+
+/**
+ * Send event associated with/tagged to an event group.
+ *
+ * Any valid event and destination queue parameters can be used. The event
+ * group indicates which event group the event is tagged to. The event group
+ * has to be created and applied a count first.
+ * One should always send the correct number of events to an event group, i.e.
+ * matching the applied count.
+ *
+ * Event groups are not supported with unscheduled queues.
+ *
+ * It is not allowed to use event references with event groups since assigning
+ * an event that has references to an event group would assign all the
+ * references to the event group resulting in undefined behaviour. E.g. using
+ * em_send_group() to send a reference is wrong.
+ *
+ * @param event        Event to send
+ * @param queue        Destination queue
+ * @param event_group  Event group
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_send(), em_event_group_create(), em_event_group_apply(),
+ *      em_event_group_increment()
+ */
+em_status_t em_send_group(em_event_t event, em_queue_t queue,
+                          em_event_group_t event_group);
+
+/**
+ * Send multiple events associated with/tagged to an event group.
+ *
+ * This is like em_send_group(), but multiple events can be sent with one call
+ * for potential performance gain.
+ * The call returns the number of events actually sent. A return value equal to
+ * 'num' means that all events were sent. A value less than 'num' means the
+ * events at the end of the given event list were not sent and must be handled
+ * by the application.
+ * The function will not modify the given list of events.
+ *
+ * Event groups are not supported with unscheduled queues.
+ *
+ * It is not allowed to use event references with event groups since assigning
+ * an event that has references to an event group would assign all the
+ * references to the event group resulting in undefined behaviour. E.g. using
+ * em_send_group_multi() to send references is wrong.
+ *
+ * @param events       List of events to send (i.e. ptr to array of events)
+ * @param num          Number of events
+ * @param queue        Destination queue
+ * @param event_group  Event group
+ *
+ * @return number of events successfully sent (equal to num if all successful)
+ *
+ * @see em_send_group()
+ */
+int em_send_group_multi(const em_event_t events[], int num, em_queue_t queue,
+                        em_event_group_t event_group);
+
+/**
+ * Signal early end of processing of the current event group
+ *
+ * This is an optional call that can be used to move the implicit event group
+ * handling (decrementing the count) from the exit of the event receive
+ * function to the point of this call - the current event group count is
+ * decremented immediately and if it reaches zero the notifications are also
+ * sent. In that case the group will be ready for a new apply after this
+ * returns.
+ *
+ * This impacts the current event group the same way whether it was activated
+ * by receiving a tagged event or by the EO calling em_event_group_assign().
+ *
+ * This call does not change potential atomicity or ordering for the current
+ * event and is a no-operation if called while an event group is not active
+ * (no current group).
+ *
+ * Can only be used within the EO receive function.
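+ *
+ * A minimal receive-function sketch (illustrative only: the names and the
+ * work performed are hypothetical):
+ * @code
+ *	static void my_receive(void *eo_ctx, em_event_t event,
+ *			       em_event_type_t type, em_queue_t queue,
+ *			       void *q_ctx)
+ *	{
+ *		... work counted against the current event group ...
+ *		em_event_group_processing_end(); // decrement the count here
+ *		... further processing outside of the event group ...
+ *	}
+ * @endcode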
+ */
+void em_event_group_processing_end(void);
+
+/**
+ * Assign core local current event group.
+ *
+ * The assign functionality can be used to set the core local current event
+ * group. The event group handling after the assign call is identical to
+ * the handling of an event group that was originally set by sending an event
+ * tagged to that event group, i.e. the core local current event group
+ * is active and will be operated on in a normal way.
+ * Assign will fail if there already is an active current event group, i.e.
+ * only one event group can be active at a time (per core).
+ *
+ * This needs to be used with care, i.e. match the number of events applied
+ * and assigned.
+ *
+ * @param event_group  An applied event group to assign to
+ *
+ * @return EM_OK if assignment was successful
+ */
+em_status_t em_event_group_assign(em_event_group_t event_group);
+
+/**
+ * Abort the ongoing event group.
+ *
+ * This is a recovery operation to abort an ongoing event group in case it does
+ * not get completed. This will reset the group back to a state ready for
+ * a new apply. Note that there is a potential race as the group could get
+ * completed on another thread while executing this (e.g. a delayed event is
+ * finally received and processed). The implementation will synchronize
+ * internal state changes, but this call may succeed or fail depending on
+ * timing so abort should be done with care for recovery purposes only.
+ *
+ * Notification events related to the ongoing (to be aborted) cycle can be
+ * managed as follows:
+ * 1) save possible related notifications using em_event_group_get_notif()
+ * 2) call em_event_group_abort()
+ * 3) IF em_event_group_abort() returns EM_OK the operation was successfully
+ *    completed, meaning the earlier notifications will not be sent; thus the
+ *    saved notifications can be freed or re-used. Otherwise the call was made
+ *    too late and the saved notifications must not be touched as they are to
+ *    be sent.
+ *
+ * This means the synchronization point is em_event_group_abort(), not
+ * em_event_group_get_notif() which might return notifications that will still
+ * be sent.
+ *
+ * @attention Related notification events will not be automatically freed in
+ *            any case and must be handled by the application.
+ *
+ * @param event_group  Event group to abort and reset
+ *
+ * @return EM_OK if the call was made early enough to cleanly abort, i.e.
+ *         before the last event was processed. EM_OK also means the
+ *         notifications will not be sent.
+ */
+em_status_t em_event_group_abort(em_event_group_t event_group);
+
+/**
+ * Return notification events currently related to an applied event group.
+ *
+ * This returns the current notifications or none (0) if they were already sent
+ * (event group completed).
+ *
+ * @attention This is not a synchronization point, which means
+ *            em_event_group_get_notif() could return notifications which
+ *            are just going to be sent and thus should not be touched.
+ *
+ * @param      event_group  Event group
+ * @param      max_notif    Maximum number of notifications to return
+ * @param[out] notif_tbl    Table for notifications to fill
+ *
+ * @return Number of returned notifications
+ *
+ * @see em_event_group_apply(), em_event_group_abort()
+ */
+int em_event_group_get_notif(em_event_group_t event_group,
+                             int max_notif, em_notif_t notif_tbl[]);
+
+/**
+ * Initialize event group iteration and return the first event group handle.
+ *
+ * Can be used to initialize the iteration to retrieve all created event groups
+ * for debugging or management purposes. Use em_event_group_get_next() after
+ * this call until it returns EM_EVENT_GROUP_UNDEF.
+ * A new call to em_event_group_get_first() resets the iteration, which is
+ * maintained per core (thread). The operation should be completed in one go
+ * before returning from the EO's event receive function (or start/stop).
+ *
+ * The number of event groups (output arg 'num') may not match the number of
+ * event groups actually returned by iterating using em_event_group_get_next()
+ * if event groups are added or removed in parallel by another core. The order
+ * of the returned event group handles is undefined.
+ *
+ * @code
+ *	unsigned int num;
+ *	em_event_group_t eg = em_event_group_get_first(&num);
+ *	while (eg != EM_EVENT_GROUP_UNDEF) {
+ *		eg = em_event_group_get_next();
+ *	}
+ * @endcode
+ *
+ * @param[out] num  Pointer to an unsigned int to store the number of
+ *                  event groups into
+ * @return The first event group handle or EM_EVENT_GROUP_UNDEF if none exist
+ *
+ * @see em_event_group_get_next()
+ **/
+em_event_group_t
+em_event_group_get_first(unsigned int *num);
+
+/**
+ * Return the next event group handle.
+ *
+ * Continues the event group iteration started by em_event_group_get_first()
+ * and returns the next event group handle.
+ *
+ * @return The next event group handle or EM_EVENT_GROUP_UNDEF if the event
+ *         group iteration is completed (i.e. no more event groups available).
+ *
+ * @see em_event_group_get_first()
+ **/
+em_event_group_t
+em_event_group_get_next(void);
+
+/**
+ * @}
+ */
+#ifdef __cplusplus
+}
+#endif
+
+#pragma GCC visibility pop
+#endif /* EVENT_MACHINE_EVENT_GROUP_H_ */
diff --git a/include/event_machine/api/event_machine_types.h b/include/event_machine/api/event_machine_types.h
index b1f211bb..a23329bb 100644
--- a/include/event_machine/api/event_machine_types.h
+++ b/include/event_machine/api/event_machine_types.h
@@ -1,541 +1,549 @@
-/*
- * Copyright (c) 2012, Nokia Siemens Networks
- * Copyright (c) 2014, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_TYPES_H_ -#define EVENT_MACHINE_TYPES_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * - * Event Machine basic types - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include -#include - -/** EM boolean values. */ -#define EM_TRUE 1 /**< True */ -#define EM_FALSE 0 /**< False */ - -/** - * @typedef em_event_t - * Event handle - */ -EM_HANDLE_T(em_event_t); -/** Undefined event */ -#define EM_EVENT_UNDEF EM_STATIC_CAST(em_event_t, EM_HDL_UNDEF) -/** em_event_t printf format */ -#define PRI_EVENT PRI_HDL - -/** - * @typedef em_event_type_t - * Event type - * - * The event type is given to the EO-receive function for each received event - * and is also needed for event allocation. This type is an integer that is - * split into major and minor parts: - * 1) the major-field categorizes the event and - * 2) the minor is a more detailed system specific description. - * The major-part will not change by HW, but the minor-part can be - * HW/SW platform specific and thus could be split into more sub-fields as - * needed. The application should use the access functions for reading major - * and minor parts. - * - * The only event type with defined content is EM_EVENT_TYPE_SW with - * minor type 0, which needs to be portable (direct pointer to data). - * - * @see em_get_type_major(), em_get_type_minor(), em_receive_func_t() - */ -typedef uint32_t em_event_type_t; - -/** - * @typedef em_eo_t - * Execution Object handle - * - * @see em_eo_create() - */ -EM_HANDLE_T(em_eo_t); -/** Undefined EO */ -#define EM_EO_UNDEF EM_STATIC_CAST(em_eo_t, EM_HDL_UNDEF) -/** em_eo_t printf format */ -#define PRI_EO PRI_HDL - -/** - * @typedef em_queue_t - * Queue handle - * - * @see em_queue_create(), em_receive_func_t(), em_send() - */ -EM_HANDLE_T(em_queue_t); -/** Undefined queue */ -#define EM_QUEUE_UNDEF EM_STATIC_CAST(em_queue_t, EM_HDL_UNDEF) -/** em_queue_t printf format */ -#define PRI_QUEUE PRI_HDL - -/** - * @typedef em_queue_group_t - * Queue Group handle - * - * Each queue belongs to one queue group that defines a core mask for - * scheduling events, i.e. defines which cores participate in load balancing. - * A queue group can also allow only a single core for no load balancing. - * - * Queue groups need to be created as needed. One default queue group, i.e. - * EM_QUEUE_GROUP_DEFAULT, always exists, and that allows scheduling to all the - * EM cores running this execution binary instance. - * - * @see em_queue_group_create() - */ -EM_HANDLE_T(em_queue_group_t); -/** Undefined queue group */ -#define EM_QUEUE_GROUP_UNDEF EM_STATIC_CAST(em_queue_group_t, EM_HDL_UNDEF) -/** em_queue_group_t printf format */ -#define PRI_QGRP PRI_HDL - -/** - * @typedef em_event_group_t - * Event Group handle - * - * This is used for fork-join event handling. 
- * - * @see em_event_group_create() - */ -EM_HANDLE_T(em_event_group_t); -/** Undefined event group */ -#define EM_EVENT_GROUP_UNDEF EM_STATIC_CAST(em_event_group_t, EM_HDL_UNDEF) -/** em_event_group_t printf format */ -#define PRI_EGRP PRI_HDL - -/** - * @typedef em_atomic_group_t - * Atomic Group handle - * - * This is used to combine multiple atomic queues into one - * atomically scheduled group. - * - * @see em_atomic_group_create() - */ -EM_HANDLE_T(em_atomic_group_t); -/** Undefined atomic group */ -#define EM_ATOMIC_GROUP_UNDEF EM_STATIC_CAST(em_atomic_group_t, EM_HDL_UNDEF) -/** em_atomic_group_t printf format */ -#define PRI_AGRP PRI_HDL - -/** - * @typedef em_queue_type_t - * Queue type. - * - * Affects the scheduling principle - * - * @see em_queue_create(), event_machine_hw_config.h - */ -typedef uint32_t em_queue_type_t; -#define PRI_QTYPE PRIu32 - -/** - * @typedef em_queue_prio_t - * Queue priority - * - * Queue priority defines implementation specific QoS class for event - * scheduling. Priority is an integer in range 0 (lowest) to num priorities - 1. - * Note, that the exact scheduling rules are not defined by EM and all available - * priorities may not be relative to the adjacent one (e.g. using dynamic - * priority, rate limiting or other more complex scheduling discipline). - * There are 5 generic predefined values (em_queue_prio_e) mapped to available - * runtime priorities for portability. - * - * @see em_queue_create(), em_queue_get_num_prio(), event_machine_hw_config.h, - * em_queue_prio_e - */ -typedef uint32_t em_queue_prio_t; -#define PRI_QPRIO PRIu32 - -/** - * Type for queue flags. - * - * This is an unsigned integer with defined flags, that can be combined by - * bitwise 'OR' only. EM_QUEUE_FLAG_DEFAULT can be used in most cases. - * Unused bits must be set to zero. The actual values are system specific, but - * the implementation need to define at least: EM_QUEUE_FLAG_DEFAULT, - * EM_QUEUE_FLAG_BLOCKING, EM_QUEUE_FLAG_NONBLOCKING_LF and - * EM_QUEUE_FLAG_NONBLOCKING_WF even if those would not be supported. - **/ -typedef uint32_t em_queue_flag_t; -/** - * @def EM_QUEUE_FLAG_MASK - * The low 16 bits are reserved for EM, the upper bits are free - * for system-specific use. - */ -#define EM_QUEUE_FLAG_MASK 0x0000FFFF - -/** - * Queue configuration data for queue-create APIs. The use of this conf is - * optional, but provides a standard way to pass extra parameters or specify - * extra requirements. - **/ -typedef struct { - /** - * Extra flags. See em_queue_flag_t for choices. - * EM_QUEUE_FLAG_DEFAULT is defined by all systems and indicates a - * default multithread-safe queue without any special guarantees. - **/ - em_queue_flag_t flags; - /** - * Request for a minimum amount of events the queue can hold or use - * 0 for EM default value. Queue creation will fail, if the system - * cannot support the requested amount. - **/ - unsigned int min_events; - /** - * Size of the data passed via 'conf'. 'conf' is ignored, - * if 'conf_len' is 0. - **/ - size_t conf_len; - /** - * Extra queue configuration data. This can also work - * as a placeholder for directly attached extra data. - **/ - void *conf; -} em_queue_conf_t; - -/** - * EO configuration data via em_eo_start. The use of this is - * optional, but provides a standard way to pass data to EO start. - * EM does not dereference any of the fields here. 
- **/ -typedef struct { - /** Size of the data passed via conf pointer */ - size_t conf_len; - /** Application specific configuration data */ - void *conf; -} em_eo_conf_t; - -/** - * Notification - * - * A notification structure allows the user to define a notification event and - * a destination queue with an optional event group. EM will notify the user by - * sending the event into the given queue. - * - * The egroup-field defines an optional event group for this notification. - * The used event group has to exist and be initialized. Use value - * EM_EVENT_GROUP_UNDEF for normal operation (API 1.0 functionality), i.e. - * notification is not sent to a group. - * egroup should not be the originating group, i.e. should not be sent - * back to the group. - * - * @attention API 1.0 code using notifications may need to be modified - * as the new field need to be initialized. Value EM_EVENT_GROUP_UNDEF - * is the correct value to use for non-group notification but value 0 - * is an alias, e.g. it is safe to initialize the structure with memset(0,..). - */ -typedef struct { - em_event_t event; /**< User defined notification event */ - em_queue_t queue; /**< Destination queue */ - em_event_group_t egroup; /**< Event group for this event */ -} em_notif_t; - -/** - * Scheduling context types - */ -typedef enum { - /** - * Parallel or released context - */ - EM_SCHED_CONTEXT_TYPE_NONE = 0, - /** - * Atomic context - */ - EM_SCHED_CONTEXT_TYPE_ATOMIC = 1, - /** - * Ordered context - */ - EM_SCHED_CONTEXT_TYPE_ORDERED = 2 -} em_sched_context_type_t; - -/** - * EO running state. Event dispatching is only enabled in running state. - **/ -typedef enum { - /** Undefined */ - EM_EO_STATE_UNDEF = 0, - /** Initial state after creation */ - EM_EO_STATE_CREATED = 1, - /** start called, not completed */ - EM_EO_STATE_STARTING = 2, - /** running, event dispatching enabled */ - EM_EO_STATE_RUNNING = 3, - /** stop called, not completed. Next state EM_EO_STATE_CREATED */ - EM_EO_STATE_STOPPING = 4, - /** exceptional state, only delete allowed */ - EM_EO_STATE_ERROR = 5 -} em_eo_state_t; - -/** - * @typedef em_status_t - * Error/Status code. - * - * EM_OK (0) is the general code for success, other values - * describe failed operation. - * There is a generic error code EM_ERROR, but application should - * normally test for not equal to EM_OK. - * - * @see event_machine_hw_config.h, em_error_handler_t(), em_error() - */ -typedef uint32_t em_status_t; -#define PRI_STAT PRIu32 -#define PRIxSTAT PRIx32 - -/** - * @def EM_OK - * Operation successful - */ -#define EM_OK 0 - -/** - * @def EM_ERROR - * Operation not successful. - * - * Generic error code, other error codes are system specific. - */ -#define EM_ERROR 0xffffffff - -/** - * @typedef em_escope_t - * Error scope. - * - * Identifies the error scope for interpreting error codes and variable - * arguments. - * - * @see em_error_handler_t(), em_error() - */ -typedef uint32_t em_escope_t; -#define PRI_ESCOPE PRIu32 - -/** - * @def EM_ESCOPE_BIT - * All EM internal error scopes should have bit 31 set - * - * NOTE: High bit is RESERVED for EM internal escopes and should not be - * used by the application. 
- */ -#define EM_ESCOPE_BIT (0x80000000u) - -/** - * @def EM_ESCOPE - * Test if the error scope identifies an EM function (API or other internal) - */ -#define EM_ESCOPE(escope) (EM_ESCOPE_BIT & (escope)) - -/** - * @def EM_ESCOPE_MASK - * Mask selects the high byte of the 32-bit escope - */ -#define EM_ESCOPE_MASK (0xFF000000) - -/** - * @def EM_ESCOPE_API_TYPE - * EM API functions error scope - */ -#define EM_ESCOPE_API_TYPE (0xFFu) - -/** - * @def EM_ESCOPE_API_MASK - * EM API functions error mask - */ -#define EM_ESCOPE_API_MASK (EM_ESCOPE_BIT | (EM_ESCOPE_API_TYPE << 24)) - -/** - * @def EM_ESCOPE_API - * Test if the error scope identifies an EM API function - */ -#define EM_ESCOPE_API(escope) (((escope) & EM_ESCOPE_MASK) == \ - EM_ESCOPE_API_MASK) - -/* - * EM API functions error scopes: - */ - -/* EM API escopes: Atomic Group */ -#define EM_ESCOPE_ATOMIC_GROUP_CREATE (EM_ESCOPE_API_MASK | 0x0001) -#define EM_ESCOPE_ATOMIC_GROUP_DELETE (EM_ESCOPE_API_MASK | 0x0002) -#define EM_ESCOPE_QUEUE_CREATE_AG (EM_ESCOPE_API_MASK | 0x0003) -#define EM_ESCOPE_QUEUE_CREATE_STATIC_AG (EM_ESCOPE_API_MASK | 0x0004) -#define EM_ESCOPE_ATOMIC_GROUP_GET (EM_ESCOPE_API_MASK | 0x0005) -#define EM_ESCOPE_ATOMIC_GROUP_GET_NAME (EM_ESCOPE_API_MASK | 0x0006) -#define EM_ESCOPE_ATOMIC_GROUP_FIND (EM_ESCOPE_API_MASK | 0x0007) -#define EM_ESCOPE_ATOMIC_GROUP_GET_FIRST (EM_ESCOPE_API_MASK | 0x0008) -#define EM_ESCOPE_ATOMIC_GROUP_GET_NEXT (EM_ESCOPE_API_MASK | 0x0009) -#define EM_ESCOPE_ATOMIC_GROUP_QUEUE_GET_FIRST (EM_ESCOPE_API_MASK | 0x000A) -#define EM_ESCOPE_ATOMIC_GROUP_QUEUE_GET_NEXT (EM_ESCOPE_API_MASK | 0x000B) - -/* EM API escopes: Core */ -#define EM_ESCOPE_CORE_ID (EM_ESCOPE_API_MASK | 0x0101) -#define EM_ESCOPE_CORE_COUNT (EM_ESCOPE_API_MASK | 0x0102) - -/* EM API escopes: Dispatcher */ -#define EM_ESCOPE_DISPATCH (EM_ESCOPE_API_MASK | 0x0201) -#define EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB (EM_ESCOPE_API_MASK | 0x0202) -#define EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB (EM_ESCOPE_API_MASK | 0x0203) -#define EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB (EM_ESCOPE_API_MASK | 0x0204) -#define EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB (EM_ESCOPE_API_MASK | 0x0205) - -/* EM API escopes: EO */ -#define EM_ESCOPE_EO_CREATE (EM_ESCOPE_API_MASK | 0x0301) -#define EM_ESCOPE_EO_CREATE_MULTIRCV (EM_ESCOPE_API_MASK | 0x0302) -#define EM_ESCOPE_EO_MULTIRCV_PARAM_INIT (EM_ESCOPE_API_MASK | 0x0303) -#define EM_ESCOPE_EO_DELETE (EM_ESCOPE_API_MASK | 0x0304) -#define EM_ESCOPE_EO_GET_NAME (EM_ESCOPE_API_MASK | 0x0305) -#define EM_ESCOPE_EO_FIND (EM_ESCOPE_API_MASK | 0x0306) -#define EM_ESCOPE_EO_ADD_QUEUE (EM_ESCOPE_API_MASK | 0x0307) -#define EM_ESCOPE_EO_ADD_QUEUE_SYNC (EM_ESCOPE_API_MASK | 0x0308) -#define EM_ESCOPE_EO_REMOVE_QUEUE (EM_ESCOPE_API_MASK | 0x0309) -#define EM_ESCOPE_EO_REMOVE_QUEUE_SYNC (EM_ESCOPE_API_MASK | 0x030A) -#define EM_ESCOPE_EO_REMOVE_QUEUE_ALL (EM_ESCOPE_API_MASK | 0x030B) -#define EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC (EM_ESCOPE_API_MASK | 0x030C) -#define EM_ESCOPE_EO_REGISTER_ERROR_HANDLER (EM_ESCOPE_API_MASK | 0x030D) -#define EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER (EM_ESCOPE_API_MASK | 0x030E) -#define EM_ESCOPE_EO_START (EM_ESCOPE_API_MASK | 0x030F) -#define EM_ESCOPE_EO_START_SYNC (EM_ESCOPE_API_MASK | 0x0310) -#define EM_ESCOPE_EO_STOP (EM_ESCOPE_API_MASK | 0x0311) -#define EM_ESCOPE_EO_STOP_SYNC (EM_ESCOPE_API_MASK | 0x0312) -#define EM_ESCOPE_EO_CURRENT (EM_ESCOPE_API_MASK | 0x0313) -#define EM_ESCOPE_EO_GET_CONTEXT (EM_ESCOPE_API_MASK | 0x0314) -#define EM_ESCOPE_EO_GET_FIRST (EM_ESCOPE_API_MASK | 
0x0315) -#define EM_ESCOPE_EO_GET_NEXT (EM_ESCOPE_API_MASK | 0x0316) -#define EM_ESCOPE_EO_GET_STATE (EM_ESCOPE_API_MASK | 0x0317) -#define EM_ESCOPE_EO_QUEUE_GET_FIRST (EM_ESCOPE_API_MASK | 0x0318) -#define EM_ESCOPE_EO_QUEUE_GET_NEXT (EM_ESCOPE_API_MASK | 0x0319) - -/* EM API escopes: Error */ -#define EM_ESCOPE_REGISTER_ERROR_HANDLER (EM_ESCOPE_API_MASK | 0x0401) -#define EM_ESCOPE_UNREGISTER_ERROR_HANDLER (EM_ESCOPE_API_MASK | 0x0402) -#define EM_ESCOPE_ERROR (EM_ESCOPE_API_MASK | 0x0403) - -/* EM API escopes: Event Group */ -#define EM_ESCOPE_EVENT_GROUP_CREATE (EM_ESCOPE_API_MASK | 0x0501) -#define EM_ESCOPE_EVENT_GROUP_DELETE (EM_ESCOPE_API_MASK | 0x0502) -#define EM_ESCOPE_EVENT_GROUP_APPLY (EM_ESCOPE_API_MASK | 0x0503) -#define EM_ESCOPE_EVENT_GROUP_INCREMENT (EM_ESCOPE_API_MASK | 0x0504) -#define EM_ESCOPE_EVENT_GROUP_CURRENT (EM_ESCOPE_API_MASK | 0x0505) -#define EM_ESCOPE_EVENT_GROUP_IS_READY (EM_ESCOPE_API_MASK | 0x0506) -#define EM_ESCOPE_SEND_GROUP (EM_ESCOPE_API_MASK | 0x0507) -#define EM_ESCOPE_SEND_GROUP_MULTI (EM_ESCOPE_API_MASK | 0x0508) -#define EM_ESCOPE_EVENT_GROUP_PROCESSING_END (EM_ESCOPE_API_MASK | 0x0509) -#define EM_ESCOPE_EVENT_GROUP_ASSIGN (EM_ESCOPE_API_MASK | 0x050A) -#define EM_ESCOPE_EVENT_GROUP_ABORT (EM_ESCOPE_API_MASK | 0x050B) -#define EM_ESCOPE_EVENT_GROUP_GET_NOTIF (EM_ESCOPE_API_MASK | 0x050C) -#define EM_ESCOPE_EVENT_GROUP_GET_FIRST (EM_ESCOPE_API_MASK | 0x050D) -#define EM_ESCOPE_EVENT_GROUP_GET_NEXT (EM_ESCOPE_API_MASK | 0x050E) - -/* EM API escopes: Event */ -#define EM_ESCOPE_ALLOC (EM_ESCOPE_API_MASK | 0x0601) -#define EM_ESCOPE_ALLOC_MULTI (EM_ESCOPE_API_MASK | 0x0602) -#define EM_ESCOPE_FREE (EM_ESCOPE_API_MASK | 0x0603) -#define EM_ESCOPE_FREE_MULTI (EM_ESCOPE_API_MASK | 0x0604) -#define EM_ESCOPE_SEND (EM_ESCOPE_API_MASK | 0x0605) -#define EM_ESCOPE_SEND_MULTI (EM_ESCOPE_API_MASK | 0x0606) -#define EM_ESCOPE_EVENT_POINTER (EM_ESCOPE_API_MASK | 0x0607) -#define EM_ESCOPE_EVENT_GET_SIZE (EM_ESCOPE_API_MASK | 0x0608) -#define EM_ESCOPE_EVENT_GET_POOL (EM_ESCOPE_API_MASK | 0x0609) -#define EM_ESCOPE_EVENT_SET_TYPE (EM_ESCOPE_API_MASK | 0x060A) -#define EM_ESCOPE_EVENT_GET_TYPE (EM_ESCOPE_API_MASK | 0x060B) -#define EM_ESCOPE_EVENT_GET_TYPE_MULTI (EM_ESCOPE_API_MASK | 0x060C) -#define EM_ESCOPE_EVENT_SAME_TYPE_MULTI (EM_ESCOPE_API_MASK | 0x060D) -#define EM_ESCOPE_EVENT_MARK_SEND (EM_ESCOPE_API_MASK | 0x060E) -#define EM_ESCOPE_EVENT_UNMARK_SEND (EM_ESCOPE_API_MASK | 0x060F) -#define EM_ESCOPE_EVENT_MARK_FREE (EM_ESCOPE_API_MASK | 0x0610) -#define EM_ESCOPE_EVENT_UNMARK_FREE (EM_ESCOPE_API_MASK | 0x0611) -#define EM_ESCOPE_EVENT_MARK_FREE_MULTI (EM_ESCOPE_API_MASK | 0x0612) -#define EM_ESCOPE_EVENT_UNMARK_FREE_MULTI (EM_ESCOPE_API_MASK | 0x0613) -#define EM_ESCOPE_EVENT_CLONE (EM_ESCOPE_API_MASK | 0x0614) -#define EM_ESCOPE_EVENT_UAREA_GET (EM_ESCOPE_API_MASK | 0x0615) -#define EM_ESCOPE_EVENT_UAREA_ID_GET (EM_ESCOPE_API_MASK | 0x0616) -#define EM_ESCOPE_EVENT_UAREA_ID_SET (EM_ESCOPE_API_MASK | 0x0617) -#define EM_ESCOPE_EVENT_UAREA_INFO (EM_ESCOPE_API_MASK | 0x0618) - -/* EM API escopes: Queue Group */ -#define EM_ESCOPE_QUEUE_GROUP_CREATE (EM_ESCOPE_API_MASK | 0x0701) -#define EM_ESCOPE_QUEUE_GROUP_CREATE_SYNC (EM_ESCOPE_API_MASK | 0x0702) -#define EM_ESCOPE_QUEUE_GROUP_DELETE (EM_ESCOPE_API_MASK | 0x0703) -#define EM_ESCOPE_QUEUE_GROUP_DELETE_SYNC (EM_ESCOPE_API_MASK | 0x0704) -#define EM_ESCOPE_QUEUE_GROUP_MODIFY (EM_ESCOPE_API_MASK | 0x0705) -#define EM_ESCOPE_QUEUE_GROUP_MODIFY_SYNC (EM_ESCOPE_API_MASK | 0x0706) -#define 
EM_ESCOPE_QUEUE_GROUP_FIND (EM_ESCOPE_API_MASK | 0x0707) -#define EM_ESCOPE_QUEUE_GROUP_MASK (EM_ESCOPE_API_MASK | 0x0708) -#define EM_ESCOPE_QUEUE_GROUP_GET_NAME (EM_ESCOPE_API_MASK | 0x0709) -#define EM_ESCOPE_QUEUE_GROUP_GET_FIRST (EM_ESCOPE_API_MASK | 0x070A) -#define EM_ESCOPE_QUEUE_GROUP_GET_NEXT (EM_ESCOPE_API_MASK | 0x070B) -#define EM_ESCOPE_QUEUE_GROUP_QUEUE_GET_FIRST (EM_ESCOPE_API_MASK | 0x070C) -#define EM_ESCOPE_QUEUE_GROUP_QUEUE_GET_NEXT (EM_ESCOPE_API_MASK | 0x070D) - -/* EM API escopes: Queue */ -#define EM_ESCOPE_QUEUE_CREATE (EM_ESCOPE_API_MASK | 0x0801) -#define EM_ESCOPE_QUEUE_CREATE_STATIC (EM_ESCOPE_API_MASK | 0x0802) -#define EM_ESCOPE_QUEUE_DELETE (EM_ESCOPE_API_MASK | 0x0803) -#define EM_ESCOPE_QUEUE_SET_CONTEXT (EM_ESCOPE_API_MASK | 0x0804) -#define EM_ESCOPE_QUEUE_GET_CONTEXT (EM_ESCOPE_API_MASK | 0x0805) -#define EM_ESCOPE_QUEUE_GET_NAME (EM_ESCOPE_API_MASK | 0x0806) -#define EM_ESCOPE_QUEUE_GET_PRIORITY (EM_ESCOPE_API_MASK | 0x0807) -#define EM_ESCOPE_QUEUE_GET_TYPE (EM_ESCOPE_API_MASK | 0x0808) -#define EM_ESCOPE_QUEUE_GET_GROUP (EM_ESCOPE_API_MASK | 0x0809) -#define EM_ESCOPE_QUEUE_FIND (EM_ESCOPE_API_MASK | 0x080A) -#define EM_ESCOPE_QUEUE_DEQUEUE (EM_ESCOPE_API_MASK | 0x080B) -#define EM_ESCOPE_QUEUE_DEQUEUE_MULTI (EM_ESCOPE_API_MASK | 0x080C) -#define EM_ESCOPE_QUEUE_CURRENT (EM_ESCOPE_API_MASK | 0x080D) -#define EM_ESCOPE_QUEUE_GET_FIRST (EM_ESCOPE_API_MASK | 0x080E) -#define EM_ESCOPE_QUEUE_GET_NEXT (EM_ESCOPE_API_MASK | 0x080F) -#define EM_ESCOPE_QUEUE_GET_INDEX (EM_ESCOPE_API_MASK | 0x0810) -#define EM_ESCOPE_QUEUE_GET_NUM_PRIO (EM_ESCOPE_API_MASK | 0x0811) - -/* EM API escopes: Scheduler */ -#define EM_ESCOPE_ATOMIC_PROCESSING_END (EM_ESCOPE_API_MASK | 0x0901) -#define EM_ESCOPE_ORDERED_PROCESSING_END (EM_ESCOPE_API_MASK | 0x0902) -#define EM_ESCOPE_PRESCHEDULE (EM_ESCOPE_API_MASK | 0x0903) -#define EM_ESCOPE_SCHED_CONTEXT_TYPE_CURRENT (EM_ESCOPE_API_MASK | 0x0904) - -/* add-on APIs have a separate escope file but define a base here */ -#define EM_ESCOPE_ADD_ON_API_BASE (EM_ESCOPE_API_MASK | 0x1000) - -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_TYPES_H_ */ +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EVENT_MACHINE_TYPES_H_
+#define EVENT_MACHINE_TYPES_H_
+
+#pragma GCC visibility push(default)
+
+/**
+ * @file
+ *
+ * Event Machine basic types
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+#include
+#include
+#include
+
+/** EM boolean values. */
+#define EM_TRUE 1 /**< True */
+#define EM_FALSE 0 /**< False */
+
+/**
+ * @typedef em_event_t
+ * Event handle
+ */
+EM_HANDLE_T(em_event_t);
+/** Undefined event */
+#define EM_EVENT_UNDEF EM_STATIC_CAST(em_event_t, EM_HDL_UNDEF)
+/** em_event_t printf format */
+#define PRI_EVENT PRI_HDL
+
+/**
+ * @typedef em_event_type_t
+ * Event type
+ *
+ * The event type is given to the EO-receive function for each received event
+ * and is also needed for event allocation. This type is an integer that is
+ * split into major and minor parts:
+ * 1) the major-field categorizes the event and
+ * 2) the minor is a more detailed system-specific description.
+ * The major-part will not be changed by the HW, but the minor-part can be
+ * HW/SW platform specific and thus could be split into more sub-fields as
+ * needed. The application should use the access functions for reading major
+ * and minor parts.
+ *
+ * The only event type with defined content is EM_EVENT_TYPE_SW with
+ * minor type 0, which needs to be portable (direct pointer to data).
+ *
+ * @see em_event_type_major(), em_event_type_minor(), em_receive_func_t()
+ */
+typedef uint32_t em_event_type_t;
+
+/**
+ * @typedef em_eo_t
+ * Execution Object handle
+ *
+ * @see em_eo_create()
+ */
+EM_HANDLE_T(em_eo_t);
+/** Undefined EO */
+#define EM_EO_UNDEF EM_STATIC_CAST(em_eo_t, EM_HDL_UNDEF)
+/** em_eo_t printf format */
+#define PRI_EO PRI_HDL
+
+/**
+ * @typedef em_queue_t
+ * Queue handle
+ *
+ * @see em_queue_create(), em_receive_func_t(), em_send()
+ */
+EM_HANDLE_T(em_queue_t);
+/** Undefined queue */
+#define EM_QUEUE_UNDEF EM_STATIC_CAST(em_queue_t, EM_HDL_UNDEF)
+/** em_queue_t printf format */
+#define PRI_QUEUE PRI_HDL
+
+/**
+ * @typedef em_queue_group_t
+ * Queue Group handle
+ *
+ * Each queue belongs to one queue group that defines a core mask for
+ * scheduling events, i.e. defines which cores participate in load balancing.
+ * A queue group can also allow only a single core for no load balancing.
+ *
+ * Queue groups need to be created as needed. One default queue group, i.e.
+ * EM_QUEUE_GROUP_DEFAULT, always exists, and that allows scheduling to all the
+ * EM cores running this execution binary instance.
+ *
+ * @see em_queue_group_create()
+ */
+EM_HANDLE_T(em_queue_group_t);
+/** Undefined queue group */
+#define EM_QUEUE_GROUP_UNDEF EM_STATIC_CAST(em_queue_group_t, EM_HDL_UNDEF)
+/** em_queue_group_t printf format */
+#define PRI_QGRP PRI_HDL
+
+/**
+ * @typedef em_event_group_t
+ * Event Group handle
+ *
+ * This is used for fork-join event handling.
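+ *
+ * A minimal fork-join sketch (illustrative only: the events 'ev1', 'ev2',
+ * 'notif_ev' and the queues are assumed to have been created earlier and
+ * error handling is omitted):
+ * @code
+ *	em_notif_t notif = {.event = notif_ev, .queue = notif_queue,
+ *			    .egroup = EM_EVENT_GROUP_UNDEF};
+ *	em_event_group_t egrp = em_event_group_create();
+ *
+ *	// Expect 2 events, send 1 notification when both have been handled
+ *	em_event_group_apply(egrp, 2, 1, &notif);
+ *	em_send_group(ev1, work_queue, egrp); // fork
+ *	em_send_group(ev2, work_queue, egrp); // fork
+ *	// 'notif_ev' is delivered into 'notif_queue' once both events
+ *	// have been processed (join)
+ * @endcode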
+ *
+ * @see em_event_group_create()
+ */
+EM_HANDLE_T(em_event_group_t);
+/** Undefined event group */
+#define EM_EVENT_GROUP_UNDEF EM_STATIC_CAST(em_event_group_t, EM_HDL_UNDEF)
+/** em_event_group_t printf format */
+#define PRI_EGRP PRI_HDL
+
+/**
+ * @typedef em_atomic_group_t
+ * Atomic Group handle
+ *
+ * This is used to combine multiple atomic queues into one
+ * atomically scheduled group.
+ *
+ * @see em_atomic_group_create()
+ */
+EM_HANDLE_T(em_atomic_group_t);
+/** Undefined atomic group */
+#define EM_ATOMIC_GROUP_UNDEF EM_STATIC_CAST(em_atomic_group_t, EM_HDL_UNDEF)
+/** em_atomic_group_t printf format */
+#define PRI_AGRP PRI_HDL
+
+/**
+ * @typedef em_queue_type_t
+ * Queue type.
+ *
+ * Affects the scheduling principle
+ *
+ * @see em_queue_create(), event_machine_hw_config.h
+ */
+typedef uint32_t em_queue_type_t;
+#define PRI_QTYPE PRIu32
+
+/**
+ * @typedef em_queue_prio_t
+ * Queue priority
+ *
+ * Queue priority defines an implementation specific QoS class for event
+ * scheduling. Priority is an integer in range 0 (lowest) to num priorities - 1.
+ * Note that the exact scheduling rules are not defined by EM and all available
+ * priorities may not be relative to the adjacent ones (e.g. when using dynamic
+ * priority, rate limiting or some other more complex scheduling discipline).
+ * There are 5 generic predefined values (em_queue_prio_e) mapped to available
+ * runtime priorities for portability.
+ *
+ * @see em_queue_create(), em_queue_get_num_prio(), event_machine_hw_config.h,
+ * em_queue_prio_e
+ */
+typedef uint32_t em_queue_prio_t;
+#define PRI_QPRIO PRIu32
+
+/**
+ * Type for queue flags.
+ *
+ * This is an unsigned integer with defined flags that can be combined by
+ * bitwise 'OR' only. EM_QUEUE_FLAG_DEFAULT can be used in most cases.
+ * Unused bits must be set to zero. The actual values are system specific, but
+ * the implementation needs to define at least: EM_QUEUE_FLAG_DEFAULT,
+ * EM_QUEUE_FLAG_BLOCKING, EM_QUEUE_FLAG_NONBLOCKING_LF and
+ * EM_QUEUE_FLAG_NONBLOCKING_WF even if those would not be supported.
+ **/
+typedef uint32_t em_queue_flag_t;
+/**
+ * @def EM_QUEUE_FLAG_MASK
+ * The low 16 bits are reserved for EM, the upper bits are free
+ * for system-specific use.
+ */
+#define EM_QUEUE_FLAG_MASK 0x0000FFFF
+
+/**
+ * Queue configuration data for queue-create APIs. The use of this conf is
+ * optional, but it provides a standard way to pass extra parameters or specify
+ * extra requirements.
+ **/
+typedef struct {
+	/**
+	 * Extra flags. See em_queue_flag_t for choices.
+	 * EM_QUEUE_FLAG_DEFAULT is defined by all systems and indicates a
+	 * default multithread-safe queue without any special guarantees.
+	 **/
+	em_queue_flag_t flags;
+	/**
+	 * Request for a minimum number of events the queue can hold, or use
+	 * 0 for the EM default value. Queue creation will fail if the system
+	 * cannot support the requested amount.
+	 **/
+	unsigned int min_events;
+	/**
+	 * Size of the data passed via 'conf'. 'conf' is ignored,
+	 * if 'conf_len' is 0.
+	 **/
+	size_t conf_len;
+	/**
+	 * Extra queue configuration data. This can also work
+	 * as a placeholder for directly attached extra data.
+	 **/
+	void *conf;
+} em_queue_conf_t;
+
+/**
+ * EO configuration data, passed via em_eo_start(). The use of this is
+ * optional, but it provides a standard way to pass data to EO start.
+ * EM does not dereference any of the fields here.
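+ *
+ * A usage sketch (the 'app_conf' struct is application-defined and purely
+ * illustrative; 'eo' is assumed to be a created EO):
+ * @code
+ *	struct app_conf { int mode; } my_conf = {.mode = 1};
+ *	em_eo_conf_t conf = {.conf_len = sizeof(my_conf), .conf = &my_conf};
+ *	em_status_t start_res;
+ *
+ *	// 'conf' is passed through to the EO start function
+ *	em_eo_start(eo, &start_res, &conf, 0, NULL);
+ * @endcode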
+ **/
+typedef struct {
+	/** Size of the data passed via the conf pointer */
+	size_t conf_len;
+	/** Application specific configuration data */
+	void *conf;
+} em_eo_conf_t;
+
+/**
+ * Notification
+ *
+ * A notification structure allows the user to define a notification event and
+ * a destination queue with an optional event group. EM will notify the user by
+ * sending the event into the given queue.
+ *
+ * The egroup-field defines an optional event group for this notification.
+ * The event group used has to exist and be initialized. Use the value
+ * EM_EVENT_GROUP_UNDEF for normal operation (API 1.0 functionality), i.e.
+ * the notification is not sent to a group.
+ * egroup should not be the originating group, i.e. the notification should
+ * not be sent back to that group.
+ *
+ * @attention API 1.0 code using notifications may need to be modified
+ * as the new field needs to be initialized. The value EM_EVENT_GROUP_UNDEF
+ * is the correct value to use for a non-group notification, but the value 0
+ * is an alias, i.e. it is safe to initialize the structure with memset(0,..).
+ */
+typedef struct {
+	em_event_t event; /**< User defined notification event */
+	em_queue_t queue; /**< Destination queue */
+	em_event_group_t egroup; /**< Event group for this event */
+} em_notif_t;
+
+/**
+ * Scheduling context types
+ */
+typedef enum {
+	/**
+	 * Parallel or released context
+	 */
+	EM_SCHED_CONTEXT_TYPE_NONE = 0,
+	/**
+	 * Atomic context
+	 */
+	EM_SCHED_CONTEXT_TYPE_ATOMIC = 1,
+	/**
+	 * Ordered context
+	 */
+	EM_SCHED_CONTEXT_TYPE_ORDERED = 2
+} em_sched_context_type_t;
+
+/**
+ * EO running state. Event dispatching is only enabled in the running state.
+ **/
+typedef enum {
+	/** Undefined */
+	EM_EO_STATE_UNDEF = 0,
+	/** Initial state after creation */
+	EM_EO_STATE_CREATED = 1,
+	/** start called, not completed */
+	EM_EO_STATE_STARTING = 2,
+	/** running, event dispatching enabled */
+	EM_EO_STATE_RUNNING = 3,
+	/** stop called, not completed. Next state EM_EO_STATE_CREATED */
+	EM_EO_STATE_STOPPING = 4,
+	/** exceptional state, only delete allowed */
+	EM_EO_STATE_ERROR = 5
+} em_eo_state_t;
+
+/**
+ * @typedef em_status_t
+ * Error/Status code.
+ *
+ * EM_OK (0) is the general code for success, other values
+ * describe a failed operation.
+ * There is a generic error code EM_ERROR, but applications should
+ * normally just test for 'not equal to EM_OK'.
+ *
+ * @see event_machine_hw_config.h, em_error_handler_t(), em_error()
+ */
+typedef uint32_t em_status_t;
+#define PRI_STAT PRIu32
+#define PRIxSTAT PRIx32
+
+/**
+ * @def EM_OK
+ * Operation successful
+ */
+#define EM_OK 0
+
+/**
+ * @def EM_ERROR
+ * Operation not successful.
+ *
+ * Generic error code, other error codes are system specific.
+ */
+#define EM_ERROR 0xffffffff
+
+/**
+ * @typedef em_escope_t
+ * Error scope.
+ *
+ * Identifies the error scope for interpreting error codes and variable
+ * arguments.
+ *
+ * @see em_error_handler_t(), em_error()
+ */
+typedef uint32_t em_escope_t;
+#define PRI_ESCOPE PRIu32
+
+/**
+ * @def EM_ESCOPE_BIT
+ * All EM internal error scopes should have bit 31 set.
+ *
+ * NOTE: The high bit is RESERVED for EM internal escopes and should not be
+ * used by the application.
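+ *
+ * An application can thus define its own escopes as long as bit 31 stays
+ * cleared, e.g. (an illustrative sketch, the name and value are made up):
+ * @code
+ *	#define APP_ESCOPE_INIT ((em_escope_t)0x00000001)
+ *
+ *	// report an error tagged with an application escope
+ *	em_error(EM_ERROR, APP_ESCOPE_INIT);
+ * @endcode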
+ */ +#define EM_ESCOPE_BIT (0x80000000u) + +/** + * @def EM_ESCOPE + * Test if the error scope identifies an EM function (API or other internal) + */ +#define EM_ESCOPE(escope) (EM_ESCOPE_BIT & (escope)) + +/** + * @def EM_ESCOPE_MASK + * Mask selects the high byte of the 32-bit escope + */ +#define EM_ESCOPE_MASK (0xFF000000) + +/** + * @def EM_ESCOPE_API_TYPE + * EM API functions error scope + */ +#define EM_ESCOPE_API_TYPE (0xFFu) + +/** + * @def EM_ESCOPE_API_MASK + * EM API functions error mask + */ +#define EM_ESCOPE_API_MASK (EM_ESCOPE_BIT | (EM_ESCOPE_API_TYPE << 24)) + +/** + * @def EM_ESCOPE_API + * Test if the error scope identifies an EM API function + */ +#define EM_ESCOPE_API(escope) (((escope) & EM_ESCOPE_MASK) == \ + EM_ESCOPE_API_MASK) + +/* + * EM API functions error scopes: + */ + +/* EM API escopes: Atomic Group */ +#define EM_ESCOPE_ATOMIC_GROUP_CREATE (EM_ESCOPE_API_MASK | 0x0001) +#define EM_ESCOPE_ATOMIC_GROUP_DELETE (EM_ESCOPE_API_MASK | 0x0002) +#define EM_ESCOPE_QUEUE_CREATE_AG (EM_ESCOPE_API_MASK | 0x0003) +#define EM_ESCOPE_QUEUE_CREATE_STATIC_AG (EM_ESCOPE_API_MASK | 0x0004) +#define EM_ESCOPE_ATOMIC_GROUP_GET (EM_ESCOPE_API_MASK | 0x0005) +#define EM_ESCOPE_ATOMIC_GROUP_GET_NAME (EM_ESCOPE_API_MASK | 0x0006) +#define EM_ESCOPE_ATOMIC_GROUP_FIND (EM_ESCOPE_API_MASK | 0x0007) +#define EM_ESCOPE_ATOMIC_GROUP_GET_FIRST (EM_ESCOPE_API_MASK | 0x0008) +#define EM_ESCOPE_ATOMIC_GROUP_GET_NEXT (EM_ESCOPE_API_MASK | 0x0009) +#define EM_ESCOPE_ATOMIC_GROUP_QUEUE_GET_FIRST (EM_ESCOPE_API_MASK | 0x000A) +#define EM_ESCOPE_ATOMIC_GROUP_QUEUE_GET_NEXT (EM_ESCOPE_API_MASK | 0x000B) + +/* EM API escopes: Core */ +#define EM_ESCOPE_CORE_ID (EM_ESCOPE_API_MASK | 0x0101) +#define EM_ESCOPE_CORE_COUNT (EM_ESCOPE_API_MASK | 0x0102) + +/* EM API escopes: Dispatcher */ +#define EM_ESCOPE_DISPATCH (EM_ESCOPE_API_MASK | 0x0201) +#define EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB (EM_ESCOPE_API_MASK | 0x0202) +#define EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB (EM_ESCOPE_API_MASK | 0x0203) +#define EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB (EM_ESCOPE_API_MASK | 0x0204) +#define EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB (EM_ESCOPE_API_MASK | 0x0205) + +/* EM API escopes: EO */ +#define EM_ESCOPE_EO_CREATE (EM_ESCOPE_API_MASK | 0x0301) +#define EM_ESCOPE_EO_CREATE_MULTIRCV (EM_ESCOPE_API_MASK | 0x0302) +#define EM_ESCOPE_EO_MULTIRCV_PARAM_INIT (EM_ESCOPE_API_MASK | 0x0303) +#define EM_ESCOPE_EO_DELETE (EM_ESCOPE_API_MASK | 0x0304) +#define EM_ESCOPE_EO_GET_NAME (EM_ESCOPE_API_MASK | 0x0305) +#define EM_ESCOPE_EO_FIND (EM_ESCOPE_API_MASK | 0x0306) +#define EM_ESCOPE_EO_ADD_QUEUE (EM_ESCOPE_API_MASK | 0x0307) +#define EM_ESCOPE_EO_ADD_QUEUE_SYNC (EM_ESCOPE_API_MASK | 0x0308) +#define EM_ESCOPE_EO_REMOVE_QUEUE (EM_ESCOPE_API_MASK | 0x0309) +#define EM_ESCOPE_EO_REMOVE_QUEUE_SYNC (EM_ESCOPE_API_MASK | 0x030A) +#define EM_ESCOPE_EO_REMOVE_QUEUE_ALL (EM_ESCOPE_API_MASK | 0x030B) +#define EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC (EM_ESCOPE_API_MASK | 0x030C) +#define EM_ESCOPE_EO_REGISTER_ERROR_HANDLER (EM_ESCOPE_API_MASK | 0x030D) +#define EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER (EM_ESCOPE_API_MASK | 0x030E) +#define EM_ESCOPE_EO_START (EM_ESCOPE_API_MASK | 0x030F) +#define EM_ESCOPE_EO_START_SYNC (EM_ESCOPE_API_MASK | 0x0310) +#define EM_ESCOPE_EO_STOP (EM_ESCOPE_API_MASK | 0x0311) +#define EM_ESCOPE_EO_STOP_SYNC (EM_ESCOPE_API_MASK | 0x0312) +#define EM_ESCOPE_EO_CURRENT (EM_ESCOPE_API_MASK | 0x0313) +#define EM_ESCOPE_EO_GET_CONTEXT (EM_ESCOPE_API_MASK | 0x0314) +#define EM_ESCOPE_EO_GET_FIRST (EM_ESCOPE_API_MASK | 
0x0315) +#define EM_ESCOPE_EO_GET_NEXT (EM_ESCOPE_API_MASK | 0x0316) +#define EM_ESCOPE_EO_GET_STATE (EM_ESCOPE_API_MASK | 0x0317) +#define EM_ESCOPE_EO_QUEUE_GET_FIRST (EM_ESCOPE_API_MASK | 0x0318) +#define EM_ESCOPE_EO_QUEUE_GET_NEXT (EM_ESCOPE_API_MASK | 0x0319) + +/* EM API escopes: Error */ +#define EM_ESCOPE_REGISTER_ERROR_HANDLER (EM_ESCOPE_API_MASK | 0x0401) +#define EM_ESCOPE_UNREGISTER_ERROR_HANDLER (EM_ESCOPE_API_MASK | 0x0402) +#define EM_ESCOPE_ERROR (EM_ESCOPE_API_MASK | 0x0403) + +/* EM API escopes: Event Group */ +#define EM_ESCOPE_EVENT_GROUP_CREATE (EM_ESCOPE_API_MASK | 0x0501) +#define EM_ESCOPE_EVENT_GROUP_DELETE (EM_ESCOPE_API_MASK | 0x0502) +#define EM_ESCOPE_EVENT_GROUP_APPLY (EM_ESCOPE_API_MASK | 0x0503) +#define EM_ESCOPE_EVENT_GROUP_INCREMENT (EM_ESCOPE_API_MASK | 0x0504) +#define EM_ESCOPE_EVENT_GROUP_CURRENT (EM_ESCOPE_API_MASK | 0x0505) +#define EM_ESCOPE_EVENT_GROUP_IS_READY (EM_ESCOPE_API_MASK | 0x0506) +#define EM_ESCOPE_SEND_GROUP (EM_ESCOPE_API_MASK | 0x0507) +#define EM_ESCOPE_SEND_GROUP_MULTI (EM_ESCOPE_API_MASK | 0x0508) +#define EM_ESCOPE_EVENT_GROUP_PROCESSING_END (EM_ESCOPE_API_MASK | 0x0509) +#define EM_ESCOPE_EVENT_GROUP_ASSIGN (EM_ESCOPE_API_MASK | 0x050A) +#define EM_ESCOPE_EVENT_GROUP_ABORT (EM_ESCOPE_API_MASK | 0x050B) +#define EM_ESCOPE_EVENT_GROUP_GET_NOTIF (EM_ESCOPE_API_MASK | 0x050C) +#define EM_ESCOPE_EVENT_GROUP_GET_FIRST (EM_ESCOPE_API_MASK | 0x050D) +#define EM_ESCOPE_EVENT_GROUP_GET_NEXT (EM_ESCOPE_API_MASK | 0x050E) + +/* EM API escopes: Event */ +#define EM_ESCOPE_ALLOC (EM_ESCOPE_API_MASK | 0x0601) +#define EM_ESCOPE_ALLOC_MULTI (EM_ESCOPE_API_MASK | 0x0602) +#define EM_ESCOPE_FREE (EM_ESCOPE_API_MASK | 0x0603) +#define EM_ESCOPE_FREE_MULTI (EM_ESCOPE_API_MASK | 0x0604) +#define EM_ESCOPE_SEND (EM_ESCOPE_API_MASK | 0x0605) +#define EM_ESCOPE_SEND_MULTI (EM_ESCOPE_API_MASK | 0x0606) +#define EM_ESCOPE_EVENT_POINTER (EM_ESCOPE_API_MASK | 0x0607) +#define EM_ESCOPE_EVENT_GET_SIZE (EM_ESCOPE_API_MASK | 0x0608) +#define EM_ESCOPE_EVENT_GET_POOL (EM_ESCOPE_API_MASK | 0x0609) +#define EM_ESCOPE_EVENT_SET_TYPE (EM_ESCOPE_API_MASK | 0x060A) +#define EM_ESCOPE_EVENT_GET_TYPE (EM_ESCOPE_API_MASK | 0x060B) +#define EM_ESCOPE_EVENT_GET_TYPE_MULTI (EM_ESCOPE_API_MASK | 0x060C) +#define EM_ESCOPE_EVENT_SAME_TYPE_MULTI (EM_ESCOPE_API_MASK | 0x060D) +#define EM_ESCOPE_EVENT_MARK_SEND (EM_ESCOPE_API_MASK | 0x060E) +#define EM_ESCOPE_EVENT_UNMARK_SEND (EM_ESCOPE_API_MASK | 0x060F) +#define EM_ESCOPE_EVENT_MARK_FREE (EM_ESCOPE_API_MASK | 0x0610) +#define EM_ESCOPE_EVENT_UNMARK_FREE (EM_ESCOPE_API_MASK | 0x0611) +#define EM_ESCOPE_EVENT_MARK_FREE_MULTI (EM_ESCOPE_API_MASK | 0x0612) +#define EM_ESCOPE_EVENT_UNMARK_FREE_MULTI (EM_ESCOPE_API_MASK | 0x0613) +#define EM_ESCOPE_EVENT_CLONE (EM_ESCOPE_API_MASK | 0x0614) +#define EM_ESCOPE_EVENT_UAREA_GET (EM_ESCOPE_API_MASK | 0x0615) +#define EM_ESCOPE_EVENT_UAREA_ID_GET (EM_ESCOPE_API_MASK | 0x0616) +#define EM_ESCOPE_EVENT_UAREA_ID_SET (EM_ESCOPE_API_MASK | 0x0617) +#define EM_ESCOPE_EVENT_UAREA_INFO (EM_ESCOPE_API_MASK | 0x0618) +#define EM_ESCOPE_EVENT_REF (EM_ESCOPE_API_MASK | 0x0619) +#define EM_ESCOPE_EVENT_HAS_REF (EM_ESCOPE_API_MASK | 0x061A) +#define EM_ESCOPE_EVENT_VECTOR_FREE (EM_ESCOPE_API_MASK | 0x061B) +#define EM_ESCOPE_EVENT_VECTOR_TBL (EM_ESCOPE_API_MASK | 0x061C) +#define EM_ESCOPE_EVENT_VECTOR_SIZE (EM_ESCOPE_API_MASK | 0x061D) +#define EM_ESCOPE_EVENT_VECTOR_SIZE_SET (EM_ESCOPE_API_MASK | 0x061E) +#define EM_ESCOPE_EVENT_VECTOR_MAX_SIZE (EM_ESCOPE_API_MASK | 0x061F) +#define 
EM_ESCOPE_EVENT_VECTOR_INFO (EM_ESCOPE_API_MASK | 0x0620) + +/* EM API escopes: Queue Group */ +#define EM_ESCOPE_QUEUE_GROUP_CREATE (EM_ESCOPE_API_MASK | 0x0701) +#define EM_ESCOPE_QUEUE_GROUP_CREATE_SYNC (EM_ESCOPE_API_MASK | 0x0702) +#define EM_ESCOPE_QUEUE_GROUP_DELETE (EM_ESCOPE_API_MASK | 0x0703) +#define EM_ESCOPE_QUEUE_GROUP_DELETE_SYNC (EM_ESCOPE_API_MASK | 0x0704) +#define EM_ESCOPE_QUEUE_GROUP_MODIFY (EM_ESCOPE_API_MASK | 0x0705) +#define EM_ESCOPE_QUEUE_GROUP_MODIFY_SYNC (EM_ESCOPE_API_MASK | 0x0706) +#define EM_ESCOPE_QUEUE_GROUP_FIND (EM_ESCOPE_API_MASK | 0x0707) +#define EM_ESCOPE_QUEUE_GROUP_MASK (EM_ESCOPE_API_MASK | 0x0708) +#define EM_ESCOPE_QUEUE_GROUP_GET_NAME (EM_ESCOPE_API_MASK | 0x0709) +#define EM_ESCOPE_QUEUE_GROUP_GET_FIRST (EM_ESCOPE_API_MASK | 0x070A) +#define EM_ESCOPE_QUEUE_GROUP_GET_NEXT (EM_ESCOPE_API_MASK | 0x070B) +#define EM_ESCOPE_QUEUE_GROUP_QUEUE_GET_FIRST (EM_ESCOPE_API_MASK | 0x070C) +#define EM_ESCOPE_QUEUE_GROUP_QUEUE_GET_NEXT (EM_ESCOPE_API_MASK | 0x070D) + +/* EM API escopes: Queue */ +#define EM_ESCOPE_QUEUE_CREATE (EM_ESCOPE_API_MASK | 0x0801) +#define EM_ESCOPE_QUEUE_CREATE_STATIC (EM_ESCOPE_API_MASK | 0x0802) +#define EM_ESCOPE_QUEUE_DELETE (EM_ESCOPE_API_MASK | 0x0803) +#define EM_ESCOPE_QUEUE_SET_CONTEXT (EM_ESCOPE_API_MASK | 0x0804) +#define EM_ESCOPE_QUEUE_GET_CONTEXT (EM_ESCOPE_API_MASK | 0x0805) +#define EM_ESCOPE_QUEUE_GET_NAME (EM_ESCOPE_API_MASK | 0x0806) +#define EM_ESCOPE_QUEUE_GET_PRIORITY (EM_ESCOPE_API_MASK | 0x0807) +#define EM_ESCOPE_QUEUE_GET_TYPE (EM_ESCOPE_API_MASK | 0x0808) +#define EM_ESCOPE_QUEUE_GET_GROUP (EM_ESCOPE_API_MASK | 0x0809) +#define EM_ESCOPE_QUEUE_FIND (EM_ESCOPE_API_MASK | 0x080A) +#define EM_ESCOPE_QUEUE_DEQUEUE (EM_ESCOPE_API_MASK | 0x080B) +#define EM_ESCOPE_QUEUE_DEQUEUE_MULTI (EM_ESCOPE_API_MASK | 0x080C) +#define EM_ESCOPE_QUEUE_CURRENT (EM_ESCOPE_API_MASK | 0x080D) +#define EM_ESCOPE_QUEUE_GET_FIRST (EM_ESCOPE_API_MASK | 0x080E) +#define EM_ESCOPE_QUEUE_GET_NEXT (EM_ESCOPE_API_MASK | 0x080F) +#define EM_ESCOPE_QUEUE_GET_INDEX (EM_ESCOPE_API_MASK | 0x0810) +#define EM_ESCOPE_QUEUE_GET_NUM_PRIO (EM_ESCOPE_API_MASK | 0x0811) + +/* EM API escopes: Scheduler */ +#define EM_ESCOPE_ATOMIC_PROCESSING_END (EM_ESCOPE_API_MASK | 0x0901) +#define EM_ESCOPE_ORDERED_PROCESSING_END (EM_ESCOPE_API_MASK | 0x0902) +#define EM_ESCOPE_PRESCHEDULE (EM_ESCOPE_API_MASK | 0x0903) +#define EM_ESCOPE_SCHED_CONTEXT_TYPE_CURRENT (EM_ESCOPE_API_MASK | 0x0904) + +/* add-on APIs have a separate escope file but define a base here */ +#define EM_ESCOPE_ADD_ON_API_BASE (EM_ESCOPE_API_MASK | 0x1000) + +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_TYPES_H_ */ diff --git a/include/event_machine/helper/event_machine_debug.h b/include/event_machine/helper/event_machine_debug.h new file mode 100644 index 00000000..0354ce20 --- /dev/null +++ b/include/event_machine/helper/event_machine_debug.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2022, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef EVENT_MACHINE_DEBUG_H_ +#define EVENT_MACHINE_DEBUG_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * Event Machine helper functions for debug support + * + * Not for normal application use, may lower performance or cause latency. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * EM dispatcher debug timestamp points + * EM_DEBUG_TSP_SCHED_ENTRY: EM core local timestamp taken by the dispatcher + * _before_ asking the scheduler for new events. + * EM_DEBUG_TSP_SCHED_RETURN: EM core local timestamp taken by the dispatcher + * _after_ returning from the scheduler. + */ +typedef enum { + EM_DEBUG_TSP_SCHED_ENTRY, /* timestamp at scheduler entry */ + EM_DEBUG_TSP_SCHED_RETURN, /* timestamp at scheduler return */ + EM_DEBUG_TSP_LAST +} em_debug_tsp_t; + +/** + * Returns a per core timestamp from the EM dispatcher. + * + * Not intended for normal application use! + * These debug timestamps are disabled by default and must be enabled by the + * user (see configure option '--enable-debug-timestamps=...' or the + * EM_DEBUG_TIMESTAMP_ENABLE define). + * + * Timestamps are taken with odp_time_global/_strict() and converted to ns. + * The timestamps can be used to e.g. measure the EM dispatcher overhead from + * EM_DEBUG_TSP_SCHED_RETURN to the EO-receive() including all code and hooks + * in between. + * + * If debug timestamps are disabled or the given timestamp point does not exist, + * 0 will be returned. + * + * @param tsp timestamp point, selects which EM internal timestamp to return + * + * @return timestamp in ns + * @retval 0 if debug timestamps are disabled or the given timestamp point does not exist + * + * @see em_debug_tsp_t + */ +uint64_t em_debug_timestamp(em_debug_tsp_t tsp); + +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_DEBUG_H_ */ diff --git a/include/event_machine/platform/env/env_bitmask.h b/include/event_machine/platform/env/env_bitmask.h index d85b347a..88bd7e52 100644 --- a/include/event_machine/platform/env/env_bitmask.h +++ b/include/event_machine/platform/env/env_bitmask.h @@ -1,304 +1,302 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * Env bit mask functions - don't include this file directly, - * instead include "environment.h" - */ - -#ifndef _ENV_BITMASK_H_ -#define _ENV_BITMASK_H_ - -#pragma GCC visibility push(default) - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -/* - * Type for a bit mask. - */ -typedef struct { - odp_cpumask_t odp_cpumask; -} env_bitmask_t; - -/** - * Zero the whole mask. - * - * @param mask Bit mask - */ -static inline void env_bitmask_zero(env_bitmask_t *mask) -{ - odp_cpumask_zero(&mask->odp_cpumask); -} - -/** - * Set a bit in the mask. - * - * @param bit Bit id - * @param mask Bit mask - */ -static inline void env_bitmask_set(int bit, env_bitmask_t *mask) -{ - odp_cpumask_set(&mask->odp_cpumask, bit); -} - -/** - * Clear a bit in the mask. - * - * @param bit Bit id - * @param mask Bit mask - */ -static inline void env_bitmask_clr(int bit, env_bitmask_t *mask) -{ - odp_cpumask_clr(&mask->odp_cpumask, bit); -} - -/** - * Test if a bit is set in the mask. - * - * @param bit Bit id - * @param mask Bit mask - * - * @return Non-zero if bit id is set in the mask - */ -static inline int env_bitmask_isset(int bit, const env_bitmask_t *mask) -{ - return odp_cpumask_isset(&mask->odp_cpumask, bit); -} - -/** - * Test if the mask is all zero. - * - * @param mask Bit mask - * - * @return Non-zero if the mask is all zero - */ -static inline int env_bitmask_iszero(const env_bitmask_t *mask) -{ - odp_cpumask_t zero_mask; - - odp_cpumask_zero(&zero_mask); - - return odp_cpumask_equal(&zero_mask, &mask->odp_cpumask); -} - -/** - * Test if two masks are equal - * - * @param mask1 First bit mask - * @param mask2 Second bit mask - * - * @return Non-zero if the two masks are equal - */ -static inline int env_bitmask_equal(const env_bitmask_t *mask1, - const env_bitmask_t *mask2) -{ - return odp_cpumask_equal(&mask1->odp_cpumask, &mask2->odp_cpumask); -} - -/** - * Set a range (0...count-1) of bits in the mask. 
- * - * @param count Number of bits to set - * @param mask Bit mask - */ -static inline void env_bitmask_set_count(int count, env_bitmask_t *mask) -{ - int i; - - for (i = 0; i < count; i++) - odp_cpumask_set(&mask->odp_cpumask, i); -} - -/** - * Copy bit mask - * - * @param dst Destination bit mask - * @param src Source bit mask - */ -static inline void env_bitmask_copy(env_bitmask_t *dst, - const env_bitmask_t *src) -{ - odp_cpumask_copy(&dst->odp_cpumask, &src->odp_cpumask); -} - -/** - * Count the number of bits set in the mask. - * - * @param mask Bit mask - * - * @return Number of bits set - */ -static inline int env_bitmask_count(const env_bitmask_t *mask) -{ - return odp_cpumask_count(&mask->odp_cpumask); -} - -/** - * Set specified bits from 'bits[]' in bit mask. - * - * bit 0: bits[0] = 0x1 (len = 1) - * bit 1: bits[0] = 0x2 (len = 1) - * ... - * bit 64: bits[0] = 0x0, bits[1] = 0x1 (len = 2) - * bit 65: bits[0] = 0x0, bits[1] = 0x2 (len = 2) - * ... - * cores 0-127: bits[0]=0xffffffffffffffff, bits[1]=0xffffffffffffffff (len=2) - * ... - * @param bits[] array of uint64_t:s containing the bits to set in the bit mask - * @param len number of array elements in bits[]. - * @param mask bit mask to set. - * - * @note bits ar 'or'ed into mask, so any previously set bits will remain set. - */ -static inline void env_bitmask_set_bits(const uint64_t bits[], int len, - env_bitmask_t *mask) -{ - (void)bits; - (void)len; - (void)mask; - - fprintf(stderr, "%s() function not implemented!\n", __func__); -} - -/** - * Get bit mask, stored in a uint64_t array for the user - * - * bit 0: bits[0] = 0x1 (len = 1) - * bit 1: bits[0] = 0x2 (len = 1) - * ... - * bit 64: bits[0] = 0x0, bits[1] = 0x1 (len = 2) - * bit 65: bits[0] = 0x0, bits[1] = 0x2 (len = 2) - * ... - * cores 0-127: bits[0]=0xffffffffffffffff, bits[1]=0xffffffffffffffff (len=2) - * ... - * @param[out] bits[] array of uint64_t:s that the bit mask will be stored in - * @param len number of array elements in bits[]. - * @param mask bit mask to get bits from. - * - * @return The number of uint64_t:s written into bits[]. - */ -static inline int env_bitmask_get_bits(uint64_t bits[/*out*/], int len, - const env_bitmask_t *mask) -{ - (void)bits; - (void)len; - (void)mask; - - fprintf(stderr, "%s() function not implemented!\n", __func__); - - return 0; -} - -/** - * Return the index (position) of the Nth set bit in the bit mask - * - * @param n Nth set bit, note n=1 means first set bit, n=[1...MaxCores] - * @param mask bit mask - * - * @return Index of the Nth set bit, <0 on error or if no such bit. 
- */ -static inline int env_bitmask_idx(int n, const env_bitmask_t *mask) -{ - if (unlikely((unsigned int)(n - 1) >= ODP_CPUMASK_SIZE)) - return -1; - - int i = 1; - int cpu = odp_cpumask_first(&mask->odp_cpumask); - - while (cpu >= 0 && i < n) { - cpu = odp_cpumask_next(&mask->odp_cpumask, cpu); - i++; - } - - return cpu; -} - -/** - * Bitwise AND operation on two masks, store the result in 'dst' - * - * dst = src1 & src2 - * - * @param dst destination bit mask, result is stored here - * @param src1 source mask #1 - * @param scr2 source mask #2 - */ -static inline void env_bitmask_and(env_bitmask_t *dst, - const env_bitmask_t *src1, - const env_bitmask_t *src2) -{ - odp_cpumask_and(&dst->odp_cpumask, - &src1->odp_cpumask, &src2->odp_cpumask); -} - -/** - * Bitwise OR operation on two masks, store the result in 'dst' - * - * dst = src1 | src2 - * - * @param dst destination bit mask, result is stored here - * @param src1 source mask #1 - * @param scr2 source mask #2 - */ -static inline void env_bitmask_or(env_bitmask_t *dst, - const env_bitmask_t *src1, - const env_bitmask_t *src2) -{ - odp_cpumask_or(&dst->odp_cpumask, - &src1->odp_cpumask, &src2->odp_cpumask); -} - -/** - * Bitwise XOR operation on two masks, store the result in 'dst' - * - * dst = src1 ^ src2 - * - * @param dst destination bit mask, result is stored here - * @param src1 source mask #1 - * @param scr2 source mask #2 - */ -static inline void env_bitmask_xor(env_bitmask_t *dst, - const env_bitmask_t *src1, - const env_bitmask_t *src2) -{ - odp_cpumask_xor(&dst->odp_cpumask, - &src1->odp_cpumask, &src2->odp_cpumask); -} - -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* _ENV_BITMASK_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+/**
+ * @file
+ * Env bit mask functions - don't include this file directly,
+ * instead include "environment.h"
+ */
+
+#ifndef _ENV_BITMASK_H_
+#define _ENV_BITMASK_H_
+
+#pragma GCC visibility push(default)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+
+/*
+ * Type for a bit mask.
+ */
+typedef struct {
+	odp_cpumask_t odp_cpumask;
+} env_bitmask_t;
+
+/**
+ * Zero the whole mask.
+ *
+ * @param mask Bit mask
+ */
+static inline void env_bitmask_zero(env_bitmask_t *mask)
+{
+	odp_cpumask_zero(&mask->odp_cpumask);
+}
+
+/**
+ * Set a bit in the mask.
+ *
+ * @param bit Bit id
+ * @param mask Bit mask
+ */
+static inline void env_bitmask_set(int bit, env_bitmask_t *mask)
+{
+	odp_cpumask_set(&mask->odp_cpumask, bit);
+}
+
+/**
+ * Clear a bit in the mask.
+ *
+ * @param bit Bit id
+ * @param mask Bit mask
+ */
+static inline void env_bitmask_clr(int bit, env_bitmask_t *mask)
+{
+	odp_cpumask_clr(&mask->odp_cpumask, bit);
+}
+
+/**
+ * Test if a bit is set in the mask.
+ *
+ * @param bit Bit id
+ * @param mask Bit mask
+ *
+ * @return Non-zero if bit id is set in the mask
+ */
+static inline int env_bitmask_isset(int bit, const env_bitmask_t *mask)
+{
+	return odp_cpumask_isset(&mask->odp_cpumask, bit);
+}
+
+/**
+ * Test if the mask is all zero.
+ *
+ * @param mask Bit mask
+ *
+ * @return Non-zero if the mask is all zero
+ */
+static inline int env_bitmask_iszero(const env_bitmask_t *mask)
+{
+	odp_cpumask_t zero_mask;
+
+	odp_cpumask_zero(&zero_mask);
+
+	return odp_cpumask_equal(&zero_mask, &mask->odp_cpumask);
+}
+
+/**
+ * Test if two masks are equal
+ *
+ * @param mask1 First bit mask
+ * @param mask2 Second bit mask
+ *
+ * @return Non-zero if the two masks are equal
+ */
+static inline int env_bitmask_equal(const env_bitmask_t *mask1,
+				    const env_bitmask_t *mask2)
+{
+	return odp_cpumask_equal(&mask1->odp_cpumask, &mask2->odp_cpumask);
+}
+
+/**
+ * Set a range (0...count-1) of bits in the mask.
+ *
+ * @param count Number of bits to set
+ * @param mask Bit mask
+ */
+static inline void env_bitmask_set_count(int count, env_bitmask_t *mask)
+{
+	for (int i = 0; i < count; i++)
+		odp_cpumask_set(&mask->odp_cpumask, i);
+}
+
+/**
+ * Copy bit mask
+ *
+ * @param dst Destination bit mask
+ * @param src Source bit mask
+ */
+static inline void env_bitmask_copy(env_bitmask_t *dst,
+				    const env_bitmask_t *src)
+{
+	odp_cpumask_copy(&dst->odp_cpumask, &src->odp_cpumask);
+}
+
+/**
+ * Count the number of bits set in the mask.
+ *
+ * @param mask Bit mask
+ *
+ * @return Number of bits set
+ */
+static inline int env_bitmask_count(const env_bitmask_t *mask)
+{
+	return odp_cpumask_count(&mask->odp_cpumask);
+}
+
+/**
+ * Set specified bits from 'bits[]' in bit mask.
+ *
+ * bit 0: bits[0] = 0x1 (len = 1)
+ * bit 1: bits[0] = 0x2 (len = 1)
+ * ...
+ * bit 64: bits[0] = 0x0, bits[1] = 0x1 (len = 2)
+ * bit 65: bits[0] = 0x0, bits[1] = 0x2 (len = 2)
+ * ...
+ * cores 0-127: bits[0]=0xffffffffffffffff, bits[1]=0xffffffffffffffff (len=2)
+ * ...
+ * @param bits[] array of uint64_t:s containing the bits to set in the bit mask
+ * @param len number of array elements in bits[].
+ * @param mask bit mask to set.
+ *
+ * @note bits are 'or'ed into the mask, so any previously set bits remain set.
+ */
+static inline void env_bitmask_set_bits(const uint64_t bits[], int len,
+					env_bitmask_t *mask)
+{
+	(void)bits;
+	(void)len;
+	(void)mask;
+
+	fprintf(stderr, "%s() function not implemented!\n", __func__);
+}
+
+/**
+ * Get bit mask, stored in a uint64_t array for the user
+ *
+ * bit 0: bits[0] = 0x1 (len = 1)
+ * bit 1: bits[0] = 0x2 (len = 1)
+ * ...
+ * bit 64: bits[0] = 0x0, bits[1] = 0x1 (len = 2)
+ * bit 65: bits[0] = 0x0, bits[1] = 0x2 (len = 2)
+ * ...
+ * cores 0-127: bits[0]=0xffffffffffffffff, bits[1]=0xffffffffffffffff (len=2)
+ * ...
+ * @param[out] bits[] array of uint64_t:s that the bit mask will be stored in
+ * @param len number of array elements in bits[].
+ * @param mask bit mask to get bits from.
+ *
+ * @return The number of uint64_t:s written into bits[].
+ */
+static inline int env_bitmask_get_bits(uint64_t bits[/*out*/], int len,
+				       const env_bitmask_t *mask)
+{
+	(void)bits;
+	(void)len;
+	(void)mask;
+
+	fprintf(stderr, "%s() function not implemented!\n", __func__);
+
+	return 0;
+}
+
+/**
+ * Return the index (position) of the Nth set bit in the bit mask
+ *
+ * @param n Nth set bit, note n=1 means first set bit, n=[1...MaxCores]
+ * @param mask bit mask
+ *
+ * @return Index of the Nth set bit, <0 on error or if no such bit.
+ */
+static inline int env_bitmask_idx(int n, const env_bitmask_t *mask)
+{
+	if (unlikely((unsigned int)(n - 1) >= ODP_CPUMASK_SIZE))
+		return -1;
+
+	int i = 1;
+	int cpu = odp_cpumask_first(&mask->odp_cpumask);
+
+	while (cpu >= 0 && i < n) {
+		cpu = odp_cpumask_next(&mask->odp_cpumask, cpu);
+		i++;
+	}
+
+	return cpu;
+}
+
+/**
+ * Bitwise AND operation on two masks, store the result in 'dst'
+ *
+ * dst = src1 & src2
+ *
+ * @param dst destination bit mask, result is stored here
+ * @param src1 source mask #1
+ * @param src2 source mask #2
+ */
+static inline void env_bitmask_and(env_bitmask_t *dst,
+				   const env_bitmask_t *src1,
+				   const env_bitmask_t *src2)
+{
+	odp_cpumask_and(&dst->odp_cpumask,
+			&src1->odp_cpumask, &src2->odp_cpumask);
+}
+
+/**
+ * Bitwise OR operation on two masks, store the result in 'dst'
+ *
+ * dst = src1 | src2
+ *
+ * @param dst destination bit mask, result is stored here
+ * @param src1 source mask #1
+ * @param src2 source mask #2
+ */
+static inline void env_bitmask_or(env_bitmask_t *dst,
+				  const env_bitmask_t *src1,
+				  const env_bitmask_t *src2)
+{
+	odp_cpumask_or(&dst->odp_cpumask,
+		       &src1->odp_cpumask, &src2->odp_cpumask);
+}
+
+/**
+ * Bitwise XOR operation on two masks, store the result in 'dst'
+ *
+ * dst = src1 ^ src2
+ *
+ * @param dst destination bit mask, result is stored here
+ * @param src1 source mask #1
+ * @param src2 source mask #2
+ */
+static inline void env_bitmask_xor(env_bitmask_t *dst,
+				   const env_bitmask_t *src1,
+				   const env_bitmask_t *src2)
+{
+	odp_cpumask_xor(&dst->odp_cpumask,
+			&src1->odp_cpumask, &src2->odp_cpumask);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#pragma GCC visibility pop
+#endif /* _ENV_BITMASK_H_ */
diff --git a/include/event_machine/platform/env/env_spinlock.h b/include/event_machine/platform/env/env_spinlock.h
index 14b68886..8c479a85 100644
--- a/include/event_machine/platform/env/env_spinlock.h
+++ b/include/event_machine/platform/env/env_spinlock.h
@@ -1,74 +1,74 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - * env helper include file - don't include this file directly, - * instead #include - */ - -#ifndef _ENV_SPINLOCK_H_ -#define _ENV_SPINLOCK_H_ - -#pragma GCC visibility push(default) - -typedef odp_spinlock_t env_spinlock_t; - -static inline void -env_spinlock_init(env_spinlock_t *const lock) -{ - odp_spinlock_init((odp_spinlock_t *)lock); -} - -static inline void -env_spinlock_lock(env_spinlock_t *const lock) -{ - odp_spinlock_lock((odp_spinlock_t *)lock); -} - -static inline int -env_spinlock_trylock(env_spinlock_t *const lock) -{ - return odp_spinlock_trylock((odp_spinlock_t *)lock); -} - -static inline int -env_spinlock_is_locked(env_spinlock_t *const lock) -{ - return odp_spinlock_is_locked((odp_spinlock_t *)lock); -} - -static inline void -env_spinlock_unlock(env_spinlock_t *const lock) -{ - odp_spinlock_unlock((odp_spinlock_t *)lock); -} - -#pragma GCC visibility pop -#endif /* _ENV_SPINLOCK_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * env helper include file - don't include this file directly, + * instead #include + */ + +#ifndef _ENV_SPINLOCK_H_ +#define _ENV_SPINLOCK_H_ + +#pragma GCC visibility push(default) + +typedef odp_spinlock_t env_spinlock_t; + +static inline void +env_spinlock_init(env_spinlock_t *const lock) +{ + odp_spinlock_init(lock); +} + +static inline void +env_spinlock_lock(env_spinlock_t *const lock) +{ + odp_spinlock_lock(lock); +} + +static inline int +env_spinlock_trylock(env_spinlock_t *const lock) +{ + return odp_spinlock_trylock(lock); +} + +static inline int +env_spinlock_is_locked(env_spinlock_t *const lock) +{ + return odp_spinlock_is_locked(lock); +} + +static inline void +env_spinlock_unlock(env_spinlock_t *const lock) +{ + odp_spinlock_unlock(lock); +} + +#pragma GCC visibility pop +#endif /* _ENV_SPINLOCK_H_ */ diff --git a/include/event_machine/platform/env/environment.h b/include/event_machine/platform/env/environment.h index 566b0b9b..2ec8db08 100644 --- a/include/event_machine/platform/env/environment.h +++ b/include/event_machine/platform/env/environment.h @@ -1,120 +1,133 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _ENVIRONMENT_H_ -#define _ENVIRONMENT_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * - * Environment header file - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -#include -/* env generic macros */ -#include -/* env configuration affecting other env files */ -#include - -/** - * Thread local vars - */ -#define ENV_LOCAL __thread - -/** - * Cache line size - */ -#define ENV_CACHE_LINE_SIZE ODP_CACHE_LINE_SIZE - -/** - * Cache line size round up - */ -#define ENV_CACHE_LINE_SIZE_ROUNDUP(x) \ - ((((x) + ENV_CACHE_LINE_SIZE - 1) / ENV_CACHE_LINE_SIZE) \ - * ENV_CACHE_LINE_SIZE) - -#define ENV_ALIGNED(x) ODP_ALIGNED(x) - -/** - * Cache line alignment - */ -#define ENV_CACHE_LINE_ALIGNED ODP_ALIGNED_CACHE - -/* - * Cache Prefetch-macros - */ -/** Prefetch into all cache levels */ -#define ENV_PREFETCH(addr) odp_prefetch((addr)) - -/* - * env helper include files - don't include these files directly, - * instead #include - */ -#include -#include -#include -#include -#include -#include - -/** - * Panic - */ -#define env_panic(...) abort() - -static inline uint64_t env_get_cycle(void) -{ - return odp_cpu_cycles(); -} - -static inline void env_sync_mem(void) -{ - odp_mb_full(); -} - -static inline uint64_t env_core_hz(void) -{ - return odp_cpu_hz(); -} - -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* _ENVIRONMENT_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _ENVIRONMENT_H_ +#define _ENVIRONMENT_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * + * Environment header file + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#include +/* env generic macros */ +#include +/* env configuration affecting other env files */ +#include + +/** + * Thread local vars + */ +#define ENV_LOCAL __thread + +/** + * Cache line size + */ +#define ENV_CACHE_LINE_SIZE ODP_CACHE_LINE_SIZE + +/** + * Cache line size round up + */ +#define ENV_CACHE_LINE_SIZE_ROUNDUP(x) \ + ((((x) + ENV_CACHE_LINE_SIZE - 1) / ENV_CACHE_LINE_SIZE) \ + * ENV_CACHE_LINE_SIZE) + +#define ENV_ALIGNED(x) ODP_ALIGNED(x) + +/** + * Cache line alignment + */ +#define ENV_CACHE_LINE_ALIGNED ODP_ALIGNED_CACHE + +/* + * Cache Prefetch-macros + */ +/** Prefetch into all cache levels */ +#define ENV_PREFETCH(addr) odp_prefetch((addr)) + +/* + * env helper include files - don't include these files directly, + * instead #include + */ +#include +#include +#include +#include +#include +#include + +/** + * Panic + */ +#define env_panic(...) abort() + +static inline uint64_t env_get_cycle(void) +{ + return odp_cpu_cycles(); +} + +/** + * Returns difference of cpu cycles (cycles2 - cycles1). + * + * @param cycles2 Second cycle count + * @param cycles1 First cycle count + * + * @return Difference between given cpu cycles + */ +static inline uint64_t env_cycles_diff(uint64_t cycles2, uint64_t cycles1) +{ + return odp_cpu_cycles_diff(cycles2, cycles1); +} + +static inline void env_sync_mem(void) +{ + odp_mb_full(); +} + +static inline uint64_t env_core_hz(void) +{ + return odp_cpu_hz(); +} + +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* _ENVIRONMENT_H_ */ diff --git a/include/event_machine/platform/event_machine_config.h b/include/event_machine/platform/event_machine_config.h index e72f90f8..86ba96ed 100644 --- a/include/event_machine/platform/event_machine_config.h +++ b/include/event_machine/platform/event_machine_config.h @@ -1,269 +1,311 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine configuration options - */ - -#ifndef EVENT_MACHINE_CONFIG_H -#define EVENT_MACHINE_CONFIG_H - -#pragma GCC visibility push(default) - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef EM_64_BIT -/** - * @page page_version 64-bit version - * This documentation represent the 64-bit version of Event Machine API. - * Define EM_64_BIT or EM_32_BIT to select between 64- and 32-bit versions. - */ -#elif defined(EM_32_BIT) -/** - * @page page_version 32-bit version - * This documentation represent the 32-bit version of Event Machine API. - * Define EM_64_BIT or EM_32_BIT to select between 64- and 32-bit versions. - */ -#else -#error Missing architecture definition. Define EM_64_BIT or EM_32_BIT! -/** - * @page page_version 64/32-bit version not selected - * This documentation has not selected between 64/32-bit version of - * the Event Machine API. Some types might be missing. - * Define EM_64_BIT or EM_32_BIT to select between 64- and 32-bit - * versions. - */ -#endif - -/** - * @def EM_HANDLE_T - * Define 'type_t' as a struct ptr to improve type safety - */ -#define EM_HANDLE_T(type_t) \ - typedef struct _##type_t { \ - void *unused; \ - } *(type_t) - -/** - * @def EM_STATIC_CAST - * Support C++ static casts in EM API files - */ -#ifndef __cplusplus -#define EM_STATIC_CAST(type, value) ((type)(value)) -#else -#define EM_STATIC_CAST(type, value) (static_cast < type > (value)) -#endif - -/** - * @def EM_REINTERPRET_CAST - * Support C++ reinterpret casts in EM API files - */ -#ifndef __cplusplus -#define EM_REINTERPRET_CAST(type, value) ((type)(value)) -#else -#define EM_REINTERPRET_CAST(type, value) (reinterpret_cast < type > (value)) -#endif - -/** - * @def EM_HDL_UNDEF - * Undefined EM-handle - */ -#ifndef __cplusplus -#define EM_HDL_UNDEF NULL -#else -#define EM_HDL_UNDEF nullptr -#endif - -/** - * @def PRI_HDL - * EM-handle printf format - */ -#define PRI_HDL "p" - -/** - * @def EM_CONFIG_POOLS - * Maximum number of EM pools - */ -#define EM_CONFIG_POOLS 16 - -/** - * @def EM_MAX_QUEUES - * Maximum total number of queues - */ -#define EM_MAX_QUEUES 1024 /* Should be <= odp-max-queues */ - -/** - * @def EM_QUEUE_NAME_LEN - * Maximum queue name string length - */ -#define EM_QUEUE_NAME_LEN 32 - -/** - * @def EM_MAX_ATOMIC_GROUPS - * Maximum number of EM atomic groups - */ -#define EM_MAX_ATOMIC_GROUPS 128 - -/** - * @def EM_ATOMIC_GROUP_NAME_LEN - * Max atomic group name length - */ -#define EM_ATOMIC_GROUP_NAME_LEN 32 - -/** - * @def EM_MAX_EOS - * Maximum total number of EOs - */ -#define EM_MAX_EOS 512 - -/** - * @def EM_EO_NAME_LEN - * Maximum EO name string length - */ -#define EM_EO_NAME_LEN 32 - -/** - * @def EM_MAX_EVENT_GROUPS - * Maximum number of event groups - */ -#define EM_MAX_EVENT_GROUPS 1024 - -/** - * @def EM_EVENT_GROUP_MAX_NOTIF - * Maximum number of notifications - */ -#define EM_EVENT_GROUP_MAX_NOTIF 6 - -/* - * @def EM_DISPATCH_CALLBACKS_ENABLE - * Enable dispatcher callback functions - */ 
-#define EM_DISPATCH_CALLBACKS_ENABLE 1 - -/** - * @def EM_API_HOOKS_ENABLE - * Enable the usage of EM API hooks - * - * User provided API hook functions can be provided via em_init(). EM will - * call the given hooks each time the corresponding API function is called. - */ -#define EM_API_HOOKS_ENABLE 1 - -/** - * @def EM_CALLBACKS_MAX - * Maximum number of EM callbacks/hooks that can be registered. - * - * The user may register up to the number 'EM_CALLBACKS_MAX' of each - * callback/hook. API-hooks, such as the alloc-, free- and send-hook, or - * dispatcher callbacks, such as the enter- and exit-callbacks, can be - * registered each up to this limit. - */ -#define EM_CALLBACKS_MAX 8 - -/** - * @def EM_CHECK_LEVEL - * Error check level - * - * Conditionally compiled error checking level, range 0...3 - * Level 0 does not do any runtime argument checking (be careful!) - * Level 1 adds minimum checks - * Level 2 adds most checks except the slowest ones - * Level 3 adds all checks and gives lowest performance - * - * @note em-odp: the 'EM_CHECK_LEVEL' value can be overridden by a command-line - * option to the 'configure' script, e.g.: - * $build> ../configure ... --enable-check-level=3 - * The overridden value will be made available to the application - * via a pkgconfig set define. - */ -#ifndef EM_CHECK_LEVEL -#define EM_CHECK_LEVEL 1 -#endif - -/** - * @def EM_ESV_ENABLE - * Event State Verification (ESV) - * - * '0': disabled - * '1': enabled - event state is verified when the event is passed from EM to - * the user (e.g. dispatch) and from the user to EM (e.g. send) - * to catch illegal usage patterns like double-send, double-free, - * usage-after-send etc. - * - * Also see the config/em-odp.conf file for ESV runtime options! - * - * @note em-odp: the 'EM_ESV_ENABLE' value can be overridden by a command-line - * option to the 'configure' script, e.g.: - * $build> ../configure ... --enable-esv - * The overridden value will be made available to the application - * via a pkgconfig set define. - */ -#ifndef EM_ESV_ENABLE -#define EM_ESV_ENABLE 0 -#endif - -/** - * @def EM_DEBUG_PRINT - * Event Machine Debug Printouts - * - * '0': disabled - * '1': enabled - * - * @note em-odp: the 'EM_DEBUG_PRINT' value can be overridden by a command-line - * option to the 'configure' script, e.g.: - * $build> ../configure ... --enable-debug-print - * The overridden value will be made available to the application - * via a pkgconfig set define. - */ -#ifndef EM_DEBUG_PRINT -#define EM_DEBUG_PRINT 0 -#endif - -/** - * @def EM_EVENT_GROUP_SAFE_MODE - * Guards event groups in undefined and error situations - * - * Excess and aborted group events don't belong to a valid group when received. - * Most event group APIs check if the core local event group has expired during - * receive function. Impacts performance when event groups are used. - */ -#define EM_EVENT_GROUP_SAFE_MODE 1 - -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_CONFIG_H */ +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine configuration options + */ + +#ifndef EVENT_MACHINE_CONFIG_H +#define EVENT_MACHINE_CONFIG_H + +#pragma GCC visibility push(default) + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef EM_64_BIT +/** + * @page page_version 64-bit version + * This documentation represents the 64-bit version of the Event Machine API. + * Define EM_64_BIT or EM_32_BIT to select between 64- and 32-bit versions. + */ +#elif defined(EM_32_BIT) +/** + * @page page_version 32-bit version + * This documentation represents the 32-bit version of the Event Machine API. + * Define EM_64_BIT or EM_32_BIT to select between 64- and 32-bit versions. + */ +#else +#error Missing architecture definition. Define EM_64_BIT or EM_32_BIT! +/** + * @page page_version 64/32-bit version not selected + * This documentation has not selected between the 64- and 32-bit versions of + * the Event Machine API. Some types might be missing. + * Define EM_64_BIT or EM_32_BIT to select between 64- and 32-bit + * versions. 
+ */ +#endif + +/** + * @def EM_HANDLE_T + * Define 'type_t' as a struct ptr to improve type safety + */ +#define EM_HANDLE_T(type_t) \ + typedef struct _##type_t { \ + void *unused; \ + } *(type_t) + +/** + * @def EM_STATIC_CAST + * Support C++ static casts in EM API files + */ +#ifndef __cplusplus +#define EM_STATIC_CAST(type, value) ((type)(value)) +#else +#define EM_STATIC_CAST(type, value) (static_cast < type > (value)) +#endif + +/** + * @def EM_REINTERPRET_CAST + * Support C++ reinterpret casts in EM API files + */ +#ifndef __cplusplus +#define EM_REINTERPRET_CAST(type, value) ((type)(value)) +#else +#define EM_REINTERPRET_CAST(type, value) (reinterpret_cast < type > (value)) +#endif + +/** + * @def EM_HDL_UNDEF + * Undefined EM-handle + */ +#ifndef __cplusplus +#define EM_HDL_UNDEF NULL +#else +#define EM_HDL_UNDEF nullptr +#endif + +/** + * @def PRI_HDL + * EM-handle printf format + */ +#define PRI_HDL "p" + +/** + * @def EM_CONFIG_POOLS + * Maximum number of EM pools + */ +#define EM_CONFIG_POOLS 16 + +/** + * @def EM_MAX_QUEUES + * Maximum total number of queues + */ +#define EM_MAX_QUEUES 1024 /* Should be <= odp-max-queues */ + +/** + * @def EM_QUEUE_NAME_LEN + * Maximum queue name string length + */ +#define EM_QUEUE_NAME_LEN 32 + +/** + * @def EM_MAX_ATOMIC_GROUPS + * Maximum number of EM atomic groups + */ +#define EM_MAX_ATOMIC_GROUPS 128 + +/** + * @def EM_ATOMIC_GROUP_NAME_LEN + * Max atomic group name length + */ +#define EM_ATOMIC_GROUP_NAME_LEN 32 + +/** + * @def EM_MAX_EOS + * Maximum total number of EOs + */ +#define EM_MAX_EOS 512 + +/** + * @def EM_EO_NAME_LEN + * Maximum EO name string length + */ +#define EM_EO_NAME_LEN 32 + +/** + * @def EM_MAX_EVENT_GROUPS + * Maximum number of event groups + */ +#define EM_MAX_EVENT_GROUPS 1024 + +/** + * @def EM_EVENT_GROUP_MAX_NOTIF + * Maximum number of notifications + */ +#define EM_EVENT_GROUP_MAX_NOTIF 6 + +/* + * @def EM_DISPATCH_CALLBACKS_ENABLE + * Enable dispatcher callback functions + */ +#define EM_DISPATCH_CALLBACKS_ENABLE 1 + +/** + * @def EM_API_HOOKS_ENABLE + * Enable the usage of EM API hooks + * + * User provided API hook functions can be provided via em_init(). EM will + * call the given hooks each time the corresponding API function is called. + */ +#define EM_API_HOOKS_ENABLE 1 + +/** + * @def EM_IDLE_HOOKS_ENABLE + * Enable the usage of EM idle hooks + * + * User provided idle hook functions can be provided via em_conf_t::idle_hooks + * when calling em_init() or via their register functions. EM will call the + * given hooks in the dispatcher depending on whether there are events to be + * processed by the core. + * + * @note em-odp: the 'EM_IDLE_HOOKS_ENABLE' value can be overridden by a + * command-line option to the 'configure' script, e.g.: + * $build> ../configure ... --enable-idle-hooks + * The overridden value will be made available to the application + * via a pkgconfig set define. + */ +#ifndef EM_IDLE_HOOKS_ENABLE +#define EM_IDLE_HOOKS_ENABLE 0 +#endif + +/** + * @def EM_CALLBACKS_MAX + * Maximum number of EM callbacks/hooks that can be registered. + * + * The user may register up to the number 'EM_CALLBACKS_MAX' of each + * callback/hook. API-hooks, such as the alloc-, free- and send-hook, or + * dispatcher callbacks, such as the enter- and exit-callbacks as well as + * idle-hooks can be registered each up to this limit. 
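+ *
+ * For example, a small sketch of registering two alloc-hooks within this
+ * limit (the hook function names below are only illustrative); both will be
+ * called, in registration order:
+ * @code
+ *	em_hooks_register_alloc(my_alloc_hook_stats); /* called first */
+ *	em_hooks_register_alloc(my_alloc_hook_trace); /* called second */
+ * @endcode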
+ */ +#define EM_CALLBACKS_MAX 8 + +/** + * @def EM_CHECK_LEVEL + * Error check level + * + * Conditionally compiled error checking level, range 0...3 + * Level 0 does not do any runtime argument checking (be careful!) + * Level 1 adds minimum checks + * Level 2 adds most checks except the slowest ones + * Level 3 adds all checks and gives lowest performance + * + * @note em-odp: the 'EM_CHECK_LEVEL' value can be overridden by a command-line + * option to the 'configure' script, e.g.: + * $build> ../configure ... --enable-check-level=3 + * The overridden value will be made available to the application + * via a pkgconfig set define. + */ +#ifndef EM_CHECK_LEVEL +#define EM_CHECK_LEVEL 1 +#endif + +/** + * @def EM_ESV_ENABLE + * Event State Verification (ESV) + * + * '0': disabled + * '1': enabled - event state is verified when the event is passed from EM to + * the user (e.g. dispatch) and from the user to EM (e.g. send) + * to catch illegal usage patterns like double-send, double-free, + * usage-after-send etc. + * + * Also see the config/em-odp.conf file for ESV runtime options! + * + * @note em-odp: the 'EM_ESV_ENABLE' value can be overridden by a command-line + * option to the 'configure' script, e.g.: + * $build> ../configure ... --enable-esv + * The overridden value will be made available to the application + * via a pkgconfig set define. + */ +#ifndef EM_ESV_ENABLE +#define EM_ESV_ENABLE 0 +#endif + +/** + * @def EM_DEBUG_PRINT + * Event Machine Debug Printouts + * + * '0': disabled + * '1': enabled + * + * @note em-odp: the 'EM_DEBUG_PRINT' value can be overridden by a command-line + * option to the 'configure' script, e.g.: + * $build> ../configure ... --enable-debug-print + * The overridden value will be made available to the application + * via a pkgconfig set define. + */ +#ifndef EM_DEBUG_PRINT +#define EM_DEBUG_PRINT 0 +#endif + +/** + * @def EM_EVENT_GROUP_SAFE_MODE + * Guards event groups in undefined and error situations + * + * Excess and aborted group events don't belong to a valid group when received. + * Most event group APIs check if the core local event group has expired during + * receive function. Impacts performance when event groups are used. + */ +#define EM_EVENT_GROUP_SAFE_MODE 1 + +/** + * @def EM_DEBUG_TIMESTAMP_ENABLE + * Enable Debug Timestamps for timing analysis. This may reduce performance + * but allows tracing of dispatcher timings. Timestamps are per dispatcher (thread + * local). + * + * '0': disabled (default) + * '1': enabled, lower overhead but potentially inaccurate (no HW barriers) + * '2': enabled, strict version with full HW barriers + * + * @note em-odp: the 'EM_DEBUG_TIMESTAMP_ENABLE' value can be overridden by a command-line + * option to the 'configure' script, e.g.: + * $build> ../configure ... --enable-debug-timestamps=N + * The overridden value will be made available to the application + * via a pkgconfig set define. Use value 1 for lower overhead timestamps + * and value 2 for strict timestamps with HW barriers. 
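+ *
+ * A minimal usage sketch, assuming the em_debug_timestamp() helper and the
+ * EM_DEBUG_TSP_SCHED_ENTRY timestamp-point provided by the debug helper
+ * header (include/event_machine/helper/event_machine_debug.h):
+ * @code
+ *	uint64_t tsp = em_debug_timestamp(EM_DEBUG_TSP_SCHED_ENTRY);
+ *
+ *	if (tsp) /* zero when disabled or no timestamp taken yet */
+ *		printf("sched entry: %" PRIu64 "\n", tsp);
+ * @endcode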
+ * + * @see event_machine_helper.h + */ +#ifndef EM_DEBUG_TIMESTAMP_ENABLE +#define EM_DEBUG_TIMESTAMP_ENABLE 0 +#endif + +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_CONFIG_H */ diff --git a/include/event_machine/platform/event_machine_hooks.h b/include/event_machine/platform/event_machine_hooks.h index 70df352c..b5004863 100644 --- a/include/event_machine/platform/event_machine_hooks.h +++ b/include/event_machine/platform/event_machine_hooks.h @@ -1,289 +1,462 @@ -/* - * Copyright (c) 2019, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_HOOKS_H_ -#define EVENT_MACHINE_HOOKS_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup em_hooks API-hooks - * Event Machine API-callback hooks. - * @{ - * - * EM API-callback hook functions can be registered for a selected set of - * EM APIs. The EM APIs in question are mostly fast path APIs, like em_send(), - * em_alloc() and em_free(). Control APIs generally do not need hook support. - * A registered user provided hook function will be called by EM each time the - * corresponding API is called. - * API-callback hooks enables the user to gather statistics, trace program and - * event flow etc. API hooks should not change the state of the events etc. - * they receive as arguments, nor should they call the same API from within the - * hook to avoid hook recursion. - * Hook support is only available when EM_API_HOOKS_ENABLE != 0. - * Multiple API-callback hook functions (up to the number 'EM_CALLBACKS_MAX') - * can be registered for a given EM API. The calling order of multiple - * registered API hook functions is the order of registration. If the same - * function is registered twice then it will be called twice. - * - * Do not include this file from the application, event_machine.h will - * do it for you. 
- */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * API-callback hook for em_alloc(), em_alloc_multi() and em_event_clone() - * - * The hook will only be called for successful event allocations, passing also - * the newly allocated 'events' to the hook. - * The state and ownership of the events must not be changed by the hook, e.g. - * the events must not be freed or sent etc. Calling em_alloc/_multi() within - * the alloc hook leads to hook recursion and must be avoided. - * - * @note em_alloc(): hook is called with events[1] and num_act = num_req = 1. - * @note em_alloc_multi(): hook is called with events[num_act] and - * num_req >= num_act >= 1 - * - * API-callback hook functions can be called concurrently from different cores. - * - * @param[in] events[] Array of newly allocated events: 'events[num_act]'. - * Don't change the state of the array or the events! - * @param num_act The actual number of events allocated and written into - * 'events[]' (num_act <= num_req). This is the return val - * of em_alloc_multi() if at least one event was allocated - * (the hook is not called if no events were allocated). - * @param num_req The requested number of events to allocate, - * from em_alloc/_multi('num') - * @param size Event size >0, from em_alloc/_multi('size') - * @param type Event type to allocate, from em_alloc/_multi('type') - * @param pool Event pool handle, from em_alloc/_multi('pool') - * - * @see em_alloc(), em_alloc_multi() and em_hooks_register_alloc() - */ -typedef void (*em_api_hook_alloc_t)(const em_event_t events[/*num_act*/], - int num_act, int num_req, size_t size, - em_event_type_t type, em_pool_t pool); - -/** - * API-callback hook for em_free() and em_free_multi(). - * - * The hook will be called before freeing the actual events, after verifying - * that the events given are valid, thus the hook does not 'see' if the actual - * free-operation succeeds or fails. - * The state and ownership of the events must not be changed by the hook, e.g. - * the events must not be freed or sent etc. Calling em_free/_multi() within the - * free hook leads to hook recursion and must be avoided. - * - * @note em_free(): hook is called with events[1] and num = 1. - * @note em_free_multi(): hook is called with events[num] and num >= 1 - * - * API-callback hook functions can be called concurrently from different cores. - * - * @param[in] events[] Array of events to be freed: 'events[num]' - * Don't change the state of the array or the events! - * @param num The number of events in the array 'events[]'. - * - * @see em_free(), em_free_multi() and em_hooks_register_free() - */ -typedef void (*em_api_hook_free_t)(const em_event_t events[], int num); - -/** - * API-callback hook for em_send(), em_send_multi(), em_send_group() and - * em_send_group_multi(). - * - * Sending multiple events with an event group is the most generic - * variant and thus one callback covers all. - * The hook will be called just before sending the actual event(s), thus - * the hook does not 'see' if the actual send operation succeeds or - * fails. - * The state and ownership of the events must not be changed by the - * hook, e.g. the events can not be freed or sent etc. - * Calling em_send...() within the send hook leads to hook recursion and - * must be avoided. - * - * API-callback hook functions can be called concurrently from different cores. 
- * - * @see - */ -typedef void (*em_api_hook_send_t)(const em_event_t events[], int num, - em_queue_t queue, - em_event_group_t event_group); - -/** - * API-callback hooks provided by the user at start-up (init) - * - * EM API functions will call an API hook if given by the user through this - * struct to em_init(). E.g. em_alloc() will call api_hooks->alloc(...) if - * api_hooks->alloc != NULL. Not all hooks need to be provided, use NULL for - * unsused hooks. - * - * @note Not all EM API funcs have associated hooks, only the most used - * functions (in the fast path) are included. - * Notice that extensive usage or heavy processing in the hooks might - * significantly impact performance since each API call (that has a hook) - * will execute the extra code in the user provided hook. - * - * @note Only used if EM_API_HOOKS_ENABLE != 0 - */ -typedef struct { - /** - * API callback hook for _all_ alloc-variants: - * em_alloc() and em_alloc_multi() - * Initialize to NULL if unused. - */ - em_api_hook_alloc_t alloc_hook; - - /** - * API callback hook for all free-variants: - * em_free() and em_free_multi() - * Initialize to NULL if unused. - */ - em_api_hook_free_t free_hook; - - /** - * API callback hook used for _all_ send-variants: - * em_send(), em_send_multi(), em_send_group() and em_send_group_multi() - * Initialize to NULL if unused. - */ - em_api_hook_send_t send_hook; -} em_api_hooks_t; - -/** - * Register an API-callback hook for em_alloc(). - * - * A registered hook will be called at the end of em_alloc(), but only for - * successful allocs, passing also the newly allocated 'event' to the hook. - * The state and ownership of the event must not be changed by the hook, e.g. - * the event must not be freed or sent etc. Calling em_alloc() within the - * alloc hook leads to hook recursion and must be avoided. - * - * API-callback hook functions can be called concurrently from different cores. - * - * Multiple API-callback hook functions (up to the number 'EM_CALLBACKS_MAX') - * can be registered. - * The order of calling multiple registered hook functions is the order of - * registration. If same function is registered twice it will be called twice. - * - * @param func API-callback hook function - * @return EM_OK if callback hook registration succeeded - */ -em_status_t -em_hooks_register_alloc(em_api_hook_alloc_t func); - -/** - * Unregister a previously registered em_alloc() callback hook - * - * @param func API-callback hook function - * @return EM_OK if callback hook unregistration succeeded - */ -em_status_t -em_hooks_unregister_alloc(em_api_hook_alloc_t func); - -/** - * Register an API-callback hook for em_free(). - * - * The hook will be called before freeing the actual event, after verifying that - * the event given to em_free() is valid, thus the hook does not 'see' if the - * actual free-operation succeeds or fails. - * The state and ownership of the event must not be changed by the hook, e.g. - * the event must not be freed or sent etc. Calling em_free() within the - * free hook leads to hook recursion and must be avoided. - * - * API-callback hook functions can be called concurrently from different cores. - * - * Multiple API-callback hook functions (up to the number 'EM_CALLBACKS_MAX') - * can be registered. - * The order of calling multiple registered hook functions is the order of - * registration. If same function is registered twice it will be called twice. 
- * - * @param func API-callback hook function - * @return EM_OK if callback hook registration succeeded - */ -em_status_t -em_hooks_register_free(em_api_hook_free_t func); - -/** - * Unregister an em_free() callback hook - * - * @param func API-callback hook function - * @return EM_OK if callback hook unregistration succeeded - */ -em_status_t -em_hooks_unregister_free(em_api_hook_free_t func); - -/** - * Register an API-callback hook for em_send(), em_send_multi(), em_send_group() - * and em_send_group_multi(). - * - * Sending multiple events with an event group is the most generic - * variant and thus one callback covers all. - * The hook will be called just before sending the actual event(s), thus - * the hook does not 'see' if the actual send operation succeeds or - * fails. - * The state and ownership of the events must not be changed by the - * hook, e.g. the events can not be freed or sent etc. - * Calling em_send...() within the send hook leads to hook recursion and - * must be avoided. - * - * API-callback hook functions can be called concurrently from different cores. - * - * Multiple API-callback hook functions (up to the number 'EM_CALLBACKS_MAX') - * can be registered. - * The order of calling multiple registered hook functions is the order of - * registration. If same function is registered twice it will be called twice. - * - * @param func API-callback hook function - * @return EM_OK if callback hook registration succeeded - */ -em_status_t -em_hooks_register_send(em_api_hook_send_t func); - -/** - * Unregister an em_send_...() callback hook - * - * @param func API-callback hook function - * @return EM_OK if callback hook unregistration succeeded - */ -em_status_t -em_hooks_unregister_send(em_api_hook_send_t func); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_HOOKS_H_ */ +/* + * Copyright (c) 2019, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef EVENT_MACHINE_HOOKS_H_ +#define EVENT_MACHINE_HOOKS_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * @defgroup em_hooks API-hooks and Idle hooks + * Event Machine API-callback hooks and Idle hooks. + * @{ + * + * EM API-callback hook functions can be registered for a selected set of + * EM APIs. The EM APIs in question are mostly fast path APIs, like em_send(), + * em_alloc() and em_free(). Control APIs generally do not need hook support. + * A registered user provided hook function will be called by EM each time the + * corresponding API is called. + * API-callback hooks enable the user to gather statistics, trace program and + * event flow etc. API hooks should not change the state of the events etc. + * they receive as arguments, nor should they call the same API from within the + * hook to avoid hook recursion. + * Hook support is only available when EM_API_HOOKS_ENABLE != 0. + * Multiple API-callback hook functions (up to the number 'EM_CALLBACKS_MAX') + * can be registered for a given EM API. The calling order of multiple + * registered API hook functions is the order of registration. If the same + * function is registered twice then it will be called twice. + * + * EM Idle callback hook functions can be registered for tracking the idle state + * (ACTIVE/IDLE) of EM cores. Idle hooks can be used e.g. to gather application + * load statistics. The idle hooks are called by the EM dispatcher depending on + * whether the core gets events from scheduled or local queues. A core is in the + * ACTIVE state when it gets events from these queues. A core is in the IDLE + * state when it doesn't get any events from these queues. + * To_idle hooks are called when a core state changes from ACTIVE to IDLE. + * To_active hooks are called when a core state changes from IDLE to ACTIVE. + * While_idle hooks are called when a core is already in the IDLE state and it + * doesn't get any events from scheduled or local queues. While_idle hooks can + * be called several times when a core is in the IDLE state. + * The user should not make any assumptions about the current idle state of the + * core when registering new idle hooks. + * The idle hook support is only available when EM_IDLE_HOOKS_ENABLE != 0. + * Multiple idle hook functions (up to the number 'EM_CALLBACKS_MAX') can be + * registered for each idle hook type. The calling order of multiple registered + * idle hook functions is the order of registration. If the same function is + * registered twice then it will be called twice. + * + * Do not include this file from the application, event_machine.h will + * do it for you. + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * API-callback hook for em_alloc(), em_alloc_multi() and em_event_clone() + * + * The hook will only be called for successful event allocations, passing also + * the newly allocated 'events' to the hook. + * The state and ownership of the events must not be changed by the hook, e.g. + * the events must not be freed or sent etc. Calling em_alloc/_multi() within + * the alloc hook leads to hook recursion and must be avoided. + * + * @note em_alloc(): hook is called with events[1] and num_act = num_req = 1. + * @note em_alloc_multi(): hook is called with events[num_act] and + * num_req >= num_act >= 1 + * + * API-callback hook functions can be called concurrently from different cores. + * + * @param[in] events[] Array of newly allocated events: 'events[num_act]'. + * Don't change the state of the array or the events! 
+ * @param num_act The actual number of events allocated and written into + * 'events[]' (num_act <= num_req). This is the return value + * of em_alloc_multi() if at least one event was allocated + * (the hook is not called if no events were allocated). + * @param num_req The requested number of events to allocate, + * from em_alloc/_multi('num') + * @param size Event size >0, from em_alloc/_multi('size') + * @param type Event type to allocate, from em_alloc/_multi('type') + * @param pool Event pool handle, from em_alloc/_multi('pool') + * + * @see em_alloc(), em_alloc_multi() and em_hooks_register_alloc() + */ +typedef void (*em_api_hook_alloc_t)(const em_event_t events[/*num_act*/], + int num_act, int num_req, uint32_t size, + em_event_type_t type, em_pool_t pool); + +/** + * API-callback hook for em_free() and em_free_multi(). + * + * The hook will be called before freeing the actual events, after verifying + * that the events given are valid, thus the hook does not 'see' if the actual + * free-operation succeeds or fails. + * The state and ownership of the events must not be changed by the hook, e.g. + * the events must not be freed or sent etc. Calling em_free/_multi() within the + * free hook leads to hook recursion and must be avoided. + * + * @note em_free(): hook is called with events[1] and num = 1. + * @note em_free_multi(): hook is called with events[num] and num >= 1 + * + * API-callback hook functions can be called concurrently from different cores. + * + * @param[in] events[] Array of events to be freed: 'events[num]' + * Don't change the state of the array or the events! + * @param num The number of events in the array 'events[]'. + * + * @see em_free(), em_free_multi() and em_hooks_register_free() + */ +typedef void (*em_api_hook_free_t)(const em_event_t events[], int num); + +/** + * API-callback hook for em_send(), em_send_multi(), em_send_group() and + * em_send_group_multi(). + * + * Sending multiple events with an event group is the most generic + * variant and thus one callback covers all. + * The hook will be called just before sending the actual event(s), thus + * the hook does not 'see' if the actual send operation succeeds or + * fails. + * The state and ownership of the events must not be changed by the + * hook, e.g. the events can not be freed or sent etc. + * Calling em_send...() within the send hook leads to hook recursion and + * must be avoided. + * + * API-callback hook functions can be called concurrently from different cores. + * + * @see em_send(), em_send_multi(), em_send_group(), em_send_group_multi() and + * em_hooks_register_send() + */ +typedef void (*em_api_hook_send_t)(const em_event_t events[], int num, + em_queue_t queue, + em_event_group_t event_group); + +/** + * To idle hook + * + * The to_idle hook will be called by the EM dispatcher when a core is entering + * the IDLE state i.e. when the core doesn't get any new events to be processed. + * The to_idle hook is called only when there previously have been events to + * process and the state changes from active to idle. + * + * @param to_idle_delay_ns The delay in nanoseconds that a core was waiting + * for scheduled events before calling the to_idle hook + */ +typedef void (*em_idle_hook_to_idle_t)(uint64_t to_idle_delay_ns); + +/** + * To active hook + * + * The to_active hook will be called by the EM dispatcher when a core is + * entering the ACTIVE state i.e. when the core gets events after being idle. + * The to_active hook is called only when the core previously has been in the + * IDLE state and the state changes to active. To_active hooks are called before + * the EO processes the events. 
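+ *
+ * A minimal sketch of a matching hook function (the statistics helper called
+ * here is only an illustrative placeholder):
+ * @code
+ *	static void my_to_active_hook(void)
+ *	{
+ *		/* core left the idle state: e.g. resume a load counter */
+ *		my_load_stats_resume(em_core_id());
+ *	}
+ * @endcode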
+ */ +typedef void (*em_idle_hook_to_active_t)(void); + +/** + * While idle hook + * + * The while_idle hook will be called by the EM dispatcher when a core is + * already in the IDLE state and stays in it i.e. the core doesn't get any + * events. The while_idle hook can be called several times until the core state + * changes to active i.e. the core again gets events for processing. + */ +typedef void (*em_idle_hook_while_idle_t)(void); + +/** + * API-callback hooks provided by the user at start-up (init) + * + * EM API functions will call an API hook if given by the user through this + * struct to em_init(). E.g. em_alloc() will call api_hooks->alloc(...) if + * api_hooks->alloc != NULL. Not all hooks need to be provided, use NULL for + * unused hooks. + * + * @note Not all EM API funcs have associated hooks, only the most used + * functions (in the fast path) are included. + * Notice that extensive usage or heavy processing in the hooks might + * significantly impact performance since each API call (that has a hook) + * will execute the extra code in the user provided hook. + * + * @note Only used if EM_API_HOOKS_ENABLE != 0 + */ +typedef struct { + /** + * API callback hook for _all_ alloc-variants: + * em_alloc() and em_alloc_multi() + * Initialize to NULL if unused. + */ + em_api_hook_alloc_t alloc_hook; + + /** + * API callback hook for all free-variants: + * em_free() and em_free_multi() + * Initialize to NULL if unused. + */ + em_api_hook_free_t free_hook; + + /** + * API callback hook used for _all_ send-variants: + * em_send(), em_send_multi(), em_send_group() and em_send_group_multi() + * Initialize to NULL if unused. + */ + em_api_hook_send_t send_hook; +} em_api_hooks_t; + +/** + * Idle hooks given by the user via this struct to em_init() will be called + * by the EM dispatcher on each core. + * + * The EM dispatcher will call: + * - to_idle_hook when a core doesn't get any more events from scheduled or + * local queues after the core has been active + * - to_active_hook when a core gets events after being idle + * - while_idle_hook when a core continues being idle + * + * Not all the idle hooks need to be provided, use NULL for unused idle hooks. + * + * @note Notice that doing heavy processing in the hooks might significantly + * impact performance. + * + * @note Only used if EM_IDLE_HOOKS_ENABLE != 0 + */ +typedef struct { + /** + * Idle hook called when entering the idle state + * Initialize to NULL if unused. + */ + em_idle_hook_to_idle_t to_idle_hook; + /** + * Idle hook called when entering the active state + * Initialize to NULL if unused. + */ + em_idle_hook_to_active_t to_active_hook; + /** + * Idle hook called while remaining in the idle state + * Initialize to NULL if unused. + */ + em_idle_hook_while_idle_t while_idle_hook; +} em_idle_hooks_t; + +/** + * Register an API-callback hook for em_alloc(). + * + * A registered hook will be called at the end of em_alloc(), but only for + * successful allocs, passing also the newly allocated 'event' to the hook. + * The state and ownership of the event must not be changed by the hook, e.g. + * the event must not be freed or sent etc. Calling em_alloc() within the + * alloc hook leads to hook recursion and must be avoided. + * + * API-callback hook functions can be called concurrently from different cores. + * + * Multiple API-callback hook functions (up to the number 'EM_CALLBACKS_MAX') + * can be registered. + * The order of calling multiple registered hook functions is the order of + * registration. 
If the same function is registered twice it will be called twice. + * + * @param func API-callback hook function + * @return EM_OK if callback hook registration succeeded + */ +em_status_t +em_hooks_register_alloc(em_api_hook_alloc_t func); + +/** + * Unregister a previously registered em_alloc() callback hook + * + * @param func API-callback hook function + * @return EM_OK if callback hook unregistration succeeded + */ +em_status_t +em_hooks_unregister_alloc(em_api_hook_alloc_t func); + +/** + * Register an API-callback hook for em_free(). + * + * The hook will be called before freeing the actual event, after verifying that + * the event given to em_free() is valid, thus the hook does not 'see' if the + * actual free-operation succeeds or fails. + * The state and ownership of the event must not be changed by the hook, e.g. + * the event must not be freed or sent etc. Calling em_free() within the + * free hook leads to hook recursion and must be avoided. + * + * API-callback hook functions can be called concurrently from different cores. + * + * Multiple API-callback hook functions (up to the number 'EM_CALLBACKS_MAX') + * can be registered. + * The order of calling multiple registered hook functions is the order of + * registration. If the same function is registered twice it will be called twice. + * + * @param func API-callback hook function + * @return EM_OK if callback hook registration succeeded + */ +em_status_t +em_hooks_register_free(em_api_hook_free_t func); + +/** + * Unregister an em_free() callback hook + * + * @param func API-callback hook function + * @return EM_OK if callback hook unregistration succeeded + */ +em_status_t +em_hooks_unregister_free(em_api_hook_free_t func); + +/** + * Register an API-callback hook for em_send(), em_send_multi(), em_send_group() + * and em_send_group_multi(). + * + * Sending multiple events with an event group is the most generic + * variant and thus one callback covers all. + * The hook will be called just before sending the actual event(s), thus + * the hook does not 'see' if the actual send operation succeeds or + * fails. + * The state and ownership of the events must not be changed by the + * hook, e.g. the events can not be freed or sent etc. + * Calling em_send...() within the send hook leads to hook recursion and + * must be avoided. + * + * API-callback hook functions can be called concurrently from different cores. + * + * Multiple API-callback hook functions (up to the number 'EM_CALLBACKS_MAX') + * can be registered. + * The order of calling multiple registered hook functions is the order of + * registration. If the same function is registered twice it will be called twice. + * + * @param func API-callback hook function + * @return EM_OK if callback hook registration succeeded + */ +em_status_t +em_hooks_register_send(em_api_hook_send_t func); + +/** + * Unregister an em_send_...() callback hook + * + * @param func API-callback hook function + * @return EM_OK if callback hook unregistration succeeded + */ +em_status_t +em_hooks_unregister_send(em_api_hook_send_t func); + +/** + * Register an idle hook that will be called when entering the idle state. + * + * To_idle hooks will be called by the EM dispatcher when a core enters the idle + * state, i.e. when no further events are available from scheduled or local + * queues for processing. The to_idle hooks will be called only if the core + * previously was in the active state. + * + * Multiple to_idle hook functions (up to the number 'EM_CALLBACKS_MAX') can be + * registered. 
The order of calling multiple registered hook functions is the + * order of registration. If the same function is registered twice it will be + * called twice. + * + * @param func Idle hook function + * @return EM_OK if idle hook registration succeeded + */ +em_status_t +em_hooks_register_to_idle(em_idle_hook_to_idle_t func); + +/** + * Unregister a to_idle hook. + * + * @param func Idle hook function + * @return EM_OK if idle hook unregistration succeeded + */ +em_status_t +em_hooks_unregister_to_idle(em_idle_hook_to_idle_t func); + +/** + * Register an idle hook that will be called when a core is entering the active + * state. + * + * To_active hooks will be called by the EM dispatcher when a core enters the + * active state, i.e. it received events from scheduled or local queues after + * being in the idle state. The to_active hooks will be called before the actual + * event processing is started and only if the core previously was in the idle + * state. + * + * Multiple to_active hook functions (up to the number 'EM_CALLBACKS_MAX') can + * be registered. The order of calling multiple registered hook functions is the + * order of registration. If the same function is registered twice it will be + * called twice. + * + * @param func Idle hook function + * @return EM_OK if idle hook registration succeeded + */ +em_status_t +em_hooks_register_to_active(em_idle_hook_to_active_t func); + +/** + * Unregister a to_active hook + * + * @param func Idle hook function + * @return EM_OK if idle hook unregistration succeeded + */ +em_status_t +em_hooks_unregister_to_active(em_idle_hook_to_active_t func); + +/** + * Register an idle hook that will be called while staying in the idle state. + * + * While_idle hooks will be called by the EM dispatcher while a core remains in + * the idle state, i.e. the core didn't get any events from scheduled or local + * queues for processing while already being in the idle state. + * + * Multiple while_idle hook functions (up to the number 'EM_CALLBACKS_MAX') can + * be registered. The order of calling multiple registered hook functions is the + * order of registration. If the same function is registered twice it will be + * called twice. + * + * @param func Idle hook function + * @return EM_OK if idle hook registration succeeded + */ +em_status_t +em_hooks_register_while_idle(em_idle_hook_while_idle_t func); + +/** + * Unregister a while_idle hook + * + * @param func Idle hook function + * @return EM_OK if idle hook unregistration succeeded + */ +em_status_t +em_hooks_unregister_while_idle(em_idle_hook_while_idle_t func); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_HOOKS_H_ */ diff --git a/include/event_machine/platform/event_machine_hw_specific.h b/include/event_machine/platform/event_machine_hw_specific.h index abdb2530..2487c2ca 100644 --- a/include/event_machine/platform/event_machine_hw_specific.h +++ b/include/event_machine/platform/event_machine_hw_specific.h @@ -1,276 +1,282 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine HW specific functions and other additions. - */ - -#ifndef EVENT_MACHINE_HW_SPECIFIC_H -#define EVENT_MACHINE_HW_SPECIFIC_H - -#pragma GCC visibility push(default) - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Get the major event type. - * - * The event type includes a major and a minor part. This function returns the - * major part. It can be compared against the enumeration em_event_type_major_e. - * - * @param type Event type - * - * @return Major event type - */ -static inline em_event_type_t -em_get_type_major(em_event_type_t type) -{ - return type & 0xFF000000; -} - -/** - * Get the minor event type. - * - * The event type includes a major and a minor part. This function returns the - * minor part. It can be compared against a type-specific minor enumeration. - * - * EM_EVENT_TYPE_SW_DEFAULT is reserved for (SW) events that are - * generic and directly accessible buffers of memory. - * - * @param type Event type - * - * @return Minor event type - */ -static inline em_event_type_t -em_get_type_minor(em_event_type_t type) -{ - return type & 0x00FFFFFF; -} - -/* - * Core mask manipulation functions - */ - -/** - * Zero the whole mask. - * - * @param[out] mask Core mask to zero (clear) - */ -void em_core_mask_zero(em_core_mask_t *mask); - -/** - * Set a bit in the mask. - * - * @param core Core id - * @param[out] mask Core mask - */ -void em_core_mask_set(int core, em_core_mask_t *mask); - -/** - * Clear a bit in the mask. - * - * @param core Core id - * @param[out] mask Core mask - */ -void em_core_mask_clr(int core, em_core_mask_t *mask); - -/** - * Test if a bit is set in the mask. - * - * @param core Core id - * @param mask Core mask - * - * @return Non-zero if core id is set in the mask - */ -int em_core_mask_isset(int core, const em_core_mask_t *mask); - -/** - * Test if the mask is all zero. - * - * @param mask Core mask - * - * @return Non-zero if the mask is all zero - */ -int em_core_mask_iszero(const em_core_mask_t *mask); - -/** - * Test if two masks are equal - * - * @param mask1 First core mask - * @param mask2 Second core mask - * - * @return Non-zero if the two masks are equal - */ -int em_core_mask_equal(const em_core_mask_t *mask1, - const em_core_mask_t *mask2); - -/** - * Set a range (0...count-1) of bits in the mask. 
- * - * @param count Number of bits to set - * @param[out] mask Core mask - */ -void em_core_mask_set_count(int count, em_core_mask_t *mask); - -/** - * Copy core mask - * - * @param[out] dst Destination core mask - * @param src Source core mask - */ -void em_core_mask_copy(em_core_mask_t *dst, const em_core_mask_t *src); - -/** - * Count the number of bits set in the mask. - * - * @param mask Core mask - * - * @return Number of bits set - */ -int em_core_mask_count(const em_core_mask_t *mask); - -/** - * Set specified bits from 'bits[]' in core mask. - * - * core 0: bits[0] = 0x1 (len = 1) - * core 1: bits[0] = 0x2 (len = 1) - * ... - * core 64: bits[0] = 0x0, bits[1] = 0x1 (len = 2) - * core 65: bits[0] = 0x0, bits[1] = 0x2 (len = 2) - * ... - * cores 0-127: bits[0]=0xffffffffffffffff, bits[1]=0xffffffffffffffff (len=2) - * ... - * @param bits Array of uint64_t:s with the bits to set in the core mask - * @param len Number of array elements in bits[]. - * @param[out] mask Core mask to set. - * - * @note bits ar 'or'ed into mask, so any previously set bits will remain set. - */ -void em_core_mask_set_bits(const uint64_t bits[], int len, - em_core_mask_t *mask); - -/** - * Get core mask, stored in a uint64_t array for the user - * - * core 0: bits[0] = 0x1 (len = 1) - * core 1: bits[0] = 0x2 (len = 1) - * ... - * core 64: bits[0] = 0x0, bits[1] = 0x1 (len = 2) - * core 65: bits[0] = 0x0, bits[1] = 0x2 (len = 2) - * ... - * cores 0-127: bits[0]=0xffffffffffffffff, bits[1]=0xffffffffffffffff (len=2) - * ... - * @param[out] bits Array of uint64_t:s that the core mask will be stored in. - * @param len Number of array elements in bits[]. - * @param mask Core mask to get bits from. - * - * @return The number of uint64_t:s written into bits[]. - */ -int em_core_mask_get_bits(uint64_t bits[/*out*/], int len, - const em_core_mask_t *mask); - -/** - * Set bits in a mask according to a given string. - * - * @param mask_str String containing '0xcoremask' to set - * @param[out] mask Core mask to set - * - * @return Zero (0) on success, non-zero on error. - * - * @note bits ar 'or'ed into mask, so any previously set bits will remain set. - */ -int em_core_mask_set_str(const char *mask_str, em_core_mask_t *mask); - -/** - * Get core mask in string format - * - * @param[out] mask_str String into which the core mask will be printed - * @param len Length of 'mask_str' - * @param mask Core mask to convert to string format - */ -void em_core_mask_tostr(char *mask_str /*out*/, int len, - const em_core_mask_t *mask); - -/** - * Return the index (position) of the Nth set bit in the core mask - * - * @param n Nth set bit, note n=1 means first set bit, n=[1...MaxCores] - * @param mask Core mask - * - * @return Index of the Nth set bit, <0 on error or if no such bit. 
- */ -int em_core_mask_idx(int n, const em_core_mask_t *mask); - -/** - * Bitwise AND operation on two masks, store the result in 'dst' - * - * dst = src1 & src2 - * - * @param[out] dst Destination core mask, result is stored here - * @param src1 Source mask #1 - * @param src2 Source mask #2 - */ -void em_core_mask_and(em_core_mask_t *dst, const em_core_mask_t *src1, - const em_core_mask_t *src2); - -/** - * Bitwise OR operation on two masks, store the result in 'dst' - * - * dst = src1 | src2 - * - * @param[out] dst Destination core mask, result is stored here - * @param src1 Source mask #1 - * @param src2 Source mask #2 - */ -void em_core_mask_or(em_core_mask_t *dst, const em_core_mask_t *src1, - const em_core_mask_t *src2); - -/** - * Bitwise XOR operation on two masks, store the result in 'dst' - * - * dst = src1 ^ src2 - * - * @param[out] dst Destination core mask, result is stored here - * @param src1 Source mask #1 - * @param src2 Source mask #2 - */ -void em_core_mask_xor(em_core_mask_t *dst, const em_core_mask_t *src1, - const em_core_mask_t *src2); - -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_HW_SPECIFIC_H */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine HW specific functions and other additions. + */ + +#ifndef EVENT_MACHINE_HW_SPECIFIC_H +#define EVENT_MACHINE_HW_SPECIFIC_H + +#pragma GCC visibility push(default) + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Get the major event type. + * + * The event type includes a major and a minor part. This function returns the + * major part. It can be compared against the enumeration em_event_type_major_e. + * + * @param type Event type + * + * @return Major event type + */ +static inline em_event_type_t +em_event_type_major(em_event_type_t type) +{ + return type & 0xFF000000; +} + +/* Backwards compatible naming */ +#define em_get_type_major em_event_type_major + +/** + * Get the minor event type. 
+ *
+ * The event type includes a major and a minor part. This function returns the
+ * minor part. It can be compared against a type-specific minor enumeration.
+ *
+ * EM_EVENT_TYPE_SW_DEFAULT is reserved for (SW) events that are
+ * generic and directly accessible buffers of memory.
+ *
+ * @param type Event type
+ *
+ * @return Minor event type
+ */
+static inline em_event_type_t
+em_event_type_minor(em_event_type_t type)
+{
+	return type & 0x00FFFFFF;
+}
+
+/* Backwards compatible naming */
+#define em_get_type_minor em_event_type_minor
+
+/*
+ * Core mask manipulation functions
+ */
+
+/**
+ * Zero the whole mask.
+ *
+ * @param[out] mask Core mask to zero (clear)
+ */
+void em_core_mask_zero(em_core_mask_t *mask);
+
+/**
+ * Set a bit in the mask.
+ *
+ * @param core Core id
+ * @param[out] mask Core mask
+ */
+void em_core_mask_set(int core, em_core_mask_t *mask);
+
+/**
+ * Clear a bit in the mask.
+ *
+ * @param core Core id
+ * @param[out] mask Core mask
+ */
+void em_core_mask_clr(int core, em_core_mask_t *mask);
+
+/**
+ * Test if a bit is set in the mask.
+ *
+ * @param core Core id
+ * @param mask Core mask
+ *
+ * @return Non-zero if core id is set in the mask
+ */
+int em_core_mask_isset(int core, const em_core_mask_t *mask);
+
+/**
+ * Test if the mask is all zero.
+ *
+ * @param mask Core mask
+ *
+ * @return Non-zero if the mask is all zero
+ */
+int em_core_mask_iszero(const em_core_mask_t *mask);
+
+/**
+ * Test if two masks are equal
+ *
+ * @param mask1 First core mask
+ * @param mask2 Second core mask
+ *
+ * @return Non-zero if the two masks are equal
+ */
+int em_core_mask_equal(const em_core_mask_t *mask1,
+		       const em_core_mask_t *mask2);
+
+/**
+ * Set a range (0...count-1) of bits in the mask.
+ *
+ * @param count Number of bits to set
+ * @param[out] mask Core mask
+ */
+void em_core_mask_set_count(int count, em_core_mask_t *mask);
+
+/**
+ * Copy core mask
+ *
+ * @param[out] dst Destination core mask
+ * @param src Source core mask
+ */
+void em_core_mask_copy(em_core_mask_t *dst, const em_core_mask_t *src);
+
+/**
+ * Count the number of bits set in the mask.
+ *
+ * @param mask Core mask
+ *
+ * @return Number of bits set
+ */
+int em_core_mask_count(const em_core_mask_t *mask);
+
+/**
+ * Set specified bits from 'bits[]' in core mask.
+ *
+ * core 0: bits[0] = 0x1 (len = 1)
+ * core 1: bits[0] = 0x2 (len = 1)
+ * ...
+ * core 64: bits[0] = 0x0, bits[1] = 0x1 (len = 2)
+ * core 65: bits[0] = 0x0, bits[1] = 0x2 (len = 2)
+ * ...
+ * cores 0-127: bits[0]=0xffffffffffffffff, bits[1]=0xffffffffffffffff (len=2)
+ * ...
+ * @param bits Array of uint64_t:s with the bits to set in the core mask
+ * @param len Number of array elements in bits[].
+ * @param[out] mask Core mask to set.
+ *
+ * @note bits are 'or'ed into mask, so any previously set bits will remain set.
+ */
+void em_core_mask_set_bits(const uint64_t bits[], int len,
+			   em_core_mask_t *mask);
+
+/**
+ * Get core mask, stored in a uint64_t array for the user
+ *
+ * core 0: bits[0] = 0x1 (len = 1)
+ * core 1: bits[0] = 0x2 (len = 1)
+ * ...
+ * core 64: bits[0] = 0x0, bits[1] = 0x1 (len = 2)
+ * core 65: bits[0] = 0x0, bits[1] = 0x2 (len = 2)
+ * ...
+ * cores 0-127: bits[0]=0xffffffffffffffff, bits[1]=0xffffffffffffffff (len=2)
+ * ...
+ * @param[out] bits Array of uint64_t:s that the core mask will be stored in.
+ * @param len Number of array elements in bits[].
+ * @param mask Core mask to get bits from.
+ *
+ * @return The number of uint64_t:s written into bits[].
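+ *
+ * An illustrative sketch (not from the original docs), using only functions
+ * declared in this header and assuming the configuration supports at least
+ * 65 cores - read back a mask that has cores 0 and 64 set:
+ * @code
+ *	em_core_mask_t mask;
+ *	uint64_t bits[2] = {0};
+ *	int words;
+ *
+ *	em_core_mask_zero(&mask);
+ *	em_core_mask_set(0, &mask);
+ *	em_core_mask_set(64, &mask);
+ *	words = em_core_mask_get_bits(bits, 2, &mask);
+ *	// per the mapping above: words == 2, bits[0] == 0x1, bits[1] == 0x1
+ * @endcode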
+ */
+int em_core_mask_get_bits(uint64_t bits[/*out*/], int len,
+			  const em_core_mask_t *mask);
+
+/**
+ * Set bits in a mask according to a given string.
+ *
+ * @param mask_str String containing '0xcoremask' to set
+ * @param[out] mask Core mask to set
+ *
+ * @return Zero (0) on success, non-zero on error.
+ *
+ * @note bits are 'or'ed into mask, so any previously set bits will remain set.
+ */
+int em_core_mask_set_str(const char *mask_str, em_core_mask_t *mask);
+
+/**
+ * Get core mask in string format
+ *
+ * @param[out] mask_str String into which the core mask will be printed
+ * @param len Length of 'mask_str'
+ * @param mask Core mask to convert to string format
+ */
+void em_core_mask_tostr(char *mask_str /*out*/, int len,
+			const em_core_mask_t *mask);
+
+/**
+ * Return the index (position) of the Nth set bit in the core mask
+ *
+ * @param n Nth set bit, note n=1 means first set bit, n=[1...MaxCores]
+ * @param mask Core mask
+ *
+ * @return Index of the Nth set bit, <0 on error or if no such bit.
+ */
+int em_core_mask_idx(int n, const em_core_mask_t *mask);
+
+/**
+ * Bitwise AND operation on two masks, store the result in 'dst'
+ *
+ * dst = src1 & src2
+ *
+ * @param[out] dst Destination core mask, result is stored here
+ * @param src1 Source mask #1
+ * @param src2 Source mask #2
+ */
+void em_core_mask_and(em_core_mask_t *dst, const em_core_mask_t *src1,
+		      const em_core_mask_t *src2);
+
+/**
+ * Bitwise OR operation on two masks, store the result in 'dst'
+ *
+ * dst = src1 | src2
+ *
+ * @param[out] dst Destination core mask, result is stored here
+ * @param src1 Source mask #1
+ * @param src2 Source mask #2
+ */
+void em_core_mask_or(em_core_mask_t *dst, const em_core_mask_t *src1,
+		     const em_core_mask_t *src2);
+
+/**
+ * Bitwise XOR operation on two masks, store the result in 'dst'
+ *
+ * dst = src1 ^ src2
+ *
+ * @param[out] dst Destination core mask, result is stored here
+ * @param src1 Source mask #1
+ * @param src2 Source mask #2
+ */
+void em_core_mask_xor(em_core_mask_t *dst, const em_core_mask_t *src1,
+		      const em_core_mask_t *src2);
+
+#ifdef __cplusplus
+}
+#endif
+
+#pragma GCC visibility pop
+#endif /* EVENT_MACHINE_HW_SPECIFIC_H */
diff --git a/include/event_machine/platform/event_machine_hw_types.h b/include/event_machine/platform/event_machine_hw_types.h
index 3f3008eb..83159255 100644
--- a/include/event_machine/platform/event_machine_hw_types.h
+++ b/include/event_machine/platform/event_machine_hw_types.h
@@ -1,591 +1,624 @@
-/*
- * Copyright (c) 2012, Nokia Siemens Networks
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine HW specific types - */ - -#ifndef EVENT_MACHINE_HW_TYPES_H -#define EVENT_MACHINE_HW_TYPES_H - -#pragma GCC visibility push(default) - -#include - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * @typedef em_pool_t - * Memory/Event Pool handle. - * - * Defines the memory pool e.g. used in em_alloc(). - * The default pool is defined by EM_POOL_DEFAULT. - * - * @see em_alloc(), event_machine_hw_config.h - */ -EM_HANDLE_T(em_pool_t); -/** Undefined EM pool */ -#define EM_POOL_UNDEF EM_STATIC_CAST(em_pool_t, EM_HDL_UNDEF) -/** em_pool_t printf format */ -#define PRI_POOL PRI_HDL - -/** - * Major event types. - */ -typedef enum em_event_type_major_e { - EM_EVENT_TYPE_UNDEF = 0, /**< Undef */ - EM_EVENT_TYPE_SW = 1 << 24, /**< Event from SW (EO) */ - EM_EVENT_TYPE_PACKET = 2 << 24, /**< Event from packet HW */ - EM_EVENT_TYPE_TIMER = 3 << 24, /**< Event from timer HW */ - EM_EVENT_TYPE_CRYPTO = 4 << 24 /**< Event from crypto HW */ -} em_event_type_major_e; - -/** - * @enum em_event_type_sw_minor_e - * Minor event types for the major EM_EVENT_TYPE_SW type. - */ -typedef enum em_event_type_sw_minor_e { - EM_EVENT_TYPE_SW_DEFAULT = 0 -} em_event_type_sw_minor_e; - -/** - * Queue types - */ -typedef enum em_queue_type_e { - /** Undefined */ - EM_QUEUE_TYPE_UNDEF = 0, - /** - * The application receives events one by one, non-concurrently to - * guarantee exclusive processing and ordering - */ - EM_QUEUE_TYPE_ATOMIC = 1, - /** - * The application may receive events fully concurrently, egress event - * ordering (when processed in parallel) not guaranteed - */ - EM_QUEUE_TYPE_PARALLEL = 2, - /** - * The application may receive events concurrently, but the system takes - * care of egress order (between two queues) - */ - EM_QUEUE_TYPE_PARALLEL_ORDERED = 3, - /** - * A queue which is not connected to scheduling. The application needs - * to explicitly dequeue events - */ - EM_QUEUE_TYPE_UNSCHEDULED = 4, - /** - * A queue type for local virtual queue not connected to scheduling. - */ - EM_QUEUE_TYPE_LOCAL = 5, - /** - * A system specific queue type to abstract output from EM, - * e.g. packet output or output towards a HW accelerator. - * The application uses em_send() and variants to send an event 'out'. - */ - EM_QUEUE_TYPE_OUTPUT = 6 -} em_queue_type_e; - -/** - * Portable queue priorities. - * - * These are generic portable values to use for priority. - * - * Alternatively application may choose to use numeric values in the valid - * range (from 0 to em_queue_get_num_prio() - 1). 
- * - * @see em_queue_prio_t, em_queue_get_num_prio() - */ -typedef enum em_queue_prio_e { - EM_QUEUE_PRIO_LOWEST = 0, /**< Lowest */ - EM_QUEUE_PRIO_LOW = 2, /**< Low */ - EM_QUEUE_PRIO_NORMAL = 4, /**< Normal */ - EM_QUEUE_PRIO_HIGH = 6, /**< High */ - EM_QUEUE_PRIO_HIGHEST = 7 /**< Highest */ -} em_queue_prio_e; -#define EM_QUEUE_PRIO_UNDEF 0xFF /**< Undefined */ - -/** - * em_queue_flag_t values (system specific): - * Only combine with bitwise OR. - */ -/** - * @def EM_QUEUE_FLAG_DEFAULT - * - * em_queue_flag_t default value. The EM queues will use implementation specific - * default values. - * The default values for this implementation values imply: - * EM_QUEUE_FLAG_DEFAULT = MTSAFE and BLOCKING queue implementation - */ -#define EM_QUEUE_FLAG_DEFAULT 0 - -/** - * @def EM_QUEUE_FLAG_BLOCKING - * Blocking queue implementation. A suspeding thread may block all other - * threads, i.e. no block freedom guarantees. - * Implied by EM_QUEUE_FLAG_DEFAULT for the implementation on this system. - */ -#define EM_QUEUE_FLAG_BLOCKING 0 /* blocking, fastest (default) */ - -/** - * @def EM_QUEUE_FLAG_NONBLOCKING_LF - * - * em_queue_flag_t value (system specific). Only combine flags with bitwise OR. - * - * Require a non-blocking and lock-free queue implementation. - * Other threads can make progress while a thread is suspended. - * Starvation freedom is not guaranteed. - * Queue creation will fail if set and not supported. - */ -#define EM_QUEUE_FLAG_NONBLOCKING_LF 1 /* non-blocking, lock-free */ - -/** - * @def EM_QUEUE_FLAG_NONBLOCKING_WF - * - * em_queue_flag_t value (system specific). Only combine flags with bitwise OR. - * - * Require a non-blocking and wait-free queue implementation. - * Other threads can make progress while a thread is suspended. - * Starvation freedom is guaranteed. - * Queue creation will fail if set and not supported. - */ -#define EM_QUEUE_FLAG_NONBLOCKING_WF 2 /* non-blocking, wait-free */ - -/** - * @def EM_QUEUE_FLAG_ENQ_NOT_MTSAFE - * - * em_queue_flag_t value (system specific). Only combine flags with bitwise OR. - * - * Default multithread safe enqueue implementation not needed, the application - * guarantees there is no concurrent accesses in enqueue, i.e. em_send(). - * This can only be used with unscheduled queues and can potentially improve - * performance. The implementation may choose to ignore this flag. - * Use with care. - **/ -#define EM_QUEUE_FLAG_ENQ_NOT_MTSAFE 4 - -/** - * @def EM_QUEUE_FLAG_DEQ_NOT_MTSAFE - * - * em_queue_flag_t value (system specific). Only combine flags with bitwise OR. - * - * Default multithread safe dequeue implementation not needed, the application - * guarantees there is no concurrent accesses in dequeue, i.e. - * em_queue_dequeue(). This can only be used with unscheduled queues and can - * potentially improve performance. The implementation may choose to ignore this - * flag. Use with care. - **/ -#define EM_QUEUE_FLAG_DEQ_NOT_MTSAFE 8 - -/** - * EM core mask. - * Each bit represents one core, core 0 is the lsb (1 << em_core_id()) - * Note, that EM will enumerate the core identifiers to always start from 0 and - * be contiguous meaning the core numbers are not necessarily physical. - * - * Use the functions in event_machine_hw_specific.h to manipulate the - * core masks. 
- * - * @see em_queue_group_create() - */ -typedef struct { - odp_cpumask_t odp_cpumask; -} em_core_mask_t; - -/** Number of chars needed to hold core mask as a string:'0xcoremask' + '\0' */ -#define EM_CORE_MASK_STRLEN ((EM_MAX_CORES + 3) / 4 + 3) - -/** - * @def EM_MAX_SUBPOOLS - * @brief The number of subpools in each EM pool. - * The subpool is a pool with buffers of only one size. - */ -#define EM_MAX_SUBPOOLS 4 - -/** - * Error/Status codes - */ -typedef enum em_status_e { - /** Illegal context */ - EM_ERR_BAD_CONTEXT = 1, - /** Illegal state */ - EM_ERR_BAD_STATE = 2, - /** ID not from a valid range */ - EM_ERR_BAD_ID = 3, - /** Invalid argument */ - EM_ERR_BAD_ARG = 4, - /** Resource allocation failed */ - EM_ERR_ALLOC_FAILED = 5, - /** Resource already reserved by someone else */ - EM_ERR_NOT_FREE = 6, - /** Resource not found */ - EM_ERR_NOT_FOUND = 7, - /** Value over the limit */ - EM_ERR_TOO_LARGE = 8, - /** Value under the limit */ - EM_ERR_TOO_SMALL = 9, - /** Operation failed */ - EM_ERR_OPERATION_FAILED = 10, - /** Failure in a library function */ - EM_ERR_LIB_FAILED = 11, - /** Implementation missing (placeholder) */ - EM_ERR_NOT_IMPLEMENTED = 12, - /** Pointer from bad memory area (e.g. NULL) */ - EM_ERR_BAD_POINTER = 13, - /** Operation timeout (e.g. waiting on a lock) */ - EM_ERR_TIMEOUT = 14, - /** Not properly initialiazed (e.g. not using provided initializer) */ - EM_ERR_NOT_INITIALIZED = 15, - /** ESV (reserved): Invalid event state detected, e.g. double-free */ - EM_ERR_EVENT_STATE = 16, - /** Operation is too near current time or in past */ - EM_ERR_TOONEAR = 17, - /** Time target too far, e.g. timeout exceeds maximum supported value */ - EM_ERR_TOOFAR = 18, - /** Timeout was canceled, e.g. periodic timer */ - EM_ERR_CANCELED = 19, - - /** Other error. This is the last error code (for bounds checking) */ - EM_ERR -} em_status_e; - -/** - * EM log level - */ -typedef enum { - EM_LOG_DBG, - EM_LOG_PRINT, - EM_LOG_ERR -} em_log_level_t; - -/** - * EM log function, variable number of args - * - * @note: both 'log()' and 'vlog()' need to be implemented if used. - */ -typedef int (*em_log_func_t)(em_log_level_t level, const char *fmt, ...) - __attribute__((format(printf, 2, 3))); - -/** - * EM log function, va_list instead of variable number of args - * - * @note: both 'log()' and 'vlog()' need to be implemented if used. - */ -typedef int (*em_vlog_func_t)(em_log_level_t level, const char *fmt, - va_list args); - -/** - * Input poll function - poll various input sources for pkts/events and enqueue - * into EM. - * - * User provided function - EM calls this, if not NULL, in the dispatch loop on - * each core - set via 'em_conf.input.input_poll_fn' - * - * @return number of pkts/events received from input and enqueued into EM - */ -typedef int (*em_input_poll_func_t)(void); - -/** - * 'Periodical' draining of output from EM, if needed. - * - * User provided function - EM calls this, if not NULL, in the dispatch loop on - * each core - set via 'em_conf.output.output_drain_fn' - * - * Draining of output events/pkts: EM will every once in a while call this - * user provided function to ensure that low rate buffered output is eventually - * sent out. Not needed if your EM output queues (EM_QUEUE_TYPE_OUTPUT) always - * sends all events out. 
Useful in situations where output is buffered and sent - * out in bursts when enough output has been gathered - single events or low - * rate flows may, without this function, never be sent out (or too late) if the - * buffering threshold has not been reached. - * - * @return number of events successfully drained and sent for output - */ -typedef int (*em_output_drain_func_t)(void); - -/** - * Output function, user provided callback for queues of type - * EM_QUEUE_TYPE_OUTPUT. - * - * This function will be called by em_send*() when sending to a queue of type - * EM_QUEUE_TYPE_OUTPUT and EM will take care of correct function calling order - * based on the scheduling context type. - * The function can use em_sched_context_type_current() if it needs information - * about e.g. ordering requirements set by the parent scheduled queue. - * - * @param events List of events to be sent out (ptr to array of events) - * @param num Number of events (positive integer) - * @param output_queue Output queue that the events were sent to (em_send*()) - * @param output_fn_args Extra arguments to indicate e.g. ordering requirement - * of the source context. - * - * @return number of events successfully sent (equal to num if all successful) - */ -typedef int (*em_output_func_t)(const em_event_t events[], - const unsigned int num, - const em_queue_t output_queue, - void *output_fn_args); - -/** - * Platform specific output queue conf, replace for your platform. - * Given to em_queue_create(type=EM_QUEUE_TYPE_OUTPUT) as em_queue_conf_t::conf - */ -typedef struct { - /** - * User provided function for sending events out. This function will be - * called by em_send*() when sending to a queue of type - * EM_QUEUE_TYPE_OUTPUT - */ - em_output_func_t output_fn; - /** - * Size of the argument-data passed via 'output_fn_args'. - * 'output_fn_args' is ignored, if 'args_len' is 0. - **/ - size_t args_len; - /** - * Extra output-function argument that will be passed. - */ - void *output_fn_args; -} em_output_queue_conf_t; - -/** - * @def EM_ERROR_FATAL_MASK - * Fatal error mask - */ -#define EM_ERROR_FATAL_MASK 0x80000000 -/** - * @def EM_ERROR_IS_FATAL - * Test if error is fatal - */ -#define EM_ERROR_IS_FATAL(error) (!!(EM_ERROR_FATAL_MASK & (error))) -/** - * @def EM_ERROR_SET_FATAL - * Set a fatal error code - */ -#define EM_ERROR_SET_FATAL(error) (EM_ERROR_FATAL_MASK | (error)) -/* Alias, shorter name, backwards compatible */ -#define EM_FATAL(error) EM_ERROR_SET_FATAL((error)) - -/** - * @def EM_ESCOPE_INTERNAL_TYPE - * EM Internal (non-public API) functions error scope - * - * @see EM_ESCOPE_API_TYPE and EM_ESCOPE_API_MASK used by the public EM API. - */ -#define EM_ESCOPE_INTERNAL_TYPE (0xFEu) -/** - * @def EM_ESCOPE_INTERNAL_MASK - * EM Internal (non-public API) functions error mask - * - * @see EM_ESCOPE_API_TYPE and EM_ESCOPE_API_MASK used by the public EM API. 
- */ -#define EM_ESCOPE_INTERNAL_MASK (EM_ESCOPE_BIT | \ - (EM_ESCOPE_INTERNAL_TYPE << 24)) -/** - * @def EM_ESCOPE_INTERNAL - * Test if the error scope identifies an EM Internal function - */ -#define EM_ESCOPE_INTERNAL(escope) (((escope) & EM_ESCOPE_MASK) \ - == EM_ESCOPE_INTERNAL_MASK) - -/** - * @def EM_ESCOPE_CONF_INIT - * EM error scope: initialize the Event Machine em_conf_t struct - */ -#define EM_ESCOPE_CONF_INIT (EM_ESCOPE_INTERNAL_MASK | 0x0001) -/** - * @def EM_ESCOPE_INIT - * EM error scope: initialize the Event Machine - */ -#define EM_ESCOPE_INIT (EM_ESCOPE_INTERNAL_MASK | 0x0002) -/** - * @def EM_ESCOPE_INIT_CORE - * EM error scope: initialize an Event Machine core - */ -#define EM_ESCOPE_INIT_CORE (EM_ESCOPE_INTERNAL_MASK | 0x0003) -/** - * @def EM_ESCOPE_TERM - * EM error scope: terminate the Event Machine - */ -#define EM_ESCOPE_TERM (EM_ESCOPE_INTERNAL_MASK | 0x0004) -/** - * @def EM_ESCOPE_TERM_CORE - * EM error scope: terminate an Event Machine core - */ -#define EM_ESCOPE_TERM_CORE (EM_ESCOPE_INTERNAL_MASK | 0x0005) - -/** - * @def EM_ESCOPE_POOL_CFG_INIT - * EM error scope: create an event pool - */ -#define EM_ESCOPE_POOL_CFG_INIT (EM_ESCOPE_INTERNAL_MASK | 0x0101) -/** - * @def EM_ESCOPE_POOL_CREATE - * EM error scope: create an event pool - */ -#define EM_ESCOPE_POOL_CREATE (EM_ESCOPE_INTERNAL_MASK | 0x0102) -/** - * @def EM_ESCOPE_POOL_DELETE - * EM error scope: delete an event pool - */ -#define EM_ESCOPE_POOL_DELETE (EM_ESCOPE_INTERNAL_MASK | 0x0103) -/** - * @def EM_ESCOPE_POOL_FIND - * EM error scope: find an event pool by name - */ -#define EM_ESCOPE_POOL_FIND (EM_ESCOPE_INTERNAL_MASK | 0x0104) -/** - * @def EM_ESCOPE_POOL_GET_NAME - * EM error scope: get an event pool name - */ -#define EM_ESCOPE_POOL_GET_NAME (EM_ESCOPE_INTERNAL_MASK | 0x0105) -/** - * @def EM_ESCOPE_POOL_GET_FIRST - * EM error scope: event pool iteration - get first of iteration - */ -#define EM_ESCOPE_POOL_GET_FIRST (EM_ESCOPE_INTERNAL_MASK | 0x0106) -/** - * @def EM_ESCOPE_POOL_GET_NEXT - * EM error scope: event pool iteration - get next of iteration - */ -#define EM_ESCOPE_POOL_GET_NEXT (EM_ESCOPE_INTERNAL_MASK | 0x0107) -/** - * @def EM_ESCOPE_POOL_INFO - * EM error scope: event pool info & statistics - */ -#define EM_ESCOPE_POOL_INFO (EM_ESCOPE_INTERNAL_MASK | 0x0108) -/** - * @def EM_ESCOPE_HOOKS_REGISTER_ALLOC - * EM error scope: register API callback hook for em_alloc() - */ -#define EM_ESCOPE_HOOKS_REGISTER_ALLOC (EM_ESCOPE_INTERNAL_MASK | 0x0201) -/** - * @def EM_ESCOPE_HOOKS_UNREGISTER_ALLOC - * EM error scope: unregister API callback hook for em_alloc() - */ -#define EM_ESCOPE_HOOKS_UNREGISTER_ALLOC (EM_ESCOPE_INTERNAL_MASK | 0x0202) -/** - * @def EM_ESCOPE_HOOKS_REGISTER_FREE - * EM error scope: register API callback hook for em_free() - */ -#define EM_ESCOPE_HOOKS_REGISTER_FREE (EM_ESCOPE_INTERNAL_MASK | 0x0203) -/** - * @def EM_ESCOPE_HOOKS_UNREGISTER_FREE - * EM error scope: unregister API callback hook for em_free() - */ -#define EM_ESCOPE_HOOKS_UNREGISTER_FREE (EM_ESCOPE_INTERNAL_MASK | 0x0204) -/** - * @def EM_ESCOPE_HOOKS_REGISTER_SEND - * EM error scope: register API callback hook for em_send-variants - */ -#define EM_ESCOPE_HOOKS_REGISTER_SEND (EM_ESCOPE_INTERNAL_MASK | 0x0205) -/** - * @def EM_ESCOPE_HOOKS_UNREGISTER_SEND - * EM error scope: unregister API callback hook for em_send-variants - */ -#define EM_ESCOPE_HOOKS_UNREGISTER_SEND (EM_ESCOPE_INTERNAL_MASK | 0x0206) -/** - * @def EM_ESCOPE_EVENT_SEND_DEVICE - * EM error scope: send event to another device - */ 
-#define EM_ESCOPE_EVENT_SEND_DEVICE (EM_ESCOPE_INTERNAL_MASK | 0x0301) -/** - * @def EM_ESCOPE_EVENT_SEND_DEVICE_MULTI - * EM error scope: send event(s) to another device - */ -#define EM_ESCOPE_EVENT_SEND_DEVICE_MULTI (EM_ESCOPE_INTERNAL_MASK | 0x0302) - -/** - * @def EM_ESCOPE_DAEMON - * EM internal escope: EO Daemon - */ -#define EM_ESCOPE_DAEMON (EM_ESCOPE_INTERNAL_MASK | 0x0401) - -/** - * @def EM_ESCOPE_EVENT_GROUP_UPDATE - * EM internal esope: Update the event group count - */ -#define EM_ESCOPE_EVENT_GROUP_UPDATE (EM_ESCOPE_INTERNAL_MASK | 0x0501) - -/* EM internal escopes: Queue */ -#define EM_ESCOPE_QUEUE_ENABLE (EM_ESCOPE_INTERNAL_MASK | 0x0601) -#define EM_ESCOPE_QUEUE_ENABLE_ALL (EM_ESCOPE_INTERNAL_MASK | 0x0602) -#define EM_ESCOPE_QUEUE_DISABLE (EM_ESCOPE_INTERNAL_MASK | 0x0603) -#define EM_ESCOPE_QUEUE_DISABLE_ALL (EM_ESCOPE_INTERNAL_MASK | 0x0604) -#define EM_ESCOPE_QUEUE_STATE_CHANGE (EM_ESCOPE_INTERNAL_MASK | 0x0605) - -/* EM internal escopes: Queue Groups */ -#define EM_ESCOPE_QUEUE_GROUP_INIT (EM_ESCOPE_INTERNAL_MASK | 0x0701) -#define EM_ESCOPE_QUEUE_GROUP_INIT_LOCAL (EM_ESCOPE_INTERNAL_MASK | 0x0702) -#define EM_ESCOPE_QUEUE_GROUP_DEFAULT (EM_ESCOPE_INTERNAL_MASK | 0x0703) -#define EM_ESCOPE_QUEUE_GROUP_ADD_CORE (EM_ESCOPE_INTERNAL_MASK | 0x0704) -#define EM_ESCOPE_QUEUE_GROUP_REM_CORE (EM_ESCOPE_INTERNAL_MASK | 0x0705) - -/* Other internal escopes */ -#define EM_ESCOPE_EO_START_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0801) -#define EM_ESCOPE_EO_START_SYNC_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0802) -#define EM_ESCOPE_EO_STOP_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0803) -#define EM_ESCOPE_EO_STOP_SYNC_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0804) -#define EM_ESCOPE_EO_REMOVE_QUEUE_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0805) -#define EM_ESCOPE_EO_REMOVE_QUEUE_SYNC_DONE_CB (EM_ESCOPE_INTERNAL_MASK |\ - 0x0806) -#define EM_ESCOPE_EO_REMOVE_QUEUE_ALL_DONE_CB (EM_ESCOPE_INTERNAL_MASK |\ - 0x0807) -#define EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC_DONE_CB (EM_ESCOPE_INTERNAL_MASK |\ - 0x0808) -#define EM_ESCOPE_EO_LOCAL_FUNC_CALL_REQ (EM_ESCOPE_INTERNAL_MASK | 0x0809) -#define EM_ESCOPE_INTERNAL_NOTIF (EM_ESCOPE_INTERNAL_MASK | 0x080A) -#define EM_ESCOPE_INTERNAL_EVENT_RECV_FUNC (EM_ESCOPE_INTERNAL_MASK | 0x080B) -#define EM_ESCOPE_EVENT_INTERNAL_DONE (EM_ESCOPE_INTERNAL_MASK | 0x080C) -#define EM_ESCOPE_EVENT_INTERNAL_LFUNC_CALL (EM_ESCOPE_INTERNAL_MASK | 0x080D) -#define EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ (EM_ESCOPE_INTERNAL_MASK | 0x080E) -#define EM_ESCOPE_POLL_UNSCHED_CTRL_QUEUE (EM_ESCOPE_INTERNAL_MASK | 0x080F) -#define EM_ESCOPE_EVENT_TO_HDR (EM_ESCOPE_INTERNAL_MASK | 0x0810) -#define EM_ESCOPE_EVENT_TO_HDR_MULTI (EM_ESCOPE_INTERNAL_MASK | 0x0812) -#define EM_ESCOPE_EVENT_INIT_ODP (EM_ESCOPE_INTERNAL_MASK | 0x0813) -#define EM_ESCOPE_EVENT_INIT_ODP_MULTI (EM_ESCOPE_INTERNAL_MASK | 0x0814) - -/** - * @def EM_ESCOPE_ODP_EXT - * EM ODP extensions error scope - */ -#define EM_ESCOPE_ODP_EXT (EM_ESCOPE_INTERNAL_MASK | 0x1000) - -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_HW_TYPES_H */ +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine HW specific types + */ + +#ifndef EVENT_MACHINE_HW_TYPES_H +#define EVENT_MACHINE_HW_TYPES_H + +#pragma GCC visibility push(default) + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @typedef em_pool_t + * Memory/Event Pool handle. + * + * Defines the memory pool e.g. used in em_alloc(). + * The default pool is defined by EM_POOL_DEFAULT. + * + * @see em_alloc(), event_machine_hw_config.h + */ +EM_HANDLE_T(em_pool_t); +/** Undefined EM pool */ +#define EM_POOL_UNDEF EM_STATIC_CAST(em_pool_t, EM_HDL_UNDEF) +/** em_pool_t printf format */ +#define PRI_POOL PRI_HDL + +/** + * Major event types. + */ +typedef enum em_event_type_major_e { + EM_EVENT_TYPE_UNDEF = 0, /**< Undef */ + EM_EVENT_TYPE_SW = 1 << 24, /**< Event from SW (EO) */ + EM_EVENT_TYPE_PACKET = 2 << 24, /**< Event from packet HW */ + EM_EVENT_TYPE_TIMER = 3 << 24, /**< Event from timer HW */ + EM_EVENT_TYPE_CRYPTO = 4 << 24, /**< Event from crypto HW */ + EM_EVENT_TYPE_VECTOR = 5 << 24 /**< Event contains a (packet) vector */ +} em_event_type_major_e; + +/** + * @enum em_event_type_sw_minor_e + * Minor event types for the major EM_EVENT_TYPE_SW type. + */ +typedef enum em_event_type_sw_minor_e { + EM_EVENT_TYPE_SW_DEFAULT = 0 +} em_event_type_sw_minor_e; + +/** + * Queue types + */ +typedef enum em_queue_type_e { + /** Undefined */ + EM_QUEUE_TYPE_UNDEF = 0, + /** + * The application receives events one by one, non-concurrently to + * guarantee exclusive processing and ordering + */ + EM_QUEUE_TYPE_ATOMIC = 1, + /** + * The application may receive events fully concurrently, egress event + * ordering (when processed in parallel) not guaranteed + */ + EM_QUEUE_TYPE_PARALLEL = 2, + /** + * The application may receive events concurrently, but the system takes + * care of egress order (between two queues) + */ + EM_QUEUE_TYPE_PARALLEL_ORDERED = 3, + /** + * A queue which is not connected to scheduling. The application needs + * to explicitly dequeue events + */ + EM_QUEUE_TYPE_UNSCHEDULED = 4, + /** + * A queue type for local virtual queue not connected to scheduling. + */ + EM_QUEUE_TYPE_LOCAL = 5, + /** + * A system specific queue type to abstract output from EM, + * e.g. packet output or output towards a HW accelerator. 
+ * The application uses em_send() and variants to send an event 'out'.
+ */
+	EM_QUEUE_TYPE_OUTPUT = 6
+} em_queue_type_e;
+
+/**
+ * Portable queue priorities.
+ *
+ * These are generic portable values to use for priority.
+ *
+ * Alternatively, the application may choose to use numeric values in the valid
+ * range (from 0 to em_queue_get_num_prio() - 1).
+ *
+ * @see em_queue_prio_t, em_queue_get_num_prio()
+ */
+typedef enum em_queue_prio_e {
+	EM_QUEUE_PRIO_LOWEST = 0, /**< Lowest */
+	EM_QUEUE_PRIO_LOW = 2, /**< Low */
+	EM_QUEUE_PRIO_NORMAL = 4, /**< Normal */
+	EM_QUEUE_PRIO_HIGH = 6, /**< High */
+	EM_QUEUE_PRIO_HIGHEST = 7 /**< Highest */
+} em_queue_prio_e;
+#define EM_QUEUE_PRIO_UNDEF 0xFF /**< Undefined */
+
+/**
+ * em_queue_flag_t values (system specific):
+ * Only combine with bitwise OR.
+ */
+/**
+ * @def EM_QUEUE_FLAG_DEFAULT
+ *
+ * em_queue_flag_t default value. The EM queues will use implementation specific
+ * default values.
+ * The default values for this implementation imply:
+ * EM_QUEUE_FLAG_DEFAULT = MTSAFE and BLOCKING queue implementation
+ */
+#define EM_QUEUE_FLAG_DEFAULT 0
+
+/**
+ * @def EM_QUEUE_FLAG_BLOCKING
+ * Blocking queue implementation. A suspending thread may block all other
+ * threads, i.e. no block freedom guarantees.
+ * Implied by EM_QUEUE_FLAG_DEFAULT for the implementation on this system.
+ */
+#define EM_QUEUE_FLAG_BLOCKING 0 /* blocking, fastest (default) */
+
+/**
+ * @def EM_QUEUE_FLAG_NONBLOCKING_LF
+ *
+ * em_queue_flag_t value (system specific). Only combine flags with bitwise OR.
+ *
+ * Require a non-blocking and lock-free queue implementation.
+ * Other threads can make progress while a thread is suspended.
+ * Starvation freedom is not guaranteed.
+ * Queue creation will fail if set and not supported.
+ */
+#define EM_QUEUE_FLAG_NONBLOCKING_LF 1 /* non-blocking, lock-free */
+
+/**
+ * @def EM_QUEUE_FLAG_NONBLOCKING_WF
+ *
+ * em_queue_flag_t value (system specific). Only combine flags with bitwise OR.
+ *
+ * Require a non-blocking and wait-free queue implementation.
+ * Other threads can make progress while a thread is suspended.
+ * Starvation freedom is guaranteed.
+ * Queue creation will fail if set and not supported.
+ */
+#define EM_QUEUE_FLAG_NONBLOCKING_WF 2 /* non-blocking, wait-free */
+
+/**
+ * @def EM_QUEUE_FLAG_ENQ_NOT_MTSAFE
+ *
+ * em_queue_flag_t value (system specific). Only combine flags with bitwise OR.
+ *
+ * Default multithread safe enqueue implementation not needed, the application
+ * guarantees there are no concurrent accesses in enqueue, i.e. em_send().
+ * This can only be used with unscheduled queues and can potentially improve
+ * performance. The implementation may choose to ignore this flag.
+ * Use with care.
+ **/
+#define EM_QUEUE_FLAG_ENQ_NOT_MTSAFE 4
+
+/**
+ * @def EM_QUEUE_FLAG_DEQ_NOT_MTSAFE
+ *
+ * em_queue_flag_t value (system specific). Only combine flags with bitwise OR.
+ *
+ * Default multithread safe dequeue implementation not needed, the application
+ * guarantees there are no concurrent accesses in dequeue, i.e.
+ * em_queue_dequeue(). This can only be used with unscheduled queues and can
+ * potentially improve performance. The implementation may choose to ignore this
+ * flag. Use with care.
+ **/
+#define EM_QUEUE_FLAG_DEQ_NOT_MTSAFE 8
+
+/**
+ * EM core mask.
+ * Each bit represents one core, core 0 is the lsb (1 << em_core_id())
+ * Note that EM will enumerate the core identifiers to always start from 0 and
+ * be contiguous, meaning the core numbers are not necessarily physical.
+ *
+ * Use the functions in event_machine_hw_specific.h to manipulate the
+ * core masks.
+ *
+ * @see em_queue_group_create()
+ */
+typedef struct {
+	odp_cpumask_t odp_cpumask;
+} em_core_mask_t;
+
+/** Number of chars needed to hold core mask as a string:'0xcoremask' + '\0' */
+#define EM_CORE_MASK_STRLEN ((EM_MAX_CORES + 3) / 4 + 3)
+
+/**
+ * @def EM_MAX_SUBPOOLS
+ * @brief The number of subpools in each EM pool.
+ * The subpool is a pool with buffers of only one size.
+ */
+#define EM_MAX_SUBPOOLS 4
+
+/**
+ * Error/Status codes
+ */
+typedef enum em_status_e {
+	/** Illegal context */
+	EM_ERR_BAD_CONTEXT = 1,
+	/** Illegal state */
+	EM_ERR_BAD_STATE = 2,
+	/** ID not from a valid range */
+	EM_ERR_BAD_ID = 3,
+	/** Invalid argument */
+	EM_ERR_BAD_ARG = 4,
+	/** Resource allocation failed */
+	EM_ERR_ALLOC_FAILED = 5,
+	/** Resource already reserved by someone else */
+	EM_ERR_NOT_FREE = 6,
+	/** Resource not found */
+	EM_ERR_NOT_FOUND = 7,
+	/** Value over the limit */
+	EM_ERR_TOO_LARGE = 8,
+	/** Value under the limit */
+	EM_ERR_TOO_SMALL = 9,
+	/** Operation failed */
+	EM_ERR_OPERATION_FAILED = 10,
+	/** Failure in a library function */
+	EM_ERR_LIB_FAILED = 11,
+	/** Implementation missing (placeholder) */
+	EM_ERR_NOT_IMPLEMENTED = 12,
+	/** Pointer from bad memory area (e.g. NULL) */
+	EM_ERR_BAD_POINTER = 13,
+	/** Operation timeout (e.g. waiting on a lock) */
+	EM_ERR_TIMEOUT = 14,
+	/** Not properly initialized (e.g. not using provided initializer) */
+	EM_ERR_NOT_INITIALIZED = 15,
+	/** ESV (reserved): Invalid event state detected, e.g. double-free */
+	EM_ERR_EVENT_STATE = 16,
+	/** Operation is too near the current time or in the past */
+	EM_ERR_TOONEAR = 17,
+	/** Time target too far, e.g. timeout exceeds maximum supported value */
+	EM_ERR_TOOFAR = 18,
+	/** Timeout was canceled, e.g. periodic timer */
+	EM_ERR_CANCELED = 19,
+
+	/** Other error. This is the last error code (for bounds checking) */
+	EM_ERR
+} em_status_e;
+
+/**
+ * EM log level
+ */
+typedef enum {
+	EM_LOG_DBG,
+	EM_LOG_PRINT,
+	EM_LOG_ERR
+} em_log_level_t;
+
+/**
+ * EM log function, variable number of args
+ *
+ * @note: both 'log()' and 'vlog()' need to be implemented if used.
+ */
+typedef int (*em_log_func_t)(em_log_level_t level, const char *fmt, ...)
+	__attribute__((format(printf, 2, 3)));
+
+/**
+ * EM log function, va_list instead of variable number of args
+ *
+ * @note: both 'log()' and 'vlog()' need to be implemented if used.
+ */
+typedef int (*em_vlog_func_t)(em_log_level_t level, const char *fmt,
+			      va_list args);
+
+/**
+ * Input poll function - poll various input sources for pkts/events and enqueue
+ * into EM.
+ *
+ * User provided function - EM calls this, if not NULL, in the dispatch loop on
+ * each core - set via 'em_conf.input.input_poll_fn'
+ *
+ * @return number of pkts/events received from input and enqueued into EM
+ */
+typedef int (*em_input_poll_func_t)(void);
+
+/**
+ * 'Periodical' draining of output from EM, if needed.
+ *
+ * User provided function - EM calls this, if not NULL, in the dispatch loop on
+ * each core - set via 'em_conf.output.output_drain_fn'
+ *
+ * Draining of output events/pkts: EM will every once in a while call this
+ * user provided function to ensure that low rate buffered output is eventually
Not needed if your EM output queues (EM_QUEUE_TYPE_OUTPUT) always + * sends all events out. Useful in situations where output is buffered and sent + * out in bursts when enough output has been gathered - single events or low + * rate flows may, without this function, never be sent out (or too late) if the + * buffering threshold has not been reached. + * + * @return number of events successfully drained and sent for output + */ +typedef int (*em_output_drain_func_t)(void); + +/** + * Output function, user provided callback for queues of type + * EM_QUEUE_TYPE_OUTPUT. + * + * This function will be called by em_send*() when sending to a queue of type + * EM_QUEUE_TYPE_OUTPUT and EM will take care of correct function calling order + * based on the scheduling context type. + * The function can use em_sched_context_type_current() if it needs information + * about e.g. ordering requirements set by the parent scheduled queue. + * + * @param events List of events to be sent out (ptr to array of events) + * @param num Number of events (positive integer) + * @param output_queue Output queue that the events were sent to (em_send*()) + * @param output_fn_args Extra arguments to indicate e.g. ordering requirement + * of the source context. + * + * @return number of events successfully sent (equal to num if all successful) + */ +typedef int (*em_output_func_t)(const em_event_t events[], + const unsigned int num, + const em_queue_t output_queue, + void *output_fn_args); + +/** + * Platform specific output queue conf, replace for your platform. + * Given to em_queue_create(type=EM_QUEUE_TYPE_OUTPUT) as em_queue_conf_t::conf + */ +typedef struct { + /** + * User provided function for sending events out. This function will be + * called by em_send*() when sending to a queue of type + * EM_QUEUE_TYPE_OUTPUT + */ + em_output_func_t output_fn; + /** + * Size of the argument-data passed via 'output_fn_args'. + * 'output_fn_args' is ignored, if 'args_len' is 0. + **/ + size_t args_len; + /** + * Extra output-function argument that will be passed. + */ + void *output_fn_args; +} em_output_queue_conf_t; + +/** + * @def EM_ERROR_FATAL_MASK + * Fatal error mask + */ +#define EM_ERROR_FATAL_MASK 0x80000000 +/** + * @def EM_ERROR_IS_FATAL + * Test if error is fatal + */ +#define EM_ERROR_IS_FATAL(error) (!!(EM_ERROR_FATAL_MASK & (error))) +/** + * @def EM_ERROR_SET_FATAL + * Set a fatal error code + */ +#define EM_ERROR_SET_FATAL(error) (EM_ERROR_FATAL_MASK | (error)) +/* Alias, shorter name, backwards compatible */ +#define EM_FATAL(error) EM_ERROR_SET_FATAL((error)) + +/** + * @def EM_ESCOPE_INTERNAL_TYPE + * EM Internal (non-public API) functions error scope + * + * @see EM_ESCOPE_API_TYPE and EM_ESCOPE_API_MASK used by the public EM API. + */ +#define EM_ESCOPE_INTERNAL_TYPE (0xFEu) +/** + * @def EM_ESCOPE_INTERNAL_MASK + * EM Internal (non-public API) functions error mask + * + * @see EM_ESCOPE_API_TYPE and EM_ESCOPE_API_MASK used by the public EM API. 
+ */ +#define EM_ESCOPE_INTERNAL_MASK (EM_ESCOPE_BIT | \ + (EM_ESCOPE_INTERNAL_TYPE << 24)) +/** + * @def EM_ESCOPE_INTERNAL + * Test if the error scope identifies an EM Internal function + */ +#define EM_ESCOPE_INTERNAL(escope) (((escope) & EM_ESCOPE_MASK) \ + == EM_ESCOPE_INTERNAL_MASK) + +/** + * @def EM_ESCOPE_CONF_INIT + * EM error scope: initialize the Event Machine em_conf_t struct + */ +#define EM_ESCOPE_CONF_INIT (EM_ESCOPE_INTERNAL_MASK | 0x0001) +/** + * @def EM_ESCOPE_INIT + * EM error scope: initialize the Event Machine + */ +#define EM_ESCOPE_INIT (EM_ESCOPE_INTERNAL_MASK | 0x0002) +/** + * @def EM_ESCOPE_INIT_CORE + * EM error scope: initialize an Event Machine core + */ +#define EM_ESCOPE_INIT_CORE (EM_ESCOPE_INTERNAL_MASK | 0x0003) +/** + * @def EM_ESCOPE_TERM + * EM error scope: terminate the Event Machine + */ +#define EM_ESCOPE_TERM (EM_ESCOPE_INTERNAL_MASK | 0x0004) +/** + * @def EM_ESCOPE_TERM_CORE + * EM error scope: terminate an Event Machine core + */ +#define EM_ESCOPE_TERM_CORE (EM_ESCOPE_INTERNAL_MASK | 0x0005) + +/** + * @def EM_ESCOPE_POOL_CFG_INIT + * EM error scope: create an event pool + */ +#define EM_ESCOPE_POOL_CFG_INIT (EM_ESCOPE_INTERNAL_MASK | 0x0101) +/** + * @def EM_ESCOPE_POOL_CREATE + * EM error scope: create an event pool + */ +#define EM_ESCOPE_POOL_CREATE (EM_ESCOPE_INTERNAL_MASK | 0x0102) +/** + * @def EM_ESCOPE_POOL_DELETE + * EM error scope: delete an event pool + */ +#define EM_ESCOPE_POOL_DELETE (EM_ESCOPE_INTERNAL_MASK | 0x0103) +/** + * @def EM_ESCOPE_POOL_FIND + * EM error scope: find an event pool by name + */ +#define EM_ESCOPE_POOL_FIND (EM_ESCOPE_INTERNAL_MASK | 0x0104) +/** + * @def EM_ESCOPE_POOL_GET_NAME + * EM error scope: get an event pool name + */ +#define EM_ESCOPE_POOL_GET_NAME (EM_ESCOPE_INTERNAL_MASK | 0x0105) +/** + * @def EM_ESCOPE_POOL_GET_FIRST + * EM error scope: event pool iteration - get first of iteration + */ +#define EM_ESCOPE_POOL_GET_FIRST (EM_ESCOPE_INTERNAL_MASK | 0x0106) +/** + * @def EM_ESCOPE_POOL_GET_NEXT + * EM error scope: event pool iteration - get next of iteration + */ +#define EM_ESCOPE_POOL_GET_NEXT (EM_ESCOPE_INTERNAL_MASK | 0x0107) +/** + * @def EM_ESCOPE_POOL_INFO + * EM error scope: event pool info & statistics + */ +#define EM_ESCOPE_POOL_INFO (EM_ESCOPE_INTERNAL_MASK | 0x0108) +/** + * @def EM_ESCOPE_HOOKS_REGISTER_ALLOC + * EM error scope: register API callback hook for em_alloc() + */ +#define EM_ESCOPE_HOOKS_REGISTER_ALLOC (EM_ESCOPE_INTERNAL_MASK | 0x0201) +/** + * @def EM_ESCOPE_HOOKS_UNREGISTER_ALLOC + * EM error scope: unregister API callback hook for em_alloc() + */ +#define EM_ESCOPE_HOOKS_UNREGISTER_ALLOC (EM_ESCOPE_INTERNAL_MASK | 0x0202) +/** + * @def EM_ESCOPE_HOOKS_REGISTER_FREE + * EM error scope: register API callback hook for em_free() + */ +#define EM_ESCOPE_HOOKS_REGISTER_FREE (EM_ESCOPE_INTERNAL_MASK | 0x0203) +/** + * @def EM_ESCOPE_HOOKS_UNREGISTER_FREE + * EM error scope: unregister API callback hook for em_free() + */ +#define EM_ESCOPE_HOOKS_UNREGISTER_FREE (EM_ESCOPE_INTERNAL_MASK | 0x0204) +/** + * @def EM_ESCOPE_HOOKS_REGISTER_SEND + * EM error scope: register API callback hook for em_send-variants + */ +#define EM_ESCOPE_HOOKS_REGISTER_SEND (EM_ESCOPE_INTERNAL_MASK | 0x0205) +/** + * @def EM_ESCOPE_HOOKS_UNREGISTER_SEND + * EM error scope: unregister API callback hook for em_send-variants + */ +#define EM_ESCOPE_HOOKS_UNREGISTER_SEND (EM_ESCOPE_INTERNAL_MASK | 0x0206) +/** + * @def EM_ESCOPE_HOOKS_REGISTER_TO_IDLE + * EM error scope: register an idle hook called 
when entering the idle state
+ */
+#define EM_ESCOPE_HOOKS_REGISTER_TO_IDLE (EM_ESCOPE_INTERNAL_MASK | 0x0207)
+/**
+ * @def EM_ESCOPE_HOOKS_UNREGISTER_TO_IDLE
+ * EM error scope: unregister an idle hook called when entering the idle state
+ */
+#define EM_ESCOPE_HOOKS_UNREGISTER_TO_IDLE (EM_ESCOPE_INTERNAL_MASK | 0x0208)
+/**
+ * @def EM_ESCOPE_HOOKS_REGISTER_TO_ACTIVE
+ * EM error scope: register an idle hook called when entering the active state
+ */
+#define EM_ESCOPE_HOOKS_REGISTER_TO_ACTIVE (EM_ESCOPE_INTERNAL_MASK | 0x0209)
+/**
+ * @def EM_ESCOPE_HOOKS_UNREGISTER_TO_ACTIVE
+ * EM error scope: unregister an idle hook called when entering the active state
+ */
+#define EM_ESCOPE_HOOKS_UNREGISTER_TO_ACTIVE (EM_ESCOPE_INTERNAL_MASK | 0x020A)
+/**
+ * @def EM_ESCOPE_HOOKS_REGISTER_WHILE_IDLE
+ * EM error scope: register an idle hook called while staying in idle state
+ */
+#define EM_ESCOPE_HOOKS_REGISTER_WHILE_IDLE (EM_ESCOPE_INTERNAL_MASK | 0x020B)
+/**
+ * @def EM_ESCOPE_HOOKS_UNREGISTER_WHILE_IDLE
+ * EM error scope: unregister an idle hook called while staying in idle state
+ */
+#define EM_ESCOPE_HOOKS_UNREGISTER_WHILE_IDLE (EM_ESCOPE_INTERNAL_MASK |\
+					       0x020C)
+
+/**
+ * @def EM_ESCOPE_EVENT_SEND_DEVICE
+ * EM error scope: send event to another device
+ */
+#define EM_ESCOPE_EVENT_SEND_DEVICE (EM_ESCOPE_INTERNAL_MASK | 0x0301)
+/**
+ * @def EM_ESCOPE_EVENT_SEND_DEVICE_MULTI
+ * EM error scope: send event(s) to another device
+ */
+#define EM_ESCOPE_EVENT_SEND_DEVICE_MULTI (EM_ESCOPE_INTERNAL_MASK | 0x0302)
+
+/**
+ * @def EM_ESCOPE_DAEMON
+ * EM internal escope: EO Daemon
+ */
+#define EM_ESCOPE_DAEMON (EM_ESCOPE_INTERNAL_MASK | 0x0401)
+
+/**
+ * @def EM_ESCOPE_EVENT_GROUP_UPDATE
+ * EM internal escope: Update the event group count
+ */
+#define EM_ESCOPE_EVENT_GROUP_UPDATE (EM_ESCOPE_INTERNAL_MASK | 0x0501)
+
+/* EM internal escopes: Queue */
+#define EM_ESCOPE_QUEUE_ENABLE (EM_ESCOPE_INTERNAL_MASK | 0x0601)
+#define EM_ESCOPE_QUEUE_ENABLE_ALL (EM_ESCOPE_INTERNAL_MASK | 0x0602)
+#define EM_ESCOPE_QUEUE_DISABLE (EM_ESCOPE_INTERNAL_MASK | 0x0603)
+#define EM_ESCOPE_QUEUE_DISABLE_ALL (EM_ESCOPE_INTERNAL_MASK | 0x0604)
+#define EM_ESCOPE_QUEUE_STATE_CHANGE (EM_ESCOPE_INTERNAL_MASK | 0x0605)
+
+/* EM internal escopes: Queue Groups */
+#define EM_ESCOPE_QUEUE_GROUP_INIT (EM_ESCOPE_INTERNAL_MASK | 0x0701)
+#define EM_ESCOPE_QUEUE_GROUP_INIT_LOCAL (EM_ESCOPE_INTERNAL_MASK | 0x0702)
+#define EM_ESCOPE_QUEUE_GROUP_DEFAULT (EM_ESCOPE_INTERNAL_MASK | 0x0703)
+#define EM_ESCOPE_QUEUE_GROUP_ADD_CORE (EM_ESCOPE_INTERNAL_MASK | 0x0704)
+#define EM_ESCOPE_QUEUE_GROUP_REM_CORE (EM_ESCOPE_INTERNAL_MASK | 0x0705)
+
+/* Other internal escopes */
+#define EM_ESCOPE_EO_START_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0801)
+#define EM_ESCOPE_EO_START_SYNC_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0802)
+#define EM_ESCOPE_EO_STOP_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0803)
+#define EM_ESCOPE_EO_STOP_SYNC_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0804)
+#define EM_ESCOPE_EO_REMOVE_QUEUE_DONE_CB (EM_ESCOPE_INTERNAL_MASK | 0x0805)
+#define EM_ESCOPE_EO_REMOVE_QUEUE_SYNC_DONE_CB (EM_ESCOPE_INTERNAL_MASK |\
+						0x0806)
+#define EM_ESCOPE_EO_REMOVE_QUEUE_ALL_DONE_CB (EM_ESCOPE_INTERNAL_MASK |\
+					       0x0807)
+#define EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC_DONE_CB (EM_ESCOPE_INTERNAL_MASK |\
+						    0x0808)
+#define EM_ESCOPE_EO_LOCAL_FUNC_CALL_REQ (EM_ESCOPE_INTERNAL_MASK | 0x0809)
+#define EM_ESCOPE_INTERNAL_NOTIF (EM_ESCOPE_INTERNAL_MASK | 0x080A)
+#define EM_ESCOPE_INTERNAL_EVENT_RECV_FUNC (EM_ESCOPE_INTERNAL_MASK | 0x080B)
+#define
EM_ESCOPE_EVENT_INTERNAL_DONE (EM_ESCOPE_INTERNAL_MASK | 0x080C) +#define EM_ESCOPE_EVENT_INTERNAL_LFUNC_CALL (EM_ESCOPE_INTERNAL_MASK | 0x080D) +#define EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ (EM_ESCOPE_INTERNAL_MASK | 0x080E) +#define EM_ESCOPE_POLL_UNSCHED_CTRL_QUEUE (EM_ESCOPE_INTERNAL_MASK | 0x080F) +#define EM_ESCOPE_EVENT_TO_HDR (EM_ESCOPE_INTERNAL_MASK | 0x0810) +#define EM_ESCOPE_EVENT_TO_HDR_MULTI (EM_ESCOPE_INTERNAL_MASK | 0x0812) +#define EM_ESCOPE_EVENT_INIT_ODP (EM_ESCOPE_INTERNAL_MASK | 0x0813) +#define EM_ESCOPE_EVENT_INIT_ODP_MULTI (EM_ESCOPE_INTERNAL_MASK | 0x0814) + +/** + * @def EM_ESCOPE_ODP_EXT + * EM ODP extensions error scope + */ +#define EM_ESCOPE_ODP_EXT (EM_ESCOPE_INTERNAL_MASK | 0x1000) + +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_HW_TYPES_H */ diff --git a/include/event_machine/platform/event_machine_init.h b/include/event_machine/platform/event_machine_init.h index 24c0b6f1..8981ee5d 100644 --- a/include/event_machine/platform/event_machine_init.h +++ b/include/event_machine/platform/event_machine_init.h @@ -1,300 +1,310 @@ -/* - * Copyright (c) 2018, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_INIT_H_ -#define EVENT_MACHINE_INIT_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup init Initialization and termination - * Event Machine initialization and termination - * @{ - * - * The Event Machine must be initialized before use. One core that will be part - * of EM calls em_init(). Additionally, after the user has set up the threads, - * or processes and pinned those to HW-cores, each participating core, i.e. - * EM-core, needs to run em_init_core(). Only now is an EM-core ready to use the - * other EM API functions and can finally enter the dispatch-loop via - * em_dispath() on each core that should handle events. - * - * The EM termination sequence runs in the opposite order: each core needs to - * call em_term_core() before one last call to em_term(). 
- * - * The 'em_conf_t' type given to em_init() and em_term() is HW/platform specific - * and is defined in event_machine_hw_types.h - * - * Do not include this from the application, event_machine.h will - * do it for you. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Event Machine run-time configuration options given at startup to em_init() - * - * The 'em_conf_t' struct should be initialized with em_conf_init() before use. - * This initialization provides better backwards compatibility since all options - * will be set to default values. - * The user must further set the needed configuration and call em_init(): - * - * @code - * em_conf_t conf; - * em_conf_init(&conf); // init with default values - * conf.thread_per_core = 1; - * ... - * conf.core_count = N; - * conf.phys_mask = set N bits; // use em_core_mask_...() functions to set - * ... - * ret = em_init(&conf); // on one core - * ... - * ret = em_init_core(); // on each of the 'conf.core_count' cores - * @endcode - * - * Content is copied into EM by em_init(). - * - * @note Several EM options are configured through compile-time defines. - * Run-time options allow using the same EM-lib with different configs. - * Also see the overrideable EM runtime config file values, - * default file: config/em-odp.config - * - * @see em_conf_init(), em_init() - */ -typedef struct { - /** - * EM device id - use different device ids for each EM instance or - * remote EM device that need to communicate with each other. - * Default value is 0. - */ - uint16_t device_id; - - /** - * Event Timer: enable=1, disable=0. - * Default value is 0 (disable). - */ - int event_timer; - - /** - * RunMode: EM run with one thread per core. - * Set 'true' to select thread-per-core mode. - * This is the recommended mode, but the user must explicitly set it to - * enable. Default value is 0. - * @note The user must set either 'thread_per_core' or - * 'process_per_core' but not both. - */ - int thread_per_core; - - /** - * RunMode: EM run with one process per core. - * Set 'true' to select process-per-core mode. Default value is 0. - * @note The user must set either 'thread_per_core' or - * 'process_per_core' but not both. - */ - int process_per_core; - - /** - * Number of EM-cores (== number of EM-threads or EM-processes). - * The 'core_count' must match the number of bits set in 'phys_mask'. - * EM-cores will be enumerated from 0 to 'core_count-1' regardless of - * the actual physical core ids. - * Default value is 0 and needs to be changed by the user. - */ - int core_count; - - /** - * Physical core mask, exactly listing the physical CPU cores to be used - * by EM (this is a physical core mask even though the 'em_core_mask_t' - * type is used). - * Default value is all-0 and needs to be changed by the user. - * @note EM otherwise operates on logical cores, i.e. enumerated - * contiguously from 0 to 'core_count-1' and a logical - * EM core mask has 'core_count' consequtively set bits. - * Example - physical mask vs. corresponding EM core mask: - * .core_count = 8 - * .physmask: 0xf0f0 (binary: 1111 0000 1111 0000 - 8 set bits) - * = 8 phys-cores (phys-cores 4-7,12-15) - * ==> EM-mask: 0x00ff (0000 0000 1111 1111 binary) - 8 EM cores - * = 8 EM-cores (EM-cores 0-7) - */ - em_core_mask_t phys_mask; - - /** - * Pool configuration for the EM default pool (EM_POOL_DEFAULT). - * Default value is set by em_pool_cfg_init() and needs to be changed - * by the user. - */ - em_pool_cfg_t default_pool_cfg; - - /** - * EM log functions. 
- * Default values are NULL and causes EM to use internal default - * log-functions. - */ - struct { - /** EM log function, user overridable, variable number of args*/ - em_log_func_t log_fn; - /** EM log function, user overridable, va_list */ - em_vlog_func_t vlog_fn; - } log; - - /** EM event/pkt input related functions and config */ - struct { - /** - * User provided function, called from within the EM-dispatch - * loop, mainly for polling various input sources for events or - * pkts and then enqueue them into EM. - * Set to 'NULL' if not needed (default). - */ - em_input_poll_func_t input_poll_fn; - /** - * EM core mask to control which EM-cores (0 to 'core_count-1') - * input_poll_fn() will be called on. - * The provided mask has to be equal or a subset of the - * EM core mask with all 'core_count' bits set. - * A zero mask means execution on _all_ EM cores (default). - */ - em_core_mask_t input_poll_mask; - } input; - - /** EM event/pkt output related functions and config */ - struct { - /** - * User provided function, called from within the EM-dispatch - * loop, mainly for 'periodical' draining of buffered output to - * make sure events/pkts are eventually sent out even if the - * rate is low or stops for a while. - * Set to 'NULL' if not needed (default). - */ - em_output_drain_func_t output_drain_fn; - /** - * EM core mask to control which EM-cores (0 to 'core_count-1') - * output_drain_fn() will be called on. - * The provided mask has to be equal or a subset of the - * EM core mask with all 'core_count' bits set. - * A zero mask means execution on _all_ EM cores (default). - */ - em_core_mask_t output_drain_mask; - } output; - - /** - * User provided API callback hooks. - * Set only the needed hooks to avoid performance degradation. - * Only used if EM_API_HOOKS_ENABLE != 0 - */ - em_api_hooks_t api_hooks; - -} em_conf_t; - -/** - * Initialize configuration parameters for em_init() - * - * Initialize em_conf_t to default values for all fields. - * After initialization, the user further needs to set the mandatory fields of - * 'em_conf_t' before calling em_init(). - * Always initialize 'conf' first with em_conf_init(&conf) to - * ensure backwards compatibility with potentially added new options. - * - * @param conf Address of the em_conf_t to be initialized - * - * @see em_init() - */ -void em_conf_init(em_conf_t *conf); - -/** - * Initialize the Event Machine. - * - * Must be called once at startup. Additionally each EM-core needs to call the - * em_init_core() function before using any further EM API functions/resources. - * - * @param conf EM runtime config options, - * HW/platform specific: see event_machine_hw_types.h - * - * @return EM_OK if successful. - * - * @see em_init_core() for EM-core specific init after em_init(). - */ -em_status_t -em_init(const em_conf_t *conf); - -/** - * Initialize an EM-core. - * - * Must be called once by each EM-core (= process, thread or bare metal core). - * EM queues, EOs, queue groups etc. can be created after a successful return - * from this function. - * - * @return EM_OK if successful. - * - * @see em_init() - */ -em_status_t -em_init_core(void); - -/** - * Terminate the Event Machine. - * - * Called once at exit. Additionally, before the one call to em_term(), - * each EM-core needs to call the em_term_core() function to free up local - * resources. - * - * @param conf EM runtime config options - * - * @return EM_OK if successful. - * - * @see em_term_core() for EM-core specific termination before em_term(). 
- */
-em_status_t
-em_term(const em_conf_t *conf);
-
-/**
- * Terminate an EM-core.
- *
- * Called by each EM-core (= process, thread or bare metal core)
- * before one call to em_term().
- *
- * @return EM_OK if successful.
- *
- * @see em_term()
- */
-em_status_t
-em_term_core(void);
-
-/**
- * @}
- */
-#ifdef __cplusplus
-}
-#endif
-
-#pragma GCC visibility pop
-#endif /* EVENT_MACHINE_INIT_H_ */
+/*
+ * Copyright (c) 2018, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EVENT_MACHINE_INIT_H_
+#define EVENT_MACHINE_INIT_H_
+
+#pragma GCC visibility push(default)
+
+/**
+ * @file
+ * @defgroup init Initialization and termination
+ * Event Machine initialization and termination
+ * @{
+ *
+ * The Event Machine must be initialized before use. One core that will be part
+ * of EM calls em_init(). Additionally, after the user has set up the threads
+ * or processes and pinned them to HW-cores, each participating core, i.e.
+ * EM-core, needs to run em_init_core(). Only then is an EM-core ready to use
+ * the other EM API functions and can finally enter the dispatch-loop via
+ * em_dispatch() on each core that should handle events.
+ *
+ * The EM termination sequence runs in the opposite order: each core needs to
+ * call em_term_core() before one last call to em_term().
+ *
+ * The 'em_conf_t' type given to em_init() and em_term() is HW/platform specific
+ * and is defined in event_machine_hw_types.h
+ *
+ * Do not include this from the application, event_machine.h will
+ * do it for you.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Event Machine run-time configuration options given at startup to em_init()
+ *
+ * The 'em_conf_t' struct should be initialized with em_conf_init() before use.
+ * This initialization provides better backwards compatibility since all options
+ * will be set to default values.
+ * The user must further set the needed configuration and call em_init():
+ *
+ * @code
+ * em_conf_t conf;
+ * em_conf_init(&conf); // init with default values
+ * conf.thread_per_core = 1;
+ * ...
+ * conf.core_count = N;
+ * conf.phys_mask = set N bits; // use em_core_mask_...() functions to set
+ * ...
+ * ret = em_init(&conf); // on one core
+ * ...
+ * ret = em_init_core(); // on each of the 'conf.core_count' cores
+ * @endcode
+ *
+ * Content is copied into EM by em_init().
+ *
+ * @note Several EM options are configured through compile-time defines.
+ *       Run-time options allow using the same EM-lib with different configs.
+ *       Also see the overridable EM runtime config file values,
+ *       default file: config/em-odp.conf
+ *
+ * @see em_conf_init(), em_init()
+ */
+typedef struct {
+	/**
+	 * EM device id - use different device ids for EM instances or
+	 * remote EM devices that need to communicate with each other.
+	 * Default value is 0.
+	 */
+	uint16_t device_id;
+
+	/**
+	 * Event Timer: enable=1, disable=0.
+	 * Default value is 0 (disable).
+	 */
+	int event_timer;
+
+	/**
+	 * RunMode: EM run with one thread per core.
+	 * Set 'true' to select thread-per-core mode.
+	 * This is the recommended mode, but the user must explicitly set it to
+	 * enable. Default value is 0.
+	 * @note The user must set either 'thread_per_core' or
+	 *       'process_per_core' but not both.
+	 */
+	int thread_per_core;
+
+	/**
+	 * RunMode: EM run with one process per core.
+	 * Set 'true' to select process-per-core mode. Default value is 0.
+	 * @note The user must set either 'thread_per_core' or
+	 *       'process_per_core' but not both.
+	 */
+	int process_per_core;
+
+	/**
+	 * Number of EM-cores (== number of EM-threads or EM-processes).
+	 * The 'core_count' must match the number of bits set in 'phys_mask'.
+	 * EM-cores will be enumerated from 0 to 'core_count-1' regardless of
+	 * the actual physical core ids.
+	 * Default value is 0 and needs to be changed by the user.
+	 */
+	int core_count;
+
+	/**
+	 * Physical core mask, exactly listing the physical CPU cores to be used
+	 * by EM (this is a physical core mask even though the 'em_core_mask_t'
+	 * type is used).
+	 * Default value is all-0 and needs to be changed by the user.
+	 * @note EM otherwise operates on logical cores, i.e. enumerated
+	 *       contiguously from 0 to 'core_count-1' and a logical
+	 *       EM core mask has 'core_count' consecutively set bits.
+	 *       Example - physical mask vs. corresponding EM core mask:
+	 *       .core_count = 8
+	 *       .physmask: 0xf0f0 (binary: 1111 0000 1111 0000 - 8 set bits)
+	 *                  = 8 phys-cores (phys-cores 4-7,12-15)
+	 *       ==> EM-mask: 0x00ff (0000 0000 1111 1111 binary) - 8 EM cores
+	 *                    = 8 EM-cores (EM-cores 0-7)
+	 */
+	em_core_mask_t phys_mask;
+
+	/**
+	 * Pool configuration for the EM default pool (EM_POOL_DEFAULT).
+	 * Default value is set by em_pool_cfg_init() and needs to be changed
+	 * by the user.
+	 *
+	 * Note that if the default pool configuration is also given in the
+	 * config file through option 'startup_pools', it will override this
+	 * default pool configuration.
+	 */
+	em_pool_cfg_t default_pool_cfg;
+
+	/**
+	 * EM log functions.
+	 * Default values are NULL and cause EM to use internal default
+	 * log-functions.
+	 */
+	struct {
+		/** EM log function, user overridable, variable number of args */
+		em_log_func_t log_fn;
+		/** EM log function, user overridable, va_list */
+		em_vlog_func_t vlog_fn;
+	} log;
+
+	/** EM event/pkt input related functions and config */
+	struct {
+		/**
+		 * User provided function, called from within the EM-dispatch
+		 * loop, mainly for polling various input sources for events or
+		 * pkts and then enqueuing them into EM.
+		 * Set to 'NULL' if not needed (default).
+		 */
+		em_input_poll_func_t input_poll_fn;
+		/**
+		 * EM core mask to control which EM-cores (0 to 'core_count-1')
+		 * input_poll_fn() will be called on.
+		 * The provided mask has to be equal to or a subset of the
+		 * EM core mask with all 'core_count' bits set.
+		 * A zero mask means execution on _all_ EM cores (default).
+		 */
+		em_core_mask_t input_poll_mask;
+	} input;
+
+	/** EM event/pkt output related functions and config */
+	struct {
+		/**
+		 * User provided function, called from within the EM-dispatch
+		 * loop, mainly for 'periodic' draining of buffered output to
+		 * make sure events/pkts are eventually sent out even if the
+		 * rate is low or stops for a while.
+		 * Set to 'NULL' if not needed (default).
+		 */
+		em_output_drain_func_t output_drain_fn;
+		/**
+		 * EM core mask to control which EM-cores (0 to 'core_count-1')
+		 * output_drain_fn() will be called on.
+		 * The provided mask has to be equal to or a subset of the
+		 * EM core mask with all 'core_count' bits set.
+		 * A zero mask means execution on _all_ EM cores (default).
+		 */
+		em_core_mask_t output_drain_mask;
+	} output;
+
+	/**
+	 * User provided API callback hooks.
+	 * Set only the needed hooks to avoid performance degradation.
+	 * Only used if EM_API_HOOKS_ENABLE != 0
+	 */
+	em_api_hooks_t api_hooks;
+
+	/**
+	 * User provided idle callback hooks.
+	 * Set only the needed hooks to avoid performance degradation.
+	 * Only used if EM_IDLE_HOOKS_ENABLE != 0
+	 */
+	em_idle_hooks_t idle_hooks;
+} em_conf_t;
+
+/**
+ * Initialize configuration parameters for em_init()
+ *
+ * Initialize em_conf_t to default values for all fields.
+ * After initialization, the user further needs to set the mandatory fields of
+ * 'em_conf_t' before calling em_init().
+ * Always initialize 'conf' first with em_conf_init(&conf) to
+ * ensure backwards compatibility with potentially added new options.
+ *
+ * @param conf Address of the em_conf_t to be initialized
+ *
+ * @see em_init()
+ */
+void em_conf_init(em_conf_t *conf);
+
+/**
+ * Initialize the Event Machine.
+ *
+ * Must be called once at startup. Additionally, each EM-core needs to call the
+ * em_init_core() function before using any further EM API functions/resources.
+ *
+ * @param conf EM runtime config options,
+ *             HW/platform specific: see event_machine_hw_types.h
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_init_core() for EM-core specific init after em_init().
+ */
+em_status_t
+em_init(const em_conf_t *conf);
+
+/**
+ * Initialize an EM-core.
+ *
+ * Must be called once by each EM-core (= process, thread or bare metal core).
+ * EM queues, EOs, queue groups etc. can be created after a successful return
+ * from this function.
+ *
+ * @return EM_OK if successful.
+ *
+ * @see em_init()
+ */
+em_status_t
+em_init_core(void);
+
+/**
+ * Terminate the Event Machine.
+ *
+ * Called once at exit. Additionally, before the one call to em_term(),
+ * each EM-core needs to call the em_term_core() function to free up local
+ * resources.
+ *
+ * @param conf EM runtime config options
+ *
+ * @return EM_OK if successful.
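+ *
+ * A minimal init-to-term lifecycle sketch (editor-added illustration, not
+ * part of the original API text). Plain pthreads and the names worker() and
+ * NUM_CORES are assumptions; real applications typically also handle CPU
+ * pinning and ODP thread setup, as in programs/common/cm_setup.c:
+ * @code
+ * #include <pthread.h>
+ * #include <event_machine.h>
+ * #define NUM_CORES 4
+ *
+ * static void *worker(void *arg)
+ * {
+ *         (void)arg;
+ *         em_init_core();       // once per EM-core, after em_init()
+ *         em_dispatch(1000000); // dispatch events, 0 would mean 'forever'
+ *         em_term_core();       // per-core cleanup before em_term()
+ *         return NULL;
+ * }
+ *
+ * int main(void)
+ * {
+ *         pthread_t tid[NUM_CORES];
+ *         em_conf_t conf;
+ *
+ *         em_conf_init(&conf);      // defaults first (mandatory)
+ *         conf.thread_per_core = 1; // recommended run mode
+ *         conf.core_count = NUM_CORES;
+ *         em_core_mask_zero(&conf.phys_mask);
+ *         for (int i = 0; i < NUM_CORES; i++)
+ *                 em_core_mask_set(i, &conf.phys_mask); // phys cores 0..N-1
+ *         em_pool_cfg_init(&conf.default_pool_cfg);
+ *         // ...set default_pool_cfg subpool sizing here...
+ *
+ *         if (em_init(&conf) != EM_OK) // once, on one core
+ *                 return -1;
+ *         for (int i = 0; i < NUM_CORES; i++) // one thread per EM-core,
+ *                 pthread_create(&tid[i], NULL, worker, NULL); // no pinning
+ *         for (int i = 0; i < NUM_CORES; i++)
+ *                 pthread_join(tid[i], NULL);
+ *         return em_term(&conf) == EM_OK ? 0 : -1;
+ * }
+ * @endcode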
+ * + * @see em_term_core() for EM-core specific termination before em_term(). + */ +em_status_t +em_term(const em_conf_t *conf); + +/** + * Terminate an EM-core. + * + * Called by each EM-core (= process, thread or bare metal core) + * before one call to em_term(). + * + * @return EM_OK if successful. + * + * @see em_term() + */ +em_status_t +em_term_core(void); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_INIT_H_ */ diff --git a/include/event_machine/platform/event_machine_odp_ext.h b/include/event_machine/platform/event_machine_odp_ext.h index 0681b686..4dc0f402 100644 --- a/include/event_machine/platform/event_machine_odp_ext.h +++ b/include/event_machine/platform/event_machine_odp_ext.h @@ -1,190 +1,276 @@ -/* - * Copyright (c) 2015-2021, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine ODP API extensions - */ - -#ifndef EVENT_MACHINE_ODP_EXT_H -#define EVENT_MACHINE_ODP_EXT_H - -#pragma GCC visibility push(default) - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include - -/** - * Get the associated ODP queue. - * - * @param queue EM queue - * - * @return odp queue if successful, ODP_QUEUE_INVALID on error - */ -odp_queue_t em_odp_queue_odp(em_queue_t queue); - -/** - * Get the associated EM queue. - * - * @param queue ODP queue - * - * @return em queue if successful, EM_QUEUE_UNDEF on error - */ -em_queue_t em_odp_queue_em(odp_queue_t queue); - -/** - * Get EM event header size. - * - * Needed when user has to configure separate pool for packet I/O and allocate - * EM events from there. - * - * @return em event header size. - */ -uint32_t em_odp_event_hdr_size(void); - -/** - * Convert EM event handle to ODP event handle. - * - * @param event EM-event handle - * - * @return ODP event handle. - */ -odp_event_t em_odp_event2odp(em_event_t event); - -/** - * Convert EM event handles to ODP event handles - * - * @param events Array of EM-events to convert to ODP-events. 
- * The 'events[]' array must contain 'num' valid - * event handles. - * @param[out] odp_events Output array into which the ocrresponding ODP-event - * handles are written. Array must fit 'num' entries. - * @param num Number of entries in 'events[]' and 'odp_events[]'. - */ -void em_odp_events2odp(const em_event_t events[/*num*/], - odp_event_t odp_events[/*out:num*/], int num); - -/** - * Convert ODP event handle to EM event handle. - * - * The event must have been allocated by EM originally. - * - * @param odp_event ODP-event handle - * - * @return EM event handle. - */ -em_event_t em_odp_event2em(odp_event_t odp_event); - -/** - * Convert EM event handles to ODP event handles - * - * @param odp_events Array of ODP-events to convert to EM-events. - * The 'odp_events[]' array must contain 'num' valid - * ODP-event handles. - * @param[out] events Output array into which the ocrresponding EM-event - * handles are written. Array must fit 'num' entries. - * @param num Number of entries in 'odp_events[]' and 'events[]'. - */ -void em_odp_events2em(const odp_event_t odp_events[/*num*/], - em_event_t events[/*out:num*/], int num); - -/** - * @brief Get the ODP pools used as subpools in a given EM event pool. - * - * An EM event pool consists of 1 to 'EM_MAX_SUBPOOLS' subpools. Each subpool - * is an ODP pool. This function outputs the ODP pool handles of these subpools - * into a user-provided array and returns the number of handles written. - * - * The obtained ODP pools must not be deleted or alterede outside of EM, - * e.g. these ODP pools must only be deleted as part of an EM event pool - * using em_pool_delete(). - * - * ODP pool handles obtained through this function can be used to - * - configure ODP pktio to use an ODP pool created via EM (allows for - * better ESV tracking) - * - print ODP-level pool statistics with ODP APIs etc. - * - * Note that direct allocations and free:s via ODP APIs will bypass - * EM checks (e.g. ESV) and might cause errors unless properely handled: - * - use em_odp_event2em() to initialize as an EM event - * - use em_event_mark_free() before ODP-free operations (SW- or HW-free) - * - * @param pool EM event pool handle. - * @param[out] odp_pools Output array to be filled with the ODP pools used as - * subpools in the given EM event pool. The array must - * fit 'num' entries. - * @param num Number of entries in the 'odp_pools[]' array. - * Using 'num=EM_MAX_SUBPOOLS' will always be large - * enough to fit all subpools in the EM event pool. - * - * @return The number of ODP pools filled into 'odp_pools[]' - */ -int em_odp_pool2odp(em_pool_t pool, odp_pool_t odp_pools[/*out*/], int num); - -/** - * @brief Get the EM event pool that the given ODP pool belongs to - * - * An EM event pool consists of 1 to 'EM_MAX_SUBPOOLS' subpools. Each subpool - * is an ODP pool. This function returns the EM event pool that contains the - * given ODP pool as a subpool. - * - * @param odp_pool - * - * @return The EM event pool that contains the subpool 'odp_pool' or - * EM_POOL_UNDEF if 'odp_pool' is not part of any EM event pool. - */ -em_pool_t em_odp_pool2em(odp_pool_t odp_pool); - -/** - * Enqueue external packets into EM (packets are from outside of EM, i.e not - * allocated by EM using em_alloc/_multi()) - * - * @param pkt_tbl Array of external ODP-packets to enqueue into EM as events. - * The 'pkt_tbl[]' array must contain 'num' valid ODP packet - * handles. - * @param num The number of packets in the 'pkt_tbl[]' array, must be >0. 
- * @param queue EM queue into which to sen/enqueue the packets as EM-events. - * - * @return The number of ODP packets successfully send/enqueued as EM-events - */ -int pkt_enqueue(const odp_packet_t pkt_tbl[/*num*/], int num, em_queue_t queue); - -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_ODP_EXT_H */ +/* + * Copyright (c) 2015-2021, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * @defgroup em_odp_ext Conversions & extensions + * Event Machine ODP API extensions and conversion functions between EM and ODP + * @{ + */ + +#ifndef EVENT_MACHINE_ODP_EXT_H +#define EVENT_MACHINE_ODP_EXT_H + +#pragma GCC visibility push(default) + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include + +/** + * Get the associated ODP queue. + * + * The given EM queue must have been created with em_queue_create...() APIs. + * + * @param queue EM queue + * + * @return odp queue if successful, ODP_QUEUE_INVALID on error + */ +odp_queue_t em_odp_queue_odp(em_queue_t queue); + +/** + * Get the associated EM queue. + * + * The associated EM queue must have been created with em_queue_create...() APIs + * + * @param queue ODP queue + * + * @return em queue if successful, EM_QUEUE_UNDEF on error + */ +em_queue_t em_odp_queue_em(odp_queue_t queue); + +/** + * @brief Map the given scheduled ODP pktin event queues to new EM queues. + * + * Creates new EM queues and maps them to use the given scheduled ODP pktin + * event queues. + * Enables direct scheduling of packets as EM events via EM queues. + * EM queues based on scheduled ODP pktin queues are a bit special in how they + * are created and how they are deleted: + * - creation is done via this function by providing the already set up + * scheduled ODP pktin event queues to use. + * - deletion of one of the returned EM queues will not delete the underlying + * ODP pktin event queue. The ODP queues in question are deleted when + * the ODP pktio is terminated. 
+ * The scheduled ODP pktin event queues must have been set up with an
+ * ODP schedule group that belongs to an existing EM queue group. Also the used
+ * priority must be mappable to an EM priority.
+ *
+ * Setup example:
+ * @code
+ * // Configure ODP pktin queues
+ * odp_pktin_queue_param_t pktin_queue_param;
+ * odp_pktin_queue_param_init(&pktin_queue_param);
+ * pktin_queue_param.num_queues = num;
+ * pktin_queue_param.queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ * pktin_queue_param.queue_param.sched.prio = ODP prio mappable to EM prio
+ * pktin_queue_param.queue_param.sched.sync = PARALLEL | ATOMIC | ORDERED;
+ * pktin_queue_param.queue_param.sched.group = em_odp_qgrp2odp(EM qgroup);
+ * ...
+ * ret = odp_pktin_queue_config(pktio, &pktin_queue_param);
+ * if (ret < 0)
+ *         error(...);
+ *
+ * // Obtain ODP pktin event queues used for scheduled packet input
+ * odp_queue_t pktin_sched_queues[num];
+ * ret = odp_pktin_event_queue(pktio, pktin_sched_queues['out'], num);
+ * if (ret != num)
+ *         error(...);
+ *
+ * // Create EM queues mapped to the scheduled ODP pktin event queues
+ * em_queue_t queues_em[num];
+ * ret = em_odp_pktin_event_queues2em(pktin_sched_queues['in'],
+ *                                    queues_em['out'], num);
+ * if (ret != num)
+ *         error(...);
+ *
+ * // Add the EM queues to an EM EO and once the EO has been started it
+ * // will receive pktio events directly from the scheduler.
+ * for (int i = 0; i < num; i++)
+ *         err = em_eo_add_queue_sync(eo, queues_em[i]);
+ * @endcode
+ *
+ * @param[in]  odp_pktin_evqueues Array of ODP pktin event queues to convert to
+ *                                EM-queues. The array must contain 'num' valid
+ *                                ODP-queue handles (as returned by the
+ *                                odp_pktin_event_queue() function).
+ * @param[out] queues             Output array into which the corresponding
+ *                                EM-queue handles are written.
+ *                                Array must fit 'num' entries.
+ * @param      num                Number of entries in 'odp_pktin_evqueues[]'
+ *                                and 'queues[]'.
+ * @return int Number of EM queues created that correspond to the given
+ *             ODP pktin event queues
+ * @retval <0 on failure
+ */
+int em_odp_pktin_event_queues2em(const odp_queue_t odp_pktin_evqueues[/*num*/],
+                                 em_queue_t queues[/*out:num*/], int num);
+
+/**
+ * Get the EM event header size.
+ *
+ * Needed e.g. when configuring a separate ODP packet pool and letting pktio
+ * allocate events usable by EM from there:
+ * @code
+ * odp_pool_param_t::pkt.uarea_size = em_odp_event_hdr_size();
+ * @endcode
+ *
+ * @return EM event header size.
+ */
+uint32_t em_odp_event_hdr_size(void);
+
+/**
+ * Convert EM event handle to ODP event handle.
+ *
+ * @param event EM-event handle
+ *
+ * @return ODP event handle.
+ */
+odp_event_t em_odp_event2odp(em_event_t event);
+
+/**
+ * Convert EM event handles to ODP event handles
+ *
+ * @param      events     Array of EM-events to convert to ODP-events.
+ *                        The 'events[]' array must contain 'num' valid
+ *                        event handles.
+ * @param[out] odp_events Output array into which the corresponding ODP-event
+ *                        handles are written. Array must fit 'num' entries.
+ * @param      num        Number of entries in 'events[]' and 'odp_events[]'.
+ */
+void em_odp_events2odp(const em_event_t events[/*num*/],
+                       odp_event_t odp_events[/*out:num*/], int num);
+
+/**
+ * Convert ODP event handle to EM event handle.
+ *
+ * The event must have been allocated by EM originally.
+ *
+ * @param odp_event ODP-event handle
+ *
+ * @return EM event handle.
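+ *
+ * An editor-added sketch of the separate ODP packet pool setup mentioned
+ * under em_odp_event_hdr_size() above; the pool sizes and pool name are
+ * illustrative assumptions:
+ * @code
+ * odp_pool_param_t pool_param;
+ *
+ * odp_pool_param_init(&pool_param);
+ * pool_param.type = ODP_POOL_PACKET;
+ * pool_param.pkt.num = 2048; // number of packets in the pool
+ * pool_param.pkt.len = 1536; // packet data length in bytes
+ * pool_param.pkt.uarea_size = em_odp_event_hdr_size(); // room for EM hdr
+ *
+ * odp_pool_t pktio_pool = odp_pool_create("pktio-pool", &pool_param);
+ * if (pktio_pool == ODP_POOL_INVALID)
+ *         error(...);
+ * @endcode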
+ */
+em_event_t em_odp_event2em(odp_event_t odp_event);
+
+/**
+ * Convert ODP event handles to EM event handles
+ *
+ * @param      odp_events Array of ODP-events to convert to EM-events.
+ *                        The 'odp_events[]' array must contain 'num' valid
+ *                        ODP-event handles.
+ * @param[out] events     Output array into which the corresponding EM-event
+ *                        handles are written. Array must fit 'num' entries.
+ * @param      num        Number of entries in 'odp_events[]' and 'events[]'.
+ */
+void em_odp_events2em(const odp_event_t odp_events[/*num*/],
+                      em_event_t events[/*out:num*/], int num);
+
+/**
+ * @brief Get the ODP pools used as subpools in a given EM event pool.
+ *
+ * An EM event pool consists of 1 to 'EM_MAX_SUBPOOLS' subpools. Each subpool
+ * is an ODP pool. This function outputs the ODP pool handles of these subpools
+ * into a user-provided array and returns the number of handles written.
+ *
+ * The obtained ODP pools must not be deleted or altered outside of EM,
+ * i.e. these ODP pools must only be deleted as part of an EM event pool
+ * using em_pool_delete().
+ *
+ * ODP pool handles obtained through this function can be used to
+ * - configure ODP pktio to use an ODP pool created via EM (allows for
+ *   better ESV tracking)
+ * - print ODP-level pool statistics with ODP APIs etc.
+ *
+ * Note that direct allocations and frees via ODP APIs will bypass
+ * EM checks (e.g. ESV) and might cause errors unless properly handled:
+ * - use em_odp_event2em() to initialize as an EM event
+ * - use em_event_mark_free() before ODP-free operations (SW- or HW-free)
+ *
+ * @param      pool      EM event pool handle.
+ * @param[out] odp_pools Output array to be filled with the ODP pools used as
+ *                       subpools in the given EM event pool. The array must
+ *                       fit 'num' entries.
+ * @param      num       Number of entries in the 'odp_pools[]' array.
+ *                       Using 'num=EM_MAX_SUBPOOLS' will always be large
+ *                       enough to fit all subpools in the EM event pool.
+ *
+ * @return The number of ODP pools filled into 'odp_pools[]'
+ */
+int em_odp_pool2odp(em_pool_t pool, odp_pool_t odp_pools[/*out*/], int num);
+
+/**
+ * @brief Get the EM event pool that the given ODP pool belongs to
+ *
+ * An EM event pool consists of 1 to 'EM_MAX_SUBPOOLS' subpools. Each subpool
+ * is an ODP pool. This function returns the EM event pool that contains the
+ * given ODP pool as a subpool.
+ *
+ * @param odp_pool ODP pool handle
+ *
+ * @return The EM event pool that contains the subpool 'odp_pool' or
+ *         EM_POOL_UNDEF if 'odp_pool' is not part of any EM event pool.
+ */
+em_pool_t em_odp_pool2em(odp_pool_t odp_pool);
+
+/**
+ * @brief Get the ODP schedule group that corresponds to the given EM queue group
+ *
+ * @param queue_group EM queue group handle
+ *
+ * @return The corresponding ODP schedule group
+ */
+odp_schedule_group_t em_odp_qgrp2odp(em_queue_group_t queue_group);
+
+/**
+ * Enqueue external packets into EM (packets are from outside of EM, i.e. not
+ * allocated by EM using em_alloc/_multi())
+ *
+ * @param pkt_tbl Array of external ODP-packets to enqueue into EM as events.
+ *                The 'pkt_tbl[]' array must contain 'num' valid ODP packet
+ *                handles.
+ * @param num     The number of packets in the 'pkt_tbl[]' array, must be >0.
+ * @param queue   EM queue into which to send/enqueue the packets as EM-events.
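+ *
+ * (Editor-added aside: the "configure ODP pktio to use an ODP pool created
+ * via EM" use case of em_odp_pool2odp() above could look like the following
+ * sketch; "eth0" and the input mode are illustrative assumptions.)
+ * @code
+ * odp_pool_t subpools[EM_MAX_SUBPOOLS];
+ * int num = em_odp_pool2odp(pool, subpools, EM_MAX_SUBPOOLS);
+ *
+ * if (num >= 1) {
+ *         odp_pktio_param_t pktio_param;
+ *
+ *         odp_pktio_param_init(&pktio_param);
+ *         pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ *         // pktio allocates its RX packets from the first EM subpool:
+ *         odp_pktio_t pktio = odp_pktio_open("eth0", subpools[0],
+ *                                            &pktio_param);
+ *         if (pktio == ODP_PKTIO_INVALID)
+ *                 error(...);
+ * }
+ * @endcode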
+ * + * @return The number of ODP packets successfully send/enqueued as EM-events + */ +int pkt_enqueue(const odp_packet_t pkt_tbl[/*num*/], int num, em_queue_t queue); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_ODP_EXT_H */ diff --git a/include/event_machine/platform/event_machine_pool.h b/include/event_machine/platform/event_machine_pool.h index 3710f842..afa7149e 100644 --- a/include/event_machine/platform/event_machine_pool.h +++ b/include/event_machine/platform/event_machine_pool.h @@ -1,445 +1,471 @@ -/* - * Copyright (c) 2018, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EVENT_MACHINE_POOL_H_ -#define EVENT_MACHINE_POOL_H_ - -#pragma GCC visibility push(default) - -/** - * @file - * @defgroup em_pool Event Pool - * Event Machine event pool related services - * @{ - * - * EM events are allocated from event pools with em_alloc() and freed back into - * them with em_free(). The event pools to be allocated from must first be - * created with em_pool_create(). - * - * Note that EM should always provide at least one pool, i.e. 'EM_POOL_DEFAULT' - * that can be used for event allocation. The default pool creation is platform - * specific: it can e.g. be done in 'em_init(conf)' with an appropriate - * default pool config passed via the 'conf' (em_conf_t) parameter. - * Further event pools should be created explicitly with em_pool_create(). - * - * Event pool APIs for pool deletion, lookup, iteration etc. are listed below. - * - * The 'em_pool_cfg_t' type given to em_pool_create() is HW/platform specific - * and is defined in event_machine_hw_types.h - * - * Do not include this from the application, event_machine.h will - * do it for you. - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * EM pool configuration - * - * Configuration of an EM event pool consisting of up to 'EM_MAX_SUBPOOLS' - * subpools, each supporting a specific event payload size. Event allocation, - * i.e. 
em_alloc(), will use the subpool that provides the best fit for the - * requested size. - * - * Example usage: - * @code - * em_pool_cfg_t pool_cfg; - * - * em_pool_cfg_init(&pool_cfg); // init with default values (mandatory) - * pool_cfg.event_type = EM_EVENT_TYPE_PACKET; - * ... - * pool_cfg.num_subpools = 4; - * pool_cfg.subpool[0].size = X; - * pool_cfg.subpool[0].num = Y; - * pool_cfg.subpool[0].cache_size = Z; - * ... - * pool = em_pool_create(..., &pool_cfg); - * @endcode - */ -typedef struct { - /** - * Event type determines the pool type used: - * - EM_EVENT_TYPE_SW creates subpools of type 'ODP_POOL_BUFFER' - * This kind of EM pool CANNOT be used to create events of major - * type EM_EVENT_TYPE_PACKET. - * - EM_EVENT_TYPE_PACKET creates subpools of type 'ODP_POOL_PACKET' - * This kind of EM pool can be used for events of all kinds. - * @note Only major types are considered here, setting minor is error - */ - em_event_type_t event_type; - /** - * Alignment offset in bytes for the event payload start address - * (for all events allocated from this EM pool). - * - * The default EM event payload start address alignment is a - * power-of-two that is at minimum 32 bytes (i.e. 32 B, 64 B, 128 B etc. - * depending on e.g. target cache-line size). - * The 'align_offset.value' option can be used to fine-tune the - * start-address by a small offset to e.g. make room for a small - * SW header before the rest of the payload that might need a specific - * alignment for direct HW-access. - * Example: setting 'align_offset.value = 8' makes sure that the payload - * _after_ 8 bytes will be aligned at minimum (2^x) 32 bytes. - * - * This option conserns all events allocated from the pool and overrides - * the global config file option 'pool.align_offset' for this pool. - */ - struct { - /** - * Select: Use pool-specific align-offset 'value' from below or - * use the global default value from the config file. - * false: Use value from the config file (default). - * true: Use pool-specific value set below. - */ - bool in_use; - /** - * Pool-specific event payload alignment offset value in bytes - * (only evaluated if 'in_use=true'). - * Overrides the config file value for this pool. - * The given 'value' must be a small power-of-two: 2, 4, or 8 - * 0: Explicitly set 'No align offset' for the pool. - */ - uint32_t value; - } align_offset; - - /** - * Event user area size in bytes. - * (for all events allocated from this EM pool). - * - * The user area is located within the event metadata (hdr) and is not - * part of the event payload. The event user area can e.g. be used to - * store additional state data related to the payload contents. EM does - * not initialize the contents of the user area. - * - * This option concerns all events allocated from the pool and overrides - * the global config file option 'pool.user_area_size' for this pool. - */ - struct { - /** - * Select: Use pool-specific event user area 'size' from below - * or use the global default value from the config file. - * false: Use user area size from the config file (default). - * true: Use pool-specific size set below. - */ - bool in_use; - /** - * Pool-specific event user area size in bytes (only evaluated - * if 'in_use=true'). - * Overrides the config file default size for this pool. - * 0: Explicitly set 'No user area' for the pool. - */ - size_t size; - } user_area; - - /** - * Parameters for an EM-pool with '.event_type = EM_EVENT_TYPE_PACKET' - * Ignored for other pool types. 
- */ - struct { - /** - * Pool-specific packet minimum headroom - * - * This option conserns all events allocated from the pool and - * overrides the global config file option 'pool.pkt_headroom' - * for this pool. - */ - struct { - /** - * Select: Use pool-specific packet headroom value from - * below or use the global default value from - * the config file. - * false: Use value from the config file (default). - * true: Use pool-specific value set below. - */ - bool in_use; - /** - * Pool-specific packet minimum headroom in bytes, - * each packet must have at least this much headroom. - * (only evaluated if 'in_use=true'). - * Overrides the config file value for this pool. - * 0: Explicitly set 'No align offset' for the pool. - */ - uint32_t value; - } headroom; - } pkt; - - /** - * Number of subpools within one EM pool, max=EM_MAX_SUBPOOLS - */ - int num_subpools; - struct { - /** - * Event payload size of the subpool (size > 0). - * EM does not initialize the payload data. - */ - uint32_t size; - /** Number of events in the subpool (num > 0) */ - uint32_t num; - /** - * Maximum number of locally cached subpool events per EM-core. - * - * Allocating or freeing events from a core-local event-cache - * can be faster than using the global event subpool. Cached - * events are only available on the local core and can reduce - * the number of globally free events in the subpool, thus - * consider setting 'num > EM-core-count * cache_size'. - * The actual used cache_size will be smaller than or equal to - * the requested value, depending on the implementation. - */ - uint32_t cache_size; - } subpool[EM_MAX_SUBPOOLS]; - - /** - * Internal check - don't touch! - * - * EM will verify that em_pool_cfg_init(pool_cfg) has been called before - * creating a pool with em_pool_create(..., pool_cfg) - */ - uint32_t __internal_check; -} em_pool_cfg_t; - -/** - * EM pool information and usage statistics - */ -typedef struct { - /* Pool name */ - char name[EM_POOL_NAME_LEN]; - /** EM pool handle */ - em_pool_t em_pool; - /** Event type of events allocated from the pool */ - em_event_type_t event_type; - /** Event payload alignment offset for events from the pool */ - uint32_t align_offset; - /** Event user area size for events from the pool */ - size_t user_area_size; - /** Number of subpools within one EM pool, max=EM_MAX_SUBPOOLS */ - int num_subpools; - struct { - /** Event payload size of the subpool */ - uint32_t size; - /** Number of events in the subpool */ - uint32_t num; - /** Max number of locally cached subpool events per EM-core */ - uint32_t cache_size; - /** - * Number of events allocated from the subpool. - * Only if EM config file: pool.statistics_enable=true, - * otherwise .used=0 - */ - uint32_t used; - /** - * Number of events free in the subpool. - * Only if EM config file: pool.statistics_enable=true, - * otherwise .free=0 - */ - uint32_t free; - } subpool[EM_MAX_SUBPOOLS]; -} em_pool_info_t; - -/** - * Initialize EM-pool configuration parameters for em_pool_create() - * - * Initialize em_pool_cfg_t to default values for all fields. - * After initialization, the user further needs to update the fields of - * 'em_pool_cfg_t' with appropriate sizing information before calling - * em_pool_create(). - * - * Always initialize 'pool_cfg' first with em_pool_cfg_init(pool_cfg) to - * ensure backwards compatibility with potentially added new options. 
- * - * @param pool_cfg Address of the em_pool_cfg_t to be initialized - * - * @see em_pool_cfg_t and em_pool_create() - */ -void em_pool_cfg_init(em_pool_cfg_t *const pool_cfg); - -/** - * Create a new EM event pool - * - * Create an EM event pool that can be used for event allocation. The event pool - * is created and configured according to the platform/HW specific em_pool_cfg_t - * given as argument. - * - * @param name Pool name (optional, NULL ok) - * @param pool A specific pool handle to be used or EM_POOL_UNDEF to let - * EM decide (i.e. use a free handle). - * @param pool_cfg Pointer to the pool config - * - * @return EM pool handle or EM_POOL_UNDEF on error - * - * @see em_pool_cfg_t and em_pool_cfg_init() - */ -em_pool_t -em_pool_create(const char *name, em_pool_t pool, const em_pool_cfg_t *pool_cfg); - -/** - * Delete an existing EM event pool - * - * @param pool EM event pool handle of the pool to be deleted. - * - * @return EM_OK if successful - */ -em_status_t -em_pool_delete(em_pool_t pool); - -/** - * Find an EM event pool by name. - * - * Finds a pool by the given name (exact match). An empty string will not match - * anything. The search is case sensitive. The function will return the first - * match only if there are duplicate names. - * - * @param name the name to look for - * - * @return pool handle or EM_POOL_UNDEF if not found - * - * @see em_pool_create() - */ -em_pool_t -em_pool_find(const char *name); - -/** - * Get the name of an EM event pool. - * - * A copy of the name string (up to 'maxlen' characters) is written to the user - * given buffer. - * The string is always null terminated, even if the given buffer length is less - * than the name length. - * - * If the event pool has no name, the function returns 0 and writes an - * empty string. - * - * @param pool EM event pool - * @param[out] name Destination buffer - * @param maxlen Maximum length (including the terminating '0') - * - * @return Number of characters written (excludes the terminating '0'). - */ -size_t -em_pool_get_name(em_pool_t pool, char *name /*out*/, size_t maxlen); - -/** - * Initialize event pool iteration and return the first event pool handle. - * - * Can be used to initialize the iteration to retrieve all created event pools - * for debugging or management purposes. Use em_pool_get_next() after this call - * until it returns EM_POOL_UNDEF. - * A new call to em_pool_get_first() resets the iteration, which is maintained - * per core (thread). The operation should be completed in one go before - * returning from the EO's event receive function (or start/stop). - * - * The number of event pools (output arg 'num') may not match the amount of - * event pools actually returned by iterating using em_pool_get_next() - * if event pools are added or removed in parallel by another core. The order - * of the returned event pool handles is undefined. - * - * @code - * unsigned int num; - * em_pool_t pool = em_pool_get_first(&num); - * while (pool != EM_POOL_UNDEF) { - * pool = em_pool_get_next(); - * } - * @endcode - * - * @param[out] num Pointer to an unsigned int to store the amount of - * event pools into - * @return The first event pool handle or EM_POOL_UNDEF if none exist - * - * @see em_pool_get_next() - */ -em_pool_t -em_pool_get_first(unsigned int *num); - -/** - * Return the next event pool handle. - * - * Continues the event pool iteration started by em_pool_get_first() - * and returns the next event pool handle. 
- * - * @return The next event pool handle or EM_POOL_UNDEF if the atomic - * group iteration is completed (i.e. no more event pools available). - * - * @see em_pool_get_first() - */ -em_pool_t -em_pool_get_next(void); - -/** - * Retieve information about an EM pool. - * - * @param pool EM pool handle - * @param[out] pool_info Pointer to pool info that will be written - * - * @return EM_OK if successful - * - * @note EM config file: pool.statistics_enable=true for usage statistics, - * otherwise only basic info is output omitting pool usage information - * (= all zeros). - */ -em_status_t -em_pool_info(em_pool_t pool, em_pool_info_t *pool_info /*out*/); - -/** - * Helper function to print EM Pool information for a given pool. - * - * Uses em_pool_info() when printing the pool information. - * - * @param pool EM pool handle - * - * @note EM config file: pool.statistics_enable=true for usage statistics, - * otherwise only basic info is output omitting pool usage information - * (= all zeros). - */ -void -em_pool_info_print(em_pool_t pool); - -/** - * Helper function to print EM Pool information for all pools in the system. - * - * Uses em_pool_info() when printing the pool information. - * - * @note EM config file: pool.statistics_enable=true for usage statistics, - * otherwise only basic info is output omitting pool usage information - * (= all zeros). - */ -void -em_pool_info_print_all(void); - -/** - * @} - */ -#ifdef __cplusplus -} -#endif - -#pragma GCC visibility pop -#endif /* EVENT_MACHINE_POOL_H_ */ +/* + * Copyright (c) 2018, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EVENT_MACHINE_POOL_H_ +#define EVENT_MACHINE_POOL_H_ + +#pragma GCC visibility push(default) + +/** + * @file + * @defgroup em_pool Event Pool + * Event Machine event pool related services + * @{ + * + * EM events are allocated from event pools with em_alloc() and freed back into + * them with em_free(). The event pools to be allocated from must first be + * created with em_pool_create(). 
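+ *
+ * A minimal allocation round-trip (editor-added sketch; 'pool' is assumed to
+ * be a valid handle, e.g. EM_POOL_DEFAULT or one returned by
+ * em_pool_create()):
+ * @code
+ * em_event_t event = em_alloc(256, EM_EVENT_TYPE_SW, pool);
+ *
+ * if (event != EM_EVENT_UNDEF) {
+ *         uint8_t *payload = em_event_pointer(event);
+ *
+ *         payload[0] = 0xAB; // EM does not initialize the payload
+ *         ...
+ *         em_free(event);    // return the event to its pool
+ * }
+ * @endcode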
+ *
+ * Note that EM should always provide at least one pool, i.e. 'EM_POOL_DEFAULT',
+ * that can be used for event allocation. The default pool creation is platform
+ * specific: it can e.g. be done in 'em_init(conf)' with an appropriate default
+ * pool configuration, which is either given in the runtime config file through
+ * the 'startup_pools' option or passed via the 'conf' (em_conf_t) parameter of
+ * em_init().
+ *
+ * In addition to the default pool, startup pools configured in the runtime
+ * config file through the 'startup_pools' option are also created during
+ * em_init().
+ *
+ * Further event pools should be created explicitly with em_pool_create().
+ *
+ * Event pool APIs for pool deletion, lookup, iteration etc. are listed below.
+ *
+ * The 'em_pool_cfg_t' type given to em_pool_create() is HW/platform specific
+ * and is defined in event_machine_hw_types.h
+ *
+ * Do not include this from the application, event_machine.h will
+ * do it for you.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * EM pool configuration
+ *
+ * Configuration of an EM event pool consisting of up to 'EM_MAX_SUBPOOLS'
+ * subpools, each supporting a specific event payload size. Event allocation,
+ * i.e. em_alloc(), will use the subpool that provides the best fit for the
+ * requested size.
+ *
+ * Example usage:
+ * @code
+ * em_pool_cfg_t pool_cfg;
+ *
+ * em_pool_cfg_init(&pool_cfg); // init with default values (mandatory)
+ * pool_cfg.event_type = EM_EVENT_TYPE_PACKET;
+ * ...
+ * pool_cfg.num_subpools = 4;
+ * pool_cfg.subpool[0].size = X;
+ * pool_cfg.subpool[0].num = Y;
+ * pool_cfg.subpool[0].cache_size = Z;
+ * ...
+ * pool = em_pool_create(..., &pool_cfg);
+ * @endcode
+ */
+typedef struct {
+	/**
+	 * Event type determines the pool type used:
+	 * - EM_EVENT_TYPE_SW creates subpools of type 'ODP_POOL_BUFFER'
+	 *   This kind of EM pool CANNOT be used to create events of major
+	 *   type EM_EVENT_TYPE_PACKET.
+	 * - EM_EVENT_TYPE_PACKET creates subpools of type 'ODP_POOL_PACKET'
+	 *   This kind of EM pool can be used for events of all kinds.
+	 * - EM_EVENT_TYPE_VECTOR creates subpools of type 'ODP_POOL_VECTOR'
+	 *   This kind of EM pool can ONLY be used for creating event vectors.
+	 * @note Only major types are considered here, setting a minor type
+	 *       is an error
+	 */
+	em_event_type_t event_type;
+	/**
+	 * Alignment offset in bytes for the event payload start address
+	 * (for all events allocated from this EM pool).
+	 *
+	 * Only valid for pools with event_type EM_EVENT_TYPE_SW or
+	 * EM_EVENT_TYPE_PACKET (i.e. ignored for EM_EVENT_TYPE_VECTOR pools).
+	 *
+	 * The default EM event payload start address alignment is a
+	 * power-of-two that is at minimum 32 bytes (i.e. 32 B, 64 B, 128 B etc.
+	 * depending on e.g. target cache-line size).
+	 * The 'align_offset.value' option can be used to fine-tune the
+	 * start-address by a small offset to e.g. make room for a small
+	 * SW header before the rest of the payload that might need a specific
+	 * alignment for direct HW-access.
+	 * Example: setting 'align_offset.value = 8' makes sure that the payload
+	 * _after_ 8 bytes will be aligned at minimum (2^x) 32 bytes.
+	 *
+	 * This option concerns all events allocated from the pool and overrides
+	 * the global config file option 'pool.align_offset' for this pool.
+	 */
+	struct {
+		/**
+		 * Select: Use pool-specific align-offset 'value' from below or
+		 *         use the global default value 'pool.align_offset'
+		 *         from the config file.
+		 * false: Use 'pool.align_offset' from the config file (default)
+		 * true:  Use pool-specific value set below.
+		 */
+		bool in_use;
+		/**
+		 * Pool-specific event payload alignment offset value in bytes
+		 * (only evaluated if 'in_use=true').
+		 * Overrides the config file value 'pool.align_offset' for this
+		 * pool.
+		 * The given 'value' must be a small power-of-two: 2, 4, or 8
+		 * 0: Explicitly set 'No align offset' for the pool.
+		 */
+		uint32_t value;
+	} align_offset;
+
+	/**
+	 * Event user area size in bytes.
+	 * (for all events allocated from this EM pool).
+	 *
+	 * The user area is located within the event metadata (hdr) and is not
+	 * part of the event payload. The event user area can e.g. be used to
+	 * store additional state data related to the payload contents. EM does
+	 * not initialize the contents of the user area.
+	 *
+	 * This option concerns all events allocated from the pool and overrides
+	 * the global config file option 'pool.user_area_size' for this pool.
+	 */
+	struct {
+		/**
+		 * Select: Use pool-specific event user area 'size' from below
+		 *         or use the global default value 'pool.user_area_size'
+		 *         from the config file.
+		 * false: Use 'pool.user_area_size' from config file (default).
+		 * true:  Use pool-specific size set below.
+		 */
+		bool in_use;
+		/**
+		 * Pool-specific event user area size in bytes (only evaluated
+		 * if 'in_use=true').
+		 * Overrides the config file 'pool.user_area_size' for this pool
+		 * 0: Explicitly set 'No user area' for the pool.
+		 */
+		size_t size;
+	} user_area;
+
+	/**
+	 * Parameters for an EM-pool with '.event_type = EM_EVENT_TYPE_PACKET'
+	 * Ignored for other pool types.
+	 */
+	struct {
+		/**
+		 * Pool-specific packet minimum headroom
+		 *
+		 * This option concerns all events allocated from the pool and
+		 * overrides the global config file option 'pool.pkt_headroom'
+		 * for this pool.
+		 */
+		struct {
+			/**
+			 * Select: Use pool-specific packet headroom value from
+			 *         below or use the global default value
+			 *         'pool.pkt_headroom' from the config file.
+			 * false: Use 'pool.pkt_headroom' from the config file
+			 *        (default).
+			 * true:  Use pool-specific value set below.
+			 */
+			bool in_use;
+			/**
+			 * Pool-specific packet minimum headroom in bytes,
+			 * each packet must have at least this much headroom.
+			 * (only evaluated if 'in_use=true').
+			 * Overrides the config file value 'pool.pkt_headroom'
+			 * for this pool.
+			 * 0: Explicitly set 'No headroom' for the pool.
+			 */
+			uint32_t value;
+		} headroom;
+	} pkt;
+
+	/**
+	 * Number of subpools within one EM pool, min=1, max=EM_MAX_SUBPOOLS
+	 */
+	int num_subpools;
+	/**
+	 * Subpool params array: .subpool[num_subpools]
+	 */
+	struct {
+		/**
+		 * .event_type = EM_EVENT_TYPE_SW or EM_EVENT_TYPE_PACKET:
+		 *	Event payload size of the subpool (size > 0) in bytes (B).
+		 *	EM does not initialize the payload data.
+		 * .event_type = EM_EVENT_TYPE_VECTOR:
+		 *	Max number of events in a vector from the subpool, i.e.
+		 *	'number of em_event_t entries in the vector's event-table[]'.
+		 *	EM does not initialize the vector.
+		 */
+		uint32_t size;
+
+		/** Number of events in the subpool (num > 0) */
+		uint32_t num;
+
+		/**
+		 * Maximum number of locally cached subpool events per EM-core.
+		 *
+		 * Allocating or freeing events from a core-local event-cache
+		 * can be faster than using the global event subpool. Cached
+		 * events are only available on the local core and can reduce
+		 * the number of globally free events in the subpool, thus
+		 * consider setting 'num > EM-core-count * cache_size'.
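+		 *
+		 * (Editor-added sketch of the subpool parameters above used
+		 * to create an EM_EVENT_TYPE_VECTOR pool; all sizes are
+		 * illustrative assumptions.)
+		 * @code
+		 * em_pool_cfg_t cfg;
+		 *
+		 * em_pool_cfg_init(&cfg);
+		 * cfg.event_type = EM_EVENT_TYPE_VECTOR;
+		 * cfg.num_subpools = 1;
+		 * cfg.subpool[0].size = 8;   // max em_event_t entries/vector
+		 * cfg.subpool[0].num = 1024; // number of vectors in subpool
+		 * cfg.subpool[0].cache_size = 16; // per-core cached vectors
+		 *
+		 * em_pool_t vec_pool = em_pool_create("vec-pool",
+		 *                                     EM_POOL_UNDEF, &cfg);
+		 * if (vec_pool == EM_POOL_UNDEF)
+		 *         error(...);
+		 * @endcode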
+ * The actual used cache_size will be smaller than or equal to + * the requested value, depending on the implementation. + */ + uint32_t cache_size; + } subpool[EM_MAX_SUBPOOLS]; + + /** + * Internal check - don't touch! + * + * EM will verify that em_pool_cfg_init(pool_cfg) has been called before + * creating a pool with em_pool_create(..., pool_cfg) + */ + uint32_t __internal_check; +} em_pool_cfg_t; + +/** + * EM pool information and usage statistics + */ +typedef struct { + /* Pool name */ + char name[EM_POOL_NAME_LEN]; + /** EM pool handle */ + em_pool_t em_pool; + /** Event type of events allocated from the pool */ + em_event_type_t event_type; + /** Event payload alignment offset for events from the pool */ + uint32_t align_offset; + /** Event user area size for events from the pool */ + size_t user_area_size; + /** Number of subpools within one EM pool, max=EM_MAX_SUBPOOLS */ + int num_subpools; + struct { + /** Event payload size of the subpool */ + uint32_t size; + /** Number of events in the subpool */ + uint32_t num; + /** Max number of locally cached subpool events per EM-core */ + uint32_t cache_size; + /** + * Number of events allocated from the subpool. + * Only if EM config file: pool.statistics_enable=true, + * otherwise .used=0 + */ + uint32_t used; + /** + * Number of events free in the subpool. + * Only if EM config file: pool.statistics_enable=true, + * otherwise .free=0 + */ + uint32_t free; + } subpool[EM_MAX_SUBPOOLS]; +} em_pool_info_t; + +/** + * Initialize EM-pool configuration parameters for em_pool_create() + * + * Initialize em_pool_cfg_t to default values for all fields. + * After initialization, the user further needs to update the fields of + * 'em_pool_cfg_t' with appropriate sizing information before calling + * em_pool_create(). + * + * Always initialize 'pool_cfg' first with em_pool_cfg_init(pool_cfg) to + * ensure backwards compatibility with potentially added new options. + * + * @param pool_cfg Address of the em_pool_cfg_t to be initialized + * + * @see em_pool_cfg_t and em_pool_create() + */ +void em_pool_cfg_init(em_pool_cfg_t *const pool_cfg); + +/** + * Create a new EM event pool + * + * Create an EM event pool that can be used for event allocation. The event pool + * is created and configured according to the platform/HW specific em_pool_cfg_t + * given as argument. + * + * @param name Pool name (optional, NULL ok) + * @param pool A specific pool handle to be used or EM_POOL_UNDEF to let + * EM decide (i.e. use a free handle). + * @param pool_cfg Pointer to the pool config + * + * @return EM pool handle or EM_POOL_UNDEF on error + * + * @see em_pool_cfg_t and em_pool_cfg_init() + */ +em_pool_t +em_pool_create(const char *name, em_pool_t pool, const em_pool_cfg_t *pool_cfg); + +/** + * Delete an existing EM event pool + * + * @param pool EM event pool handle of the pool to be deleted. + * + * @return EM_OK if successful + */ +em_status_t +em_pool_delete(em_pool_t pool); + +/** + * Find an EM event pool by name. + * + * Finds a pool by the given name (exact match). An empty string will not match + * anything. The search is case sensitive. The function will return the first + * match only if there are duplicate names. + * + * @param name the name to look for + * + * @return pool handle or EM_POOL_UNDEF if not found + * + * @see em_pool_create() + */ +em_pool_t +em_pool_find(const char *name); + +/** + * Get the name of an EM event pool. + * + * A copy of the name string (up to 'maxlen' characters) is written to the user + * given buffer. 
+ * The string is always null terminated, even if the given buffer length is less
+ * than the name length.
+ *
+ * If the event pool has no name, the function returns 0 and writes an
+ * empty string.
+ *
+ * @param pool EM event pool
+ * @param[out] name Destination buffer
+ * @param maxlen Maximum length (including the terminating '\0')
+ *
+ * @return Number of characters written (excludes the terminating '\0').
+ */
+size_t
+em_pool_get_name(em_pool_t pool, char *name /*out*/, size_t maxlen);
+
+/**
+ * Initialize event pool iteration and return the first event pool handle.
+ *
+ * Can be used to initialize the iteration to retrieve all created event pools
+ * for debugging or management purposes. Use em_pool_get_next() after this call
+ * until it returns EM_POOL_UNDEF.
+ * A new call to em_pool_get_first() resets the iteration, which is maintained
+ * per core (thread). The operation should be completed in one go before
+ * returning from the EO's event receive function (or start/stop).
+ *
+ * The number of event pools (output arg 'num') may not match the number of
+ * event pools actually returned by iterating using em_pool_get_next()
+ * if event pools are added or removed in parallel by another core. The order
+ * of the returned event pool handles is undefined.
+ *
+ * @code
+ * unsigned int num;
+ * em_pool_t pool = em_pool_get_first(&num);
+ * while (pool != EM_POOL_UNDEF) {
+ * pool = em_pool_get_next();
+ * }
+ * @endcode
+ *
+ * @param[out] num Pointer to an unsigned int to store the number of
+ * event pools into
+ * @return The first event pool handle or EM_POOL_UNDEF if none exist
+ *
+ * @see em_pool_get_next()
+ */
+em_pool_t
+em_pool_get_first(unsigned int *num);
+
+/**
+ * Return the next event pool handle.
+ *
+ * Continues the event pool iteration started by em_pool_get_first()
+ * and returns the next event pool handle.
+ *
+ * @return The next event pool handle or EM_POOL_UNDEF if the event
+ * pool iteration is completed (i.e. no more event pools available).
+ *
+ * @see em_pool_get_first()
+ */
+em_pool_t
+em_pool_get_next(void);
+
+/**
+ * Retrieve information about an EM pool.
+ *
+ * @param pool EM pool handle
+ * @param[out] pool_info Pointer to pool info that will be written
+ *
+ * @return EM_OK if successful
+ *
+ * @note EM config file: pool.statistics_enable=true for usage statistics,
+ * otherwise only basic info is output, omitting pool usage information
+ * (= all zeros).
+ */
+em_status_t
+em_pool_info(em_pool_t pool, em_pool_info_t *pool_info /*out*/);
+
+/**
+ * Helper function to print EM Pool information for a given pool.
+ *
+ * Uses em_pool_info() when printing the pool information.
+ *
+ * @param pool EM pool handle
+ *
+ * @note EM config file: pool.statistics_enable=true for usage statistics,
+ * otherwise only basic info is output, omitting pool usage information
+ * (= all zeros).
+ */
+void
+em_pool_info_print(em_pool_t pool);
+
+/**
+ * Helper function to print EM Pool information for all pools in the system.
+ *
+ * Uses em_pool_info() when printing the pool information.
+ *
+ * @note EM config file: pool.statistics_enable=true for usage statistics,
+ * otherwise only basic info is output, omitting pool usage information
+ * (= all zeros).
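+ *
+ * Roughly equivalent to iterating over all pools (illustrative):
+ * @code
+ * unsigned int num;
+ * em_pool_t pool = em_pool_get_first(&num);
+ *
+ * while (pool != EM_POOL_UNDEF) {
+ * em_pool_info_print(pool);
+ * pool = em_pool_get_next();
+ * }
+ * @endcode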
+ */ +void +em_pool_info_print_all(void); + +/** + * @} + */ +#ifdef __cplusplus +} +#endif + +#pragma GCC visibility pop +#endif /* EVENT_MACHINE_POOL_H_ */ diff --git a/m4/em_libconfig.m4 b/m4/em_libconfig.m4 index 41790a27..0ff6920c 100644 --- a/m4/em_libconfig.m4 +++ b/m4/em_libconfig.m4 @@ -3,7 +3,7 @@ ########################################################################## m4_define([_em_config_version_generation], [0]) m4_define([_em_config_version_major], [0]) -m4_define([_em_config_version_minor], [14]) +m4_define([_em_config_version_minor], [15]) m4_define([_em_config_version], [_em_config_version_generation._em_config_version_major._em_config_version_minor]) diff --git a/programs/common/cm_error_handler.h b/programs/common/cm_error_handler.h index 89ed656e..59656dd6 100644 --- a/programs/common/cm_error_handler.h +++ b/programs/common/cm_error_handler.h @@ -1,60 +1,61 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef TEST_ERROR_HANDLER_H -#define TEST_ERROR_HANDLER_H - -#ifdef __cplusplus -extern "C" { -#endif - -em_status_t -test_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args); - -/* Define the test_error(...) macro */ -#define test_error(error, escope, format, ...) \ - em_error((error), (escope), __FILE__, __func__, __LINE__, \ - (format), ## __VA_ARGS__) - -#define test_fatal_if(cond, format, ...) { \ - if (unlikely((cond))) { \ - em_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, \ - __FILE__, __func__, __LINE__, \ - (format), ## __VA_ARGS__); \ - __builtin_unreachable(); \ - } \ -} - -#ifdef __cplusplus -} -#endif - -#endif +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef TEST_ERROR_HANDLER_H +#define TEST_ERROR_HANDLER_H + +#ifdef __cplusplus +extern "C" { +#endif + +em_status_t +test_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, + va_list args); + +/* Define the test_error(...) macro */ +#define test_error(error, escope, format, ...) \ + em_error((error), (escope), __FILE__, __func__, __LINE__, \ + (format), ## __VA_ARGS__) + +#define test_fatal_if(cond, format, ...) \ + do { \ + if (unlikely((cond))) { \ + em_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, \ + __FILE__, __func__, __LINE__, \ + (format), ## __VA_ARGS__); \ + __builtin_unreachable(); \ + } \ + } while (0) + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/programs/common/cm_pktio.c b/programs/common/cm_pktio.c index e9ce6e28..21e3bb28 100644 --- a/programs/common/cm_pktio.c +++ b/programs/common/cm_pktio.c @@ -1,1199 +1,1431 @@ -/* - * Copyright (c) 2015-2022, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - /** - * @file - * - * EM-ODP packet I/O setup - */ -#include -#include -#include -#include -#include - -#include "cm_setup.h" -#include "cm_pktio.h" - -#define PKTIO_PKT_POOL_NUM_BUFS (32 * 1024) -#define PKTIO_PKT_POOL_BUF_SIZE 1536 - -static pktio_shm_t *pktio_shm; -static __thread pktio_locm_t pktio_locm ODP_ALIGNED_CACHE; - -static inline tx_burst_t *tx_drain_burst_acquire(void); -static inline int pktin_queue_acquire(odp_pktin_queue_t **pktin_queue_ptr /*out*/); -static inline odp_queue_t plain_queue_acquire(void); - -static const char *pktin_mode_str(pktin_mode_t in_mode) -{ - const char *str; - - switch (in_mode) { - case DIRECT_RECV: - str = "DIRECT_RECV"; - break; - case PLAIN_QUEUE: - str = "PLAIN_QUEUE"; - break; - case SCHED_PARALLEL: - str = "SCHED_PARALLEL"; - break; - case SCHED_ATOMIC: - str = "SCHED_ATOMIC"; - break; - case SCHED_ORDERED: - str = "SCHED_ORDERED"; - break; - default: - str = "UNKNOWN"; - break; - } - - return str; -} - -void pktio_mem_reserve(void) -{ - odp_shm_t shm; - uint32_t flags = 0; - - /* Sanity check: em_shm should not be set yet */ - if (unlikely(pktio_shm != NULL)) - APPL_EXIT_FAILURE("pktio shared memory ptr set - already initialized?"); - -#if ODP_VERSION_API_NUM(1, 33, 0) > ODP_VERSION_API - flags |= ODP_SHM_SINGLE_VA; -#else - odp_shm_capability_t shm_capa; - int ret = odp_shm_capability(&shm_capa); - - if (unlikely(ret)) - APPL_EXIT_FAILURE("shm capability error:%d", ret); - - if (shm_capa.flags & ODP_SHM_SINGLE_VA) - flags |= ODP_SHM_SINGLE_VA; -#endif - /* Reserve packet I/O shared memory */ - shm = odp_shm_reserve("pktio_shm", sizeof(pktio_shm_t), - ODP_CACHE_LINE_SIZE, flags); - - if (unlikely(shm == ODP_SHM_INVALID)) - APPL_EXIT_FAILURE("pktio shared mem reserve failed."); - - pktio_shm = odp_shm_addr(shm); - if (unlikely(pktio_shm == NULL)) - APPL_EXIT_FAILURE("obtaining pktio shared mem addr failed."); - - memset(pktio_shm, 0, sizeof(pktio_shm_t)); -} - -void pktio_mem_lookup(bool is_thread_per_core) -{ - odp_shm_t shm; - pktio_shm_t *shm_addr; - - shm = odp_shm_lookup("pktio_shm"); - - shm_addr = odp_shm_addr(shm); - if (unlikely(shm_addr == NULL)) - APPL_EXIT_FAILURE("pktio shared mem addr lookup failed."); - - /* - * Set pktio_shm in process-per-core mode, each process has own pointer. - */ - if (!is_thread_per_core && pktio_shm != shm_addr) - pktio_shm = shm_addr; -} - -void pktio_mem_free(void) -{ - odp_shm_t shm; - - shm = odp_shm_lookup("pktio_shm"); - if (unlikely(shm == ODP_SHM_INVALID)) - APPL_EXIT_FAILURE("pktio shared mem lookup for free failed."); - - if (odp_shm_free(shm) != 0) - APPL_EXIT_FAILURE("pktio shared mem free failed."); - pktio_shm = NULL; -} - -/** - * Helper to pktio_pool_create(): create the pktio pool as an EM event-pool - */ -static void pktio_pool_create_em(int if_count) -{ - /* - * Create the pktio pkt pool used for actual input pkts. 
- * Create the pool as an EM-pool (and convert into an ODP-pool where - * needed) to be able to utilize EM's Event State Verification (ESV) - * in the 'esv.prealloc_pools = true' mode (see config/em-odp.conf). - */ - odp_pool_capability_t pool_capa; - em_pool_cfg_t pool_cfg; - em_pool_t pool; - - if (odp_pool_capability(&pool_capa) != 0) - APPL_EXIT_FAILURE("can't get odp-pool capability"); - - em_pool_cfg_init(&pool_cfg); - pool_cfg.event_type = EM_EVENT_TYPE_PACKET; - pool_cfg.num_subpools = 1; - pool_cfg.subpool[0].size = PKTIO_PKT_POOL_BUF_SIZE; - pool_cfg.subpool[0].num = if_count * PKTIO_PKT_POOL_NUM_BUFS; - /* Use max thread-local pkt-cache size to speed up pktio allocs */ - pool_cfg.subpool[0].cache_size = pool_capa.pkt.max_cache_size; - pool = em_pool_create("pktio-pkt-pool", EM_POOL_UNDEF, &pool_cfg); - if (pool == EM_POOL_UNDEF) - APPL_EXIT_FAILURE("pktio pool creation failed"); - - /* Convert: EM-pool to ODP-pool */ - odp_pool_t odp_pool = ODP_POOL_INVALID; - int ret = em_odp_pool2odp(pool, &odp_pool, 1); - - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("EM pktio pool creation failed:%d", ret); - - /* Store the EM pktio pool and the corresponding ODP subpool */ - pktio_shm->pools.pktpool_em = pool; - pktio_shm->pools.pktpool_odp = odp_pool; - - odp_pool_print(pktio_shm->pools.pktpool_odp); -} - -/** - * Helper to pktio_pool_create(): create the pktio pool as an ODP pkt-pool - */ -static void pktio_pool_create_odp(int if_count) -{ - odp_pool_param_t pool_params; - - odp_pool_param_init(&pool_params); - pool_params.pkt.num = if_count * PKTIO_PKT_POOL_NUM_BUFS; - /* pool_params.pkt.max_num = default */ - pool_params.pkt.len = PKTIO_PKT_POOL_BUF_SIZE; - pool_params.pkt.max_len = PKTIO_PKT_POOL_BUF_SIZE; - pool_params.pkt.seg_len = PKTIO_PKT_POOL_BUF_SIZE; - - pool_params.type = ODP_POOL_PACKET; - pool_params.pkt.uarea_size = em_odp_event_hdr_size(); - - odp_pool_t odp_pool = odp_pool_create("pktio-pkt-pool", &pool_params); - - if (odp_pool == ODP_POOL_INVALID) - APPL_EXIT_FAILURE("pktio pool creation failed"); - - /* Store the ODP pktio pool */ - pktio_shm->pools.pktpool_odp = odp_pool; - pktio_shm->pools.pktpool_em = EM_POOL_UNDEF; - - odp_pool_print(pktio_shm->pools.pktpool_odp); -} - -/** - * Create the memory pool used by pkt-io - */ -void pktio_pool_create(int if_count, bool pktpool_em) -{ - /* - * Create the pktio pkt pool used for actual input pkts. - * Create the pool either as an EM- or ODP-pool. 
- */ - if (pktpool_em) - pktio_pool_create_em(if_count); - else - pktio_pool_create_odp(if_count); -} - -/** - * Helper to pktio_pool_destroy(): destroy the EM event-pool used for pktio - */ -static void pktio_pool_destroy_em(void) -{ - APPL_PRINT("%s(): deleting the EM pktio-pool:\n", __func__); - em_pool_info_print(pktio_shm->pools.pktpool_em); - - if (em_pool_delete(pktio_shm->pools.pktpool_em) != EM_OK) - APPL_EXIT_FAILURE("EM pktio-pool delete failed."); - - pktio_shm->pools.pktpool_em = EM_POOL_UNDEF; - pktio_shm->pools.pktpool_odp = ODP_POOL_INVALID; -} - -/** - * Helper to pktio_pool_destroy(): destroy the ODP pkt-pool used for pktio - */ -static void pktio_pool_destroy_odp(void) -{ - APPL_PRINT("%s(): destroying the ODP pktio-pool\n", __func__); - if (odp_pool_destroy(pktio_shm->pools.pktpool_odp) != 0) - APPL_EXIT_FAILURE("ODP pktio-pool destroy failed."); - - pktio_shm->pools.pktpool_odp = ODP_POOL_INVALID; -} - -/** - * Destroy the memory pool used by pkt-io - */ -void pktio_pool_destroy(bool pktpool_em) -{ - if (pktpool_em) - pktio_pool_destroy_em(); - else - pktio_pool_destroy_odp(); -} - -void pktio_init(const appl_conf_t *appl_conf) -{ - pktin_mode_t in_mode = appl_conf->pktio.in_mode; - odp_stash_capability_t stash_capa; - odp_stash_param_t stash_param; - odp_stash_t stash; - int ret; - - if (in_mode != DIRECT_RECV && in_mode != PLAIN_QUEUE) { - APPL_EXIT_FAILURE("Unsupported pktin-mode:%s(%d)\n", - pktin_mode_str(in_mode), in_mode); - } - - pktio_shm->ifs.count = appl_conf->pktio.if_count; - pktio_shm->ifs.num_created = 0; - pktio_shm->default_queue = EM_QUEUE_UNDEF; - - pktio_shm->pktin.in_mode = in_mode; - pktio_shm->pktin.pktin_queue_stash = ODP_STASH_INVALID; - - ret = odp_stash_capability(&stash_capa, ODP_STASH_TYPE_FIFO); - if (ret != 0) - APPL_EXIT_FAILURE("odp_stash_capability() fails:%d", ret); - - /* - * Create a stash to hold the shared queues used in pkt input. Each core - * needs to get one queue to be able to use it to receive packets. 
- * DIRECT_RECV-mode: the stash contains pointers to odp_pktin_queue_t:s - * PLAIN_QUEUE-mode: the stash contains odp_queue_t:s - */ - odp_stash_param_init(&stash_param); - stash_param.type = ODP_STASH_TYPE_FIFO; - stash_param.put_mode = ODP_STASH_OP_MT; - stash_param.get_mode = ODP_STASH_OP_MT; - stash_param.num_obj = PKTIO_MAX_IN_QUEUES * IF_MAX_NUM; - if (stash_param.num_obj > stash_capa.max_num_obj) - APPL_EXIT_FAILURE("Unsupported odp-stash number of objects:%" PRIu64 "", - stash_param.num_obj); - stash_param.obj_size = MAX(sizeof(odp_queue_t), sizeof(odp_pktin_queue_t *)); - if (!POWEROF2(stash_param.obj_size) || - stash_param.obj_size != sizeof(uintptr_t) || - stash_param.obj_size > stash_capa.max_obj_size) { - APPL_EXIT_FAILURE("Unsupported odp-stash object handle size:%u, max:%u", - stash_param.obj_size, stash_capa.max_obj_size); - } - stash_param.cache_size = 0; /* No core local caching */ - - stash = odp_stash_create("pktin.pktin_queue_stash", &stash_param); - if (stash == ODP_STASH_INVALID) - APPL_EXIT_FAILURE("odp_stash_create() fails"); - - pktio_shm->pktin.pktin_queue_stash = stash; - - /* - * Create a stash to hold the shared tx-burst buffers, - * used when draining the available tx-burst buffers - */ - odp_stash_param_init(&stash_param); - stash_param.type = ODP_STASH_TYPE_FIFO; - stash_param.put_mode = ODP_STASH_OP_MT; - stash_param.get_mode = ODP_STASH_OP_MT; - stash_param.num_obj = MAX_TX_BURST_BUFS * IF_MAX_NUM; - if (stash_param.num_obj > stash_capa.max_num_obj) - APPL_EXIT_FAILURE("Unsupported odp-stash number of objects:%" PRIu64 "", - stash_param.num_obj); - stash_param.obj_size = sizeof(tx_burst_t *); /* stash pointers */ - if (!POWEROF2(stash_param.obj_size) || - stash_param.obj_size != sizeof(uintptr_t) || - stash_param.obj_size > stash_capa.max_obj_size) { - APPL_EXIT_FAILURE("Unsupported odp-stash object handle size:%u", - stash_param.obj_size); - } - stash_param.cache_size = 0; /* No core local caching */ - - stash = odp_stash_create("pktout.tx-burst-stash", &stash_param); - if (stash == ODP_STASH_INVALID) - APPL_EXIT_FAILURE("odp_stash_create() fails"); - pktio_shm->pktout.tx_burst_stash = stash; - - /* Misc inits: */ - for (int i = 0; i < MAX_RX_PKT_QUEUES; i++) { - pktio_shm->rx_pkt_queues[i].pos = i; - pktio_shm->rx_pkt_queues[i].queue = EM_QUEUE_UNDEF; - } - - odp_ticketlock_init(&pktio_shm->tbl_lookup.lock); - pktio_shm->tbl_lookup.tbl_idx = 0; - pktio_shm->tbl_lookup.ops = odph_cuckoo_table_ops; - odp_ticketlock_lock(&pktio_shm->tbl_lookup.lock); - pktio_shm->tbl_lookup.tbl = - pktio_shm->tbl_lookup.ops.f_create("RX-lookup-tbl", MAX_RX_PKT_QUEUES, - sizeof(pkt_q_hash_key_t), - sizeof(rx_pkt_queue_t)); - odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); - if (unlikely(pktio_shm->tbl_lookup.tbl == NULL)) - APPL_EXIT_FAILURE("rx pkt lookup table creation fails"); -} - -void pktio_deinit(const appl_conf_t *appl_conf) -{ - (void)appl_conf; - - odp_stash_destroy(pktio_shm->pktin.pktin_queue_stash); - odp_stash_destroy(pktio_shm->pktout.tx_burst_stash); - - pktio_shm->tbl_lookup.ops.f_des(pktio_shm->tbl_lookup.tbl); -} - -static void pktio_tx_buffering_create(int if_num) -{ - tx_burst_t *tx_burst; - odp_queue_param_t queue_param; - odp_queue_t odp_queue; - int pktout_idx; - odp_queue_t pktout_queue; - char name[ODP_QUEUE_NAME_LEN]; - - const int pktout_num_queues = pktio_shm->pktout.num_queues[if_num]; - - for (int i = 0; i < MAX_TX_BURST_BUFS; i++) { - tx_burst = &pktio_shm->tx_burst[if_num][i]; - - odp_atomic_init_u64(&tx_burst->cnt, 0); - 
odp_spinlock_init(&tx_burst->lock); - - odp_queue_param_init(&queue_param); - queue_param.type = ODP_QUEUE_TYPE_PLAIN; - queue_param.enq_mode = ODP_QUEUE_OP_MT; - queue_param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; - /* ignore odp ordering, EM handles output order, just buffer */ - queue_param.order = ODP_QUEUE_ORDER_IGNORE; - - snprintf(name, ODP_QUEUE_NAME_LEN, "tx-burst-if%d-%03d", - if_num, i); - name[ODP_QUEUE_NAME_LEN - 1] = '\0'; - - odp_queue = odp_queue_create(name, &queue_param); - if (unlikely(odp_queue == ODP_QUEUE_INVALID)) - APPL_EXIT_FAILURE("odp_queue_create() fails:if=%d(%d)", - if_num, i); - tx_burst->queue = odp_queue; - tx_burst->if_port = if_num; - - pktout_idx = i % pktout_num_queues; - pktout_queue = pktio_shm->pktout.queues[if_num][pktout_idx]; - tx_burst->pktout_queue = pktout_queue; - - /* - * Store each tx burst into the tx_burst_stash, stash used when - * draining the available tx-burst buffers. - */ - uintptr_t tx_burst_uintptr = (uintptr_t)tx_burst; - int ret = odp_stash_put_ptr(pktio_shm->pktout.tx_burst_stash, - &tx_burst_uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("enqueue fails"); - } -} - -static void pktio_tx_buffering_destroy(void) -{ - tx_burst_t *tx_burst; - int num; - - while ((tx_burst = tx_drain_burst_acquire()) != NULL) { - do { - num = odp_queue_deq_multi(tx_burst->queue, - pktio_locm.ev_burst, - MAX_PKT_BURST_TX); - if (unlikely(num <= 0)) - break; - - odp_atomic_sub_u64(&tx_burst->cnt, (uint64_t)num); - odp_event_free_multi(pktio_locm.ev_burst, num); - } while (num > 0); - - odp_queue_destroy(tx_burst->queue); - } -} - -static inline void -pktin_queue_stashing_create(int if_num, pktin_mode_t in_mode) -{ - int num_rx = pktio_shm->pktin.num_queues[if_num]; - uintptr_t uintptr; - int ret; - - for (int i = 0; i < num_rx; i++) { - if (in_mode == PLAIN_QUEUE) { - odp_queue_t queue; - - queue = pktio_shm->pktin.plain_queues[if_num][i]; - uintptr = (uintptr_t)queue; - } else /* DIRECT_RECV*/ { - odp_pktin_queue_t *pktin_qptr; - - pktin_qptr = &pktio_shm->pktin.pktin_queues[if_num][i]; - uintptr = (uintptr_t)pktin_qptr; - } - - /* - * Store the queue or the pktin_queue-ptr as an 'uintptr_t' - * in the stash. 
- */ - ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, - &uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("stash-put fails:%d", ret); - } -} - -static inline void -pktin_queue_queueing_destroy(void) -{ - if (pktio_shm->pktin.in_mode == PLAIN_QUEUE) { - while (plain_queue_acquire() != ODP_QUEUE_INVALID) - ; /* empty stash */ - } else /* DIRECT_RECV*/ { - odp_pktin_queue_t *pktin_queue_ptr; - - while (pktin_queue_acquire(&pktin_queue_ptr) == 0) - ; /* empty stash */ - } -} - -/** Helper to pktio_create() for packet input configuration */ -static void pktin_config(const char *dev, int if_idx, odp_pktio_t pktio, - const odp_pktio_capability_t *pktio_capa, - int num_workers, pktin_mode_t in_mode) -{ - odp_pktin_queue_param_t pktin_queue_param; - odp_pktio_op_mode_t mode_rx; - int num_rx; - int ret; - - (void)in_mode; - - odp_pktin_queue_param_init(&pktin_queue_param); - mode_rx = ODP_PKTIO_OP_MT_UNSAFE; - num_rx = MIN((int)pktio_capa->max_input_queues, PKTIO_MAX_IN_QUEUES); - num_rx = MIN(num_rx, num_workers); - - APPL_PRINT("\tmax number of pktio dev:'%s' input queues:%d, using:%d\n", - dev, pktio_capa->max_input_queues, num_rx); - - pktin_queue_param.hash_enable = 1; - pktin_queue_param.classifier_enable = 0; - pktin_queue_param.hash_proto.proto.ipv4_udp = 1; - pktin_queue_param.num_queues = num_rx; - pktin_queue_param.op_mode = mode_rx; - - ret = odp_pktin_queue_config(pktio, &pktin_queue_param); - if (ret < 0) - APPL_EXIT_FAILURE("pktio input queue config failed dev:'%s' (%d)", - dev, ret); - - if (in_mode == PLAIN_QUEUE) { - ret = odp_pktin_event_queue(pktio, pktio_shm->pktin.plain_queues[if_idx], num_rx); - if (ret != num_rx) - APPL_EXIT_FAILURE("pktio pktin event queue query failed dev:'%s' (%d)", - dev, ret); - } else /* DIRECT_RECV*/ { - ret = odp_pktin_queue(pktio, pktio_shm->pktin.pktin_queues[if_idx], num_rx); - if (ret != num_rx) - APPL_EXIT_FAILURE("pktio pktin queue query failed dev:'%s' (%d)", - dev, ret); - } - - pktio_shm->pktin.num_queues[if_idx] = num_rx; - - /* - * Store all pktin queues in another queue - core dequeues from this - * 'rx access queues' to use an pktin queue. 
- */ - pktin_queue_stashing_create(if_idx, in_mode); -} - -/** Helper to pktio_create() for packet output configuration */ -static void pktout_config(const char *dev, int if_idx, odp_pktio_t pktio, - const odp_pktio_capability_t *pktio_capa, - int num_workers) -{ - odp_pktout_queue_param_t pktout_queue_param; - odp_pktio_op_mode_t mode_tx; - int num_tx, max; - int ret; - - odp_pktout_queue_param_init(&pktout_queue_param); - mode_tx = ODP_PKTIO_OP_MT; - max = MIN((int)pktio_capa->max_output_queues, PKTIO_MAX_OUT_QUEUES); - num_tx = MIN(2 * num_workers, max); - APPL_PRINT("\tmax number of pktio dev:'%s' output queues:%d, using:%d\n", - dev, pktio_capa->max_output_queues, num_tx); - - pktout_queue_param.num_queues = num_tx; - pktout_queue_param.op_mode = mode_tx; - - ret = odp_pktout_queue_config(pktio, &pktout_queue_param); - if (ret < 0) - APPL_EXIT_FAILURE("pktio output queue config failed dev:'%s' (%d)", - dev, ret); - - ret = odp_pktout_event_queue(pktio, pktio_shm->pktout.queues[if_idx], - num_tx); - if (ret != num_tx || ret > PKTIO_MAX_OUT_QUEUES) - APPL_EXIT_FAILURE("pktio pktout queue query failed dev:'%s' (%d)", - dev, ret); - pktio_shm->pktout.num_queues[if_idx] = num_tx; - - /* Create Tx buffers */ - pktio_tx_buffering_create(if_idx); -} - -int /* if_id */ -pktio_create(const char *dev, int num_workers, pktin_mode_t in_mode) -{ - int if_idx = -1; /* return value */ - odp_pktio_param_t pktio_param; - odp_pktio_t pktio; - odp_pktio_capability_t pktio_capa; - odp_pktio_config_t pktio_config; - odp_pktio_info_t info; - int ret; - - odp_pktio_param_init(&pktio_param); - - /* Packet input mode */ - if (in_mode == DIRECT_RECV) - pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT; - else if (in_mode == PLAIN_QUEUE) - pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE; - else - APPL_EXIT_FAILURE("dev:'%s': unsupported pktin-mode:%d\n", - dev, in_mode); - - /* Packet output mode: QUEUE mode to preserve packet order if needed */ - pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE; - - pktio = odp_pktio_open(dev, pktio_shm->pools.pktpool_odp, &pktio_param); - if (pktio == ODP_PKTIO_INVALID) - APPL_EXIT_FAILURE("pktio create failed for dev:'%s'\n", dev); - - if (odp_pktio_info(pktio, &info)) - APPL_EXIT_FAILURE("pktio info failed dev:'%s'", dev); - - if_idx = odp_pktio_index(pktio); - if (if_idx < 0 || if_idx >= IF_MAX_NUM) - APPL_EXIT_FAILURE("pktio index:%d too large, dev:'%s'", - if_idx, dev); - - APPL_PRINT("\n%s(dev=%s):\n", __func__, dev); - APPL_PRINT("\tcreated pktio:%" PRIu64 " idx:%d, dev:'%s', drv:%s\n", - odp_pktio_to_u64(pktio), if_idx, dev, info.drv_name); - - ret = odp_pktio_capability(pktio, &pktio_capa); - if (ret != 0) - APPL_EXIT_FAILURE("pktio capability query failed: dev:'%s' (%d)", - dev, ret); - - odp_pktio_config_init(&pktio_config); - pktio_config.parser.layer = ODP_PROTO_LAYER_NONE; - /* Provide hint to pktio that packet references are not used */ - pktio_config.pktout.bit.no_packet_refs = 1; - - ret = odp_pktio_config(pktio, &pktio_config); - if (ret != 0) - APPL_EXIT_FAILURE("pktio config failed: dev:'%s' (%d)", - dev, ret); - - /* Pktin (Rx) config */ - pktin_config(dev, if_idx, pktio, &pktio_capa, num_workers, in_mode); - - /* Pktout (Tx) config */ - pktout_config(dev, if_idx, pktio, &pktio_capa, num_workers); - - /* Start the pktio to complete configuration... */ - ret = odp_pktio_start(pktio); - if (ret != 0) - APPL_EXIT_FAILURE("Unable to start dev:'%s'", dev); - /* - * ...and stop it immediately to block odp_pktin_recv() from receiving - * pkts until application setup is ready. 
- * The application will start pktio when ready through pktio_start(). - */ - ret = odp_pktio_stop(pktio); - if (ret != 0) - APPL_EXIT_FAILURE("Unable to stop dev:'%s'", dev); - - APPL_PRINT("\tcreated pktio dev:'%s' - input mode:%s, output mode:QUEUE", - dev, pktin_mode_str(in_mode)); - odp_pktio_print(pktio); - - pktio_shm->ifs.idx[pktio_shm->ifs.num_created] = if_idx; - pktio_shm->ifs.pktio_hdl[if_idx] = pktio; - pktio_shm->ifs.num_created++; - - return if_idx; -} - -void -pktio_start(void) -{ - if (pktio_shm->ifs.num_created != pktio_shm->ifs.count) - APPL_EXIT_FAILURE("Pktio IFs created:%d != IF count:%d", - pktio_shm->ifs.num_created, - pktio_shm->ifs.count); - - for (int i = 0; i < pktio_shm->ifs.count; i++) { - int if_idx = pktio_shm->ifs.idx[i]; - odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; - int ret = odp_pktio_start(pktio); - - if (unlikely(ret != 0)) - APPL_EXIT_FAILURE("Unable to start if:%d", if_idx); - APPL_PRINT("%s(): if:%d\n", __func__, if_idx); - } - - odp_mb_full(); - pktio_shm->pktio_started = 1; -} - -void pktio_halt(void) -{ - pktio_shm->pktio_started = 0; - odp_mb_full(); - APPL_PRINT("\n%s() on EM-core %d\n", __func__, em_core_id()); -} - -void pktio_stop(void) -{ - for (int i = 0; i < pktio_shm->ifs.count; i++) { - int if_idx = pktio_shm->ifs.idx[i]; - odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; - int ret = odp_pktio_stop(pktio); - - if (unlikely(ret != 0)) - APPL_EXIT_FAILURE("Unable to stop if:%d", if_idx); - APPL_PRINT("%s(): if:%d\n", __func__, if_idx); - } -} - -void pktio_close(void) -{ - for (int i = 0; i < pktio_shm->ifs.count; i++) { - int if_idx = pktio_shm->ifs.idx[i]; - odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; - int ret = odp_pktio_close(pktio); - - if (unlikely(ret != 0)) - APPL_EXIT_FAILURE("pktio close failed for if:%d", if_idx); - - pktio_shm->ifs.pktio_hdl[if_idx] = ODP_PKTIO_INVALID; - } - - pktin_queue_queueing_destroy(); - pktio_tx_buffering_destroy(); -} - -static inline int -pktin_queue_acquire(odp_pktin_queue_t **pktin_queue_ptr /*out*/) -{ - odp_pktin_queue_t *pktin_qptr; - uintptr_t pktin_qptr_uintptr; - - int ret = odp_stash_get_ptr(pktio_shm->pktin.pktin_queue_stash, - &pktin_qptr_uintptr, 1); - - if (unlikely(ret != 1)) - return -1; - - pktin_qptr = (odp_pktin_queue_t *)pktin_qptr_uintptr; - - *pktin_queue_ptr = pktin_qptr; - return 0; -} - -static inline void -pktin_queue_release(odp_pktin_queue_t *pktin_queue_ptr) -{ - uintptr_t pktin_qptr_uintptr; - - /* store the pointer as an 'uintptr_t' in the stash */ - pktin_qptr_uintptr = (uintptr_t)pktin_queue_ptr; - - int ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, - &pktin_qptr_uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("stash-put fails:%d", ret); -} - -static inline odp_queue_t -plain_queue_acquire(void) -{ - odp_queue_t queue; - uintptr_t queue_uintptr; - - int ret = odp_stash_get_ptr(pktio_shm->pktin.pktin_queue_stash, - &queue_uintptr, 1); - if (unlikely(ret != 1)) - return ODP_QUEUE_INVALID; - - queue = (odp_queue_t)queue_uintptr; - - return queue; -} - -static inline void -plain_queue_release(odp_queue_t queue) -{ - uintptr_t queue_uintptr; - - /* store the queue as an 'uintptr_t' in the stash */ - queue_uintptr = (uintptr_t)queue; - - int ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, - &queue_uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("stash-put fails:%d", ret); -} - -/* - * Helper to the pktin_pollfn_...() functions. 
- */ -static inline int /* nbr of pkts enqueued */ -pktin_lookup_enqueue(odp_packet_t pkt_tbl[], int pkts) -{ - const odph_table_get_value f_get = pktio_shm->tbl_lookup.ops.f_get; - rx_queue_burst_t *const rx_qbursts = pktio_locm.rx_qbursts; - int pkts_enqueued = 0; /* return value */ - int valid_pkts = 0; - - for (int i = 0; i < pkts; i++) { - const odp_packet_t pkt = pkt_tbl[i]; - void *const pkt_data = odp_packet_data(pkt); - - /* - * If 'pktio_config.parser.layer = - * ODP_PKTIO_PARSER_LAYER_L4;' then the following - * better checks can be used (is slower though). - * if (unlikely(!odp_packet_has_udp(pkt))) { - * odp_packet_free(pkt); - * continue; - * } - * - * pkt_data = odp_packet_data(pkt); - * ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + - * odp_packet_l3_offset(pkt)); - * udp = (odph_udphdr_t *)((uintptr_t)pkt_data + - * odp_packet_l4_offset(pkt)); - */ - - /* Note: no actual checks if the headers are present */ - odph_ipv4hdr_t *const ip = (odph_ipv4hdr_t *) - ((uintptr_t)pkt_data + sizeof(odph_ethhdr_t)); - odph_udphdr_t *const udp = (odph_udphdr_t *) - ((uintptr_t)ip + sizeof(odph_ipv4hdr_t)); - /* - * NOTE! network-to-CPU conversion not needed here. - * Setup stores network-order in hash to avoid - * conversion for every packet. - */ - pktio_locm.keys[i].ip_dst = ip->dst_addr; - pktio_locm.keys[i].proto = ip->proto; - pktio_locm.keys[i].port_dst = - likely(ip->proto == ODPH_IPPROTO_UDP || - ip->proto == ODPH_IPPROTO_TCP) ? - udp->dst_port : 0; - } - - for (int i = 0; i < pkts; i++) { - const odp_packet_t pkt = pkt_tbl[i]; - rx_pkt_queue_t rx_pkt_queue; - em_queue_t queue; - int pos; - - /* table(hash) lookup to find queue */ - int ret = f_get(pktio_shm->tbl_lookup.tbl, - &pktio_locm.keys[i], - &rx_pkt_queue, sizeof(rx_pkt_queue_t)); - if (likely(ret == 0)) { - /* found */ - pos = rx_pkt_queue.pos; - queue = rx_pkt_queue.queue; - } else { - /* not found, use default queue if set */ - pos = MAX_RX_PKT_QUEUES; /* reserved space +1*/ - queue = pktio_shm->default_queue; - if (unlikely(queue == EM_QUEUE_UNDEF)) { - odp_packet_free(pkt); - continue; - } - } - - pktio_locm.positions[valid_pkts++] = pos; - rx_qbursts[pos].sent = 0; - rx_qbursts[pos].queue = queue; - rx_qbursts[pos].pkt_tbl[rx_qbursts[pos].pkt_cnt++] = pkt; - } - - for (int i = 0; i < valid_pkts; i++) { - const int pos = pktio_locm.positions[i]; - - if (rx_qbursts[pos].sent) - continue; - - const int num = rx_qbursts[pos].pkt_cnt; - const em_queue_t queue = rx_qbursts[pos].queue; - - /* Enqueue pkts into em-odp */ - pkts_enqueued += pkt_enqueue(rx_qbursts[pos].pkt_tbl, - num, queue); - rx_qbursts[pos].sent = 1; - rx_qbursts[pos].pkt_cnt = 0; - } - - return pkts_enqueued; -} - -/* - * User provided function to poll for packet input in DIRECT_RECV-mode, - * given to EM via 'em_conf.input.input_poll_fn = pktin_pollfn_direct;' - * The function is of type 'em_input_poll_func_t'. See .h file. 
- */ -int pktin_pollfn_direct(void) -{ - odp_pktin_queue_t *pktin_queue_ptr; - odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; - int ret, pkts; - int poll_rounds = 0; - int pkts_enqueued = 0; /* return value */ - - if (unlikely(!pktio_shm->pktio_started)) - return 0; - - ret = pktin_queue_acquire(&pktin_queue_ptr /*out*/); - if (unlikely(ret != 0)) - return 0; - - do { - pkts = odp_pktin_recv(*pktin_queue_ptr, pkt_tbl, MAX_PKT_BURST_RX); - if (unlikely(pkts <= 0)) - goto pktin_poll_end; - - pkts_enqueued += pktin_lookup_enqueue(pkt_tbl, pkts); - - } while (pkts == MAX_PKT_BURST_RX && - ++poll_rounds < MAX_RX_POLL_ROUNDS); - -pktin_poll_end: - pktin_queue_release(pktin_queue_ptr); - - return pkts_enqueued; -} - -/* - * User provided function to poll for packet input in PLAIN_QUEUE-mode, - * given to EM via 'em_conf.input.input_poll_fn = pktin_pollfn_plainqueue;' - * The function is of type 'em_input_poll_func_t'. See .h file. - */ -int pktin_pollfn_plainqueue(void) -{ - odp_queue_t plain_queue; - odp_event_t ev_tbl[MAX_PKT_BURST_RX]; - odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; - int pkts; - int poll_rounds = 0; - int pkts_enqueued = 0; /* return value */ - - if (unlikely(!pktio_shm->pktio_started)) - return 0; - - plain_queue = plain_queue_acquire(); - if (unlikely(plain_queue == ODP_QUEUE_INVALID)) - return 0; - - do { - pkts = odp_queue_deq_multi(plain_queue, ev_tbl, MAX_PKT_BURST_RX); - if (unlikely(pkts <= 0)) - goto pktin_poll_end; - - odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts); - - pkts_enqueued += pktin_lookup_enqueue(pkt_tbl, pkts); - - } while (pkts == MAX_PKT_BURST_RX && - ++poll_rounds < MAX_RX_POLL_ROUNDS); - -pktin_poll_end: - plain_queue_release(plain_queue); - - return pkts_enqueued; -} - -static inline int -pktio_tx_burst(tx_burst_t *const tx_burst) -{ - if (odp_spinlock_is_locked(&tx_burst->lock) || - odp_spinlock_trylock(&tx_burst->lock) == 0) - return 0; - - const int num = odp_queue_deq_multi(tx_burst->queue, - pktio_locm.ev_burst, - MAX_PKT_BURST_TX); - if (unlikely(num <= 0)) { - odp_spinlock_unlock(&tx_burst->lock); - return 0; - } - - odp_atomic_sub_u64(&tx_burst->cnt, (uint64_t)num); - - const odp_queue_t pktout_queue = tx_burst->pktout_queue; - /* Enqueue a tx burst onto the pktio queue for transmission */ - int ret = odp_queue_enq_multi(pktout_queue, pktio_locm.ev_burst, num); - - odp_spinlock_unlock(&tx_burst->lock); - - if (unlikely(ret != num)) { - if (ret < 0) - ret = 0; - odp_event_free_multi(&pktio_locm.ev_burst[ret], num - ret); - } - - return ret; -} - -/** - * @brief User provided output-queue callback function (em_output_func_t). - * - * Transmit events(pkts) via Eth Tx queues. 
- * - * @return The number of events actually transmitted (<= num) - */ -int pktio_tx(const em_event_t events[], const unsigned int num, - const em_queue_t output_queue, void *output_fn_args) -{ - /* Create idx to select tx-burst, always same idx for same em queue */ - const int burst_idx = (int)((uintptr_t)output_queue % - MAX_TX_BURST_BUFS); - pktio_tx_fn_args_t *const args = output_fn_args; - const int if_port = (int)(args->if_id % IF_MAX_NUM); - /* Select tx-burst onto which to temporaily store pkt/event until tx */ - tx_burst_t *const tx_burst = &pktio_shm->tx_burst[if_port][burst_idx]; - uint64_t prev_cnt; - int ret; - - if (unlikely(num == 0 || !pktio_shm->pktio_started)) - return 0; - - /* Convert into ODP-events */ - odp_event_t odp_events[num]; - - em_odp_events2odp(events, odp_events, num); - - /* - * Mark all events as "free" from EM point of view - ODP will transmit - * and free the events (=odp-pkts). - */ - em_event_mark_free_multi(events, num); - - /* - * 'sched_ctx_type = em_sched_context_type_current(&src_sched_queue)' - * could be used to determine the need for maintaining event order for - * output. Also em_queue_get_type(src_sched_queue) could further be used - * if not caring about a potentially ended sched-context caused by an - * earlier call to em_atomic/ordered_processing_end(). - * Here, none of this is done, since every event will be buffered and - * sent out in order regardless of sched context type or queue type. - */ - - ret = odp_queue_enq_multi(tx_burst->queue, odp_events, num); - if (unlikely(ret < 0)) { - /* failure: don't return, see if a burst can be Tx anyway */ - ret = 0; - } - - prev_cnt = odp_atomic_fetch_add_u64(&tx_burst->cnt, ret); - if (prev_cnt >= MAX_PKT_BURST_TX - 1) - (void)pktio_tx_burst(tx_burst); - - if (unlikely(ret < (int)num)) - em_event_unmark_free_multi(&events[ret], num - ret); - - return ret; -} - -static inline tx_burst_t * -tx_drain_burst_acquire(void) -{ - tx_burst_t *tx_burst; - uintptr_t tx_burst_uintptr; - - int ret = odp_stash_get_ptr(pktio_shm->pktout.tx_burst_stash, - &tx_burst_uintptr, 1); - if (unlikely(ret != 1)) - return NULL; - - tx_burst = (tx_burst_t *)tx_burst_uintptr; - return tx_burst; -} - -static inline void -tx_drain_burst_release(tx_burst_t *tx_burst) { - uintptr_t tx_burst_uintptr = (uintptr_t)tx_burst; - - int ret = odp_stash_put_ptr(pktio_shm->pktout.tx_burst_stash, - &tx_burst_uintptr, 1); - if (unlikely(ret != 1)) - APPL_EXIT_FAILURE("stash-put fails:%d", ret); -} - -/* - * User provided function to drain buffered output, - * given to EM via 'em_conf.output.output_drain_fn = pktout_drainfn;' - * The function is of type 'em_output_drain_func_t' - */ -int pktout_drainfn(void) -{ - const uint64_t curr = odp_cpu_cycles(); /* core-local timestamp */ - const uint64_t prev = pktio_locm.tx_prev_cycles; - const uint64_t diff = likely(curr >= prev) ? 
- curr - prev : UINT64_MAX - prev + curr + 1; - int ret = 0; - - /* TX burst queue drain */ - if (unlikely(diff > BURST_TX_DRAIN)) { - tx_burst_t *tx_drain_burst = tx_drain_burst_acquire(); - - if (tx_drain_burst) { - ret = pktio_tx_burst(tx_drain_burst); - /* Update timestamp for next round */ - pktio_locm.tx_prev_cycles = curr; - tx_drain_burst_release(tx_drain_burst); - } - } - - return ret; -} - -void pktio_add_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t port_dst, - em_queue_t queue) -{ - pkt_q_hash_key_t key; - int ret, idx; - - /* Store in network format to avoid conversion during Rx lookup */ - key.ip_dst = htonl(ipv4_dst); - key.port_dst = htons(port_dst); - key.proto = proto; - - odp_ticketlock_lock(&pktio_shm->tbl_lookup.lock); - - idx = pktio_shm->tbl_lookup.tbl_idx; - if (unlikely(idx != pktio_shm->rx_pkt_queues[idx].pos)) { - odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); - APPL_EXIT_FAILURE("tbl insertion failed, idx(%d) != pos(%d)", - idx, pktio_shm->rx_pkt_queues[idx].pos); - return; - } - - if (unlikely(em_queue_get_type(queue) == EM_QUEUE_TYPE_UNDEF)) { - odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); - APPL_EXIT_FAILURE("Invalid queue:%" PRI_QUEUE "", queue); - return; - } - - pktio_shm->rx_pkt_queues[idx].queue = queue; - - ret = pktio_shm->tbl_lookup.ops.f_put(pktio_shm->tbl_lookup.tbl, &key, - &pktio_shm->rx_pkt_queues[idx]); - if (likely(ret == 0)) - pktio_shm->tbl_lookup.tbl_idx++; - - odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); - - if (unlikely(ret != 0)) - APPL_EXIT_FAILURE("tbl insertion failed"); -} - -int pktio_default_queue(em_queue_t queue) -{ - if (unlikely(em_queue_get_type(queue) == EM_QUEUE_TYPE_UNDEF)) { - APPL_EXIT_FAILURE("Invalid queue:%" PRI_QUEUE "", queue); - return -1; - } - - pktio_shm->default_queue = queue; - - return 0; -} - -em_queue_t pktio_lookup_sw(uint8_t proto, uint32_t ipv4_dst, uint16_t port_dst) -{ - em_queue_t queue; - rx_pkt_queue_t rx_pkt_queue; - int ret, pos; - /* Store in network format to avoid conversion during Rx lookup */ - pkt_q_hash_key_t key = {.ip_dst = htonl(ipv4_dst), - .port_dst = htons(port_dst), - .proto = proto}; - - /* table(hash) lookup to find queue */ - ret = pktio_shm->tbl_lookup.ops.f_get(pktio_shm->tbl_lookup.tbl, - &key, &rx_pkt_queue, - sizeof(rx_pkt_queue_t)); - - if (likely(ret == 0)) { - /* found */ - pos = rx_pkt_queue.pos; - queue = rx_pkt_queue.queue; - if (unlikely(queue != pktio_shm->rx_pkt_queues[pos].queue)) { - APPL_EXIT_FAILURE("%" PRI_QUEUE "!= %" PRI_QUEUE "", - queue, - pktio_shm->rx_pkt_queues[pos].queue); - return EM_QUEUE_UNDEF; - } - } else { - queue = EM_QUEUE_UNDEF; - } - - return queue; -} - -odp_pool_t pktio_pool_get(void) -{ - return pktio_shm->pools.pktpool_odp; -} +/* + * Copyright (c) 2015-2022, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ /**
+ * @file
+ *
+ * EM-ODP packet I/O setup
+ */
+#include
+#include
+#include
+#include
+#include
+
+#include "cm_setup.h"
+#include "cm_pktio.h"
+
+#define PKTIO_PKT_POOL_NUM_BUFS (32 * 1024)
+#define PKTIO_PKT_POOL_BUF_SIZE 1536
+#define PKTIO_VEC_POOL_VEC_SIZE 32
+#define PKTIO_VEC_SIZE PKTIO_VEC_POOL_VEC_SIZE
+#define PKTIO_VEC_TMO ODP_TIME_MSEC_IN_NS
+
+static pktio_shm_t *pktio_shm;
+static __thread pktio_locm_t pktio_locm ODP_ALIGNED_CACHE;
+
+static inline tx_burst_t *tx_drain_burst_acquire(void);
+static inline int pktin_queue_acquire(odp_pktin_queue_t **pktin_queue_ptr /*out*/);
+static inline odp_queue_t plain_queue_acquire(void);
+
+const char *pktin_mode_str(pktin_mode_t in_mode)
+{
+ const char *str;
+
+ switch (in_mode) {
+ case DIRECT_RECV:
+ str = "DIRECT_RECV";
+ break;
+ case PLAIN_QUEUE:
+ str = "PLAIN_QUEUE";
+ break;
+ case SCHED_PARALLEL:
+ str = "SCHED_PARALLEL";
+ break;
+ case SCHED_ATOMIC:
+ str = "SCHED_ATOMIC";
+ break;
+ case SCHED_ORDERED:
+ str = "SCHED_ORDERED";
+ break;
+ default:
+ str = "UNKNOWN";
+ break;
+ }
+
+ return str;
+}
+
+bool pktin_polled_mode(pktin_mode_t in_mode)
+{
+ return in_mode == DIRECT_RECV ||
+ in_mode == PLAIN_QUEUE;
+}
+
+bool pktin_sched_mode(pktin_mode_t in_mode)
+{
+ return in_mode == SCHED_PARALLEL ||
+ in_mode == SCHED_ATOMIC ||
+ in_mode == SCHED_ORDERED;
+}
+
+void pktio_mem_reserve(void)
+{
+ odp_shm_t shm;
+ uint32_t flags = 0;
+
+ /* Sanity check: pktio_shm should not be set yet */
+ if (unlikely(pktio_shm != NULL))
+ APPL_EXIT_FAILURE("pktio shared memory ptr set - already initialized?");
+
+#if ODP_VERSION_API_NUM(1, 33, 0) > ODP_VERSION_API
+ flags |= ODP_SHM_SINGLE_VA;
+#else
+ odp_shm_capability_t shm_capa;
+ int ret = odp_shm_capability(&shm_capa);
+
+ if (unlikely(ret))
+ APPL_EXIT_FAILURE("shm capability error:%d", ret);
+
+ if (shm_capa.flags & ODP_SHM_SINGLE_VA)
+ flags |= ODP_SHM_SINGLE_VA;
+#endif
+ /* Reserve packet I/O shared memory */
+ shm = odp_shm_reserve("pktio_shm", sizeof(pktio_shm_t),
+ ODP_CACHE_LINE_SIZE, flags);
+
+ if (unlikely(shm == ODP_SHM_INVALID))
+ APPL_EXIT_FAILURE("pktio shared mem reserve failed.");
+
+ pktio_shm = odp_shm_addr(shm);
+ if (unlikely(pktio_shm == NULL))
+ APPL_EXIT_FAILURE("obtaining pktio shared mem addr failed.");
+
+ memset(pktio_shm, 0, sizeof(pktio_shm_t));
+}
+
+void pktio_mem_lookup(bool is_thread_per_core)
+{
+ odp_shm_t shm;
+ pktio_shm_t *shm_addr;
+
+ shm = odp_shm_lookup("pktio_shm");
+
+ shm_addr = odp_shm_addr(shm);
+ if (unlikely(shm_addr == NULL))
+ APPL_EXIT_FAILURE("pktio shared mem addr lookup failed.");
+
+ /*
+ * Set pktio_shm in process-per-core mode, each process has its own pointer.
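+ * In thread-per-core mode all EM-cores run in one address space, so
+ * the pointer set in pktio_mem_reserve() is already valid as such.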
+ */ + if (!is_thread_per_core && pktio_shm != shm_addr) + pktio_shm = shm_addr; +} + +void pktio_mem_free(void) +{ + odp_shm_t shm; + + shm = odp_shm_lookup("pktio_shm"); + if (unlikely(shm == ODP_SHM_INVALID)) + APPL_EXIT_FAILURE("pktio shared mem lookup for free failed."); + + if (odp_shm_free(shm) != 0) + APPL_EXIT_FAILURE("pktio shared mem free failed."); + pktio_shm = NULL; +} + +/** + * Helper to pktio_pool_create(): create the pktio pool as an EM event-pool + */ +static void pktio_pool_create_em(int if_count, const odp_pool_capability_t *pool_capa) +{ + /* + * Create the pktio pkt pool used for actual input pkts. + * Create the pool as an EM-pool (and convert into an ODP-pool where + * needed) to be able to utilize EM's Event State Verification (ESV) + * in the 'esv.prealloc_pools = true' mode (see config/em-odp.conf). + */ + em_pool_cfg_t pool_cfg; + em_pool_t pool; + + em_pool_cfg_init(&pool_cfg); + pool_cfg.event_type = EM_EVENT_TYPE_PACKET; + pool_cfg.num_subpools = 1; + pool_cfg.subpool[0].size = PKTIO_PKT_POOL_BUF_SIZE; + pool_cfg.subpool[0].num = if_count * PKTIO_PKT_POOL_NUM_BUFS; + /* Use max thread-local pkt-cache size to speed up pktio allocs */ + pool_cfg.subpool[0].cache_size = pool_capa->pkt.max_cache_size; + pool = em_pool_create("pktio-pool-em", EM_POOL_UNDEF, &pool_cfg); + if (pool == EM_POOL_UNDEF) + APPL_EXIT_FAILURE("pktio pool creation failed"); + + /* Convert: EM-pool to ODP-pool */ + odp_pool_t odp_pool = ODP_POOL_INVALID; + int ret = em_odp_pool2odp(pool, &odp_pool, 1); + + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("EM pktio pool creation failed:%d", ret); + + /* Store the EM pktio pool and the corresponding ODP subpool */ + pktio_shm->pools.pktpool_em = pool; + pktio_shm->pools.pktpool_odp = odp_pool; + + odp_pool_print(pktio_shm->pools.pktpool_odp); +} + +/** + * Helper to pktio_pool_create(): create the pktio pool as an ODP pkt-pool + */ +static void pktio_pool_create_odp(int if_count, const odp_pool_capability_t *pool_capa) +{ + odp_pool_param_t pool_params; + + (void)pool_capa; + + odp_pool_param_init(&pool_params); + pool_params.pkt.num = if_count * PKTIO_PKT_POOL_NUM_BUFS; + /* pool_params.pkt.max_num = default */ + pool_params.pkt.len = PKTIO_PKT_POOL_BUF_SIZE; + pool_params.pkt.max_len = PKTIO_PKT_POOL_BUF_SIZE; + pool_params.pkt.seg_len = PKTIO_PKT_POOL_BUF_SIZE; + + pool_params.type = ODP_POOL_PACKET; + pool_params.pkt.uarea_size = em_odp_event_hdr_size(); + + odp_pool_t odp_pool = odp_pool_create("pktio-pool-odp", &pool_params); + + if (odp_pool == ODP_POOL_INVALID) + APPL_EXIT_FAILURE("pktio pool creation failed"); + + /* Store the ODP pktio pool */ + pktio_shm->pools.pktpool_odp = odp_pool; + pktio_shm->pools.pktpool_em = EM_POOL_UNDEF; + + odp_pool_print(pktio_shm->pools.pktpool_odp); +} + +static void pktio_vectorpool_create_em(int if_count, const odp_pool_capability_t *pool_capa) +{ + if (unlikely(pool_capa->vector.max_pools == 0 || + pool_capa->vector.max_size == 0)) + APPL_EXIT_FAILURE("ODP pktin vectors not supported!"); + + uint32_t vec_size = PKTIO_VEC_POOL_VEC_SIZE; + uint32_t num_pkt = PKTIO_PKT_POOL_NUM_BUFS * if_count; + uint32_t num_vec = num_pkt; /* worst case: 1 pkt per vector */ + + if (vec_size > pool_capa->vector.max_size) { + vec_size = pool_capa->vector.max_size; + APPL_PRINT("\nWarning: pktin vector size reduced to %u\n\n", + vec_size); + } + + if (pool_capa->vector.max_num /* 0=limited only by pool memsize */ && + num_vec > pool_capa->vector.max_num) { + num_vec = pool_capa->vector.max_num; + APPL_PRINT("\nWarning: pktin 
number of vectors reduced to %u\n\n",
+ num_vec);
+ }
+
+ em_pool_cfg_t pool_cfg;
+
+ em_pool_cfg_init(&pool_cfg);
+ pool_cfg.event_type = EM_EVENT_TYPE_VECTOR;
+ pool_cfg.num_subpools = 1;
+
+ pool_cfg.subpool[0].size = vec_size; /* nbr of events in vector */
+ pool_cfg.subpool[0].num = num_vec;
+ /* Use max thread-local pkt-cache size to speed up pktio allocs */
+ pool_cfg.subpool[0].cache_size = pool_capa->pkt.max_cache_size;
+
+ em_pool_t vector_pool = em_pool_create("vector-pool-em", EM_POOL_UNDEF, &pool_cfg);
+
+ if (vector_pool == EM_POOL_UNDEF)
+ APPL_EXIT_FAILURE("EM vector pool create failed");
+
+ /* Convert: EM-pool to ODP-pool */
+ odp_pool_t odp_vecpool = ODP_POOL_INVALID;
+ int ret = em_odp_pool2odp(vector_pool, &odp_vecpool, 1);
+
+ if (unlikely(ret != 1))
+ APPL_EXIT_FAILURE("EM vector pool conversion failed:%d", ret);
+
+ /* Store the EM vector pool and the corresponding ODP pool */
+ pktio_shm->pools.vecpool_em = vector_pool;
+ pktio_shm->pools.vecpool_odp = odp_vecpool;
+
+ odp_pool_print(odp_vecpool);
+}
+
+static void pktio_vectorpool_create_odp(int if_count, const odp_pool_capability_t *pool_capa)
+{
+ odp_pool_param_t pool_params;
+
+ odp_pool_param_init(&pool_params);
+
+ pool_params.type = ODP_POOL_VECTOR;
+
+ if (unlikely(pool_capa->vector.max_pools == 0 ||
+ pool_capa->vector.max_size == 0))
+ APPL_EXIT_FAILURE("ODP pktin vectors not supported!");
+
+ uint32_t vec_size = PKTIO_VEC_POOL_VEC_SIZE;
+ uint32_t num_pkt = PKTIO_PKT_POOL_NUM_BUFS * if_count;
+ uint32_t num_vec = num_pkt; /* worst case: 1 pkt per vector */
+
+ if (vec_size > pool_capa->vector.max_size) {
+ vec_size = pool_capa->vector.max_size;
+ APPL_PRINT("\nWarning: pktin vector size reduced to %u\n\n",
+ vec_size);
+ }
+
+ if (pool_capa->vector.max_num /* 0=limited only by pool memsize */ &&
+ num_vec > pool_capa->vector.max_num) {
+ num_vec = pool_capa->vector.max_num;
+ APPL_PRINT("\nWarning: pktin number of vectors reduced to %u\n\n",
+ num_vec);
+ }
+
+ pool_params.vector.num = num_vec;
+ pool_params.vector.max_size = vec_size;
+ pool_params.vector.uarea_size = em_odp_event_hdr_size();
+
+ odp_pool_t vector_pool = odp_pool_create("vector-pool-odp", &pool_params);
+
+ if (vector_pool == ODP_POOL_INVALID)
+ APPL_EXIT_FAILURE("ODP vector pool create failed");
+
+ pktio_shm->pools.vecpool_odp = vector_pool;
+
+ odp_pool_print(vector_pool);
+}
+
+/**
+ * Create the memory pool used by pkt-io
+ */
+void pktio_pool_create(int if_count, bool pktpool_em,
+ bool pktin_vector, bool vecpool_em)
+{
+ odp_pool_capability_t pool_capa;
+
+ if (odp_pool_capability(&pool_capa) != 0)
+ APPL_EXIT_FAILURE("Can't get odp-pool capability");
+ /*
+ * Create the pktio pkt pool used for actual input pkts.
+ * Create the pool either as an EM- or ODP-pool.
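+ * EM-pool: allows EM Event State Verification (ESV) for input pkts.
+ * ODP-pool: plain odp_pool_create() with an EM event-hdr sized user area.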
+ */
+ if (pktpool_em)
+ pktio_pool_create_em(if_count, &pool_capa);
+ else
+ pktio_pool_create_odp(if_count, &pool_capa);
+
+ if (pktin_vector) {
+ if (vecpool_em)
+ pktio_vectorpool_create_em(if_count, &pool_capa);
+ else
+ pktio_vectorpool_create_odp(if_count, &pool_capa);
+ }
+}
+
+/**
+ * Helper to pktio_pool_destroy(): destroy the EM event-pool used for pktio
+ */
+static void pktio_pool_destroy_em(void)
+{
+ APPL_PRINT("\n%s(): deleting the EM pktio-pool:\n", __func__);
+ em_pool_info_print(pktio_shm->pools.pktpool_em);
+
+ if (em_pool_delete(pktio_shm->pools.pktpool_em) != EM_OK)
+ APPL_EXIT_FAILURE("EM pktio-pool delete failed.");
+
+ pktio_shm->pools.pktpool_em = EM_POOL_UNDEF;
+ pktio_shm->pools.pktpool_odp = ODP_POOL_INVALID;
+}
+
+/**
+ * Helper to pktio_pool_destroy(): destroy the ODP pkt-pool used for pktio
+ */
+static void pktio_pool_destroy_odp(void)
+{
+ APPL_PRINT("\n%s(): destroying the ODP pktio-pool\n", __func__);
+ if (odp_pool_destroy(pktio_shm->pools.pktpool_odp) != 0)
+ APPL_EXIT_FAILURE("ODP pktio-pool destroy failed.");
+
+ pktio_shm->pools.pktpool_odp = ODP_POOL_INVALID;
+}
+
+/**
+ * Helper to pktio_pool_destroy(): destroy the pktin EM vector pool
+ */
+static void pktio_vectorpool_destroy_em(void)
+{
+ APPL_PRINT("\n%s(): deleting the EM vector-pool:\n", __func__);
+ em_pool_info_print(pktio_shm->pools.vecpool_em);
+
+ if (em_pool_delete(pktio_shm->pools.vecpool_em) != EM_OK)
+ APPL_EXIT_FAILURE("EM vector-pool delete failed.");
+
+ pktio_shm->pools.vecpool_em = EM_POOL_UNDEF;
+ pktio_shm->pools.vecpool_odp = ODP_POOL_INVALID;
+}
+
+/**
+ * Helper to pktio_pool_destroy(): destroy the ODP pktin vector pool
+ */
+static void pktio_vectorpool_destroy_odp(void)
+{
+ APPL_PRINT("\n%s(): destroying the ODP pktin vector-pool\n", __func__);
+ if (odp_pool_destroy(pktio_shm->pools.vecpool_odp) != 0)
+ APPL_EXIT_FAILURE("ODP pktin vector-pool destroy failed.");
+
+ pktio_shm->pools.vecpool_odp = ODP_POOL_INVALID;
+}
+
+/**
+ * Destroy the memory pool used by pkt-io
+ */
+void pktio_pool_destroy(bool pktpool_em, bool pktin_vector, bool vecpool_em)
+{
+ if (pktpool_em)
+ pktio_pool_destroy_em();
+ else
+ pktio_pool_destroy_odp();
+
+ if (pktin_vector) {
+ if (vecpool_em)
+ pktio_vectorpool_destroy_em();
+ else
+ pktio_vectorpool_destroy_odp();
+ }
+}
+
+void pktio_init(const appl_conf_t *appl_conf)
+{
+ pktin_mode_t in_mode = appl_conf->pktio.in_mode;
+ odp_stash_capability_t stash_capa;
+ odp_stash_param_t stash_param;
+ odp_stash_t stash;
+ int ret;
+
+ pktio_shm->ifs.count = appl_conf->pktio.if_count;
+ pktio_shm->ifs.num_created = 0;
+ pktio_shm->default_queue = EM_QUEUE_UNDEF;
+
+ pktio_shm->pktin.in_mode = in_mode;
+ pktio_shm->pktin.pktin_queue_stash = ODP_STASH_INVALID;
+
+ ret = odp_stash_capability(&stash_capa, ODP_STASH_TYPE_FIFO);
+ if (ret != 0)
+ APPL_EXIT_FAILURE("odp_stash_capability() fails:%d", ret);
+
+ if (pktin_polled_mode(in_mode)) {
+ /*
+ * Create a stash to hold the shared queues used in pkt input. Each core
+ * needs to get one queue to be able to use it to receive packets.
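+ * A core acquires one queue from the stash, polls it for pkts and
+ * then returns it, load-balancing the pktin queues over the polling
+ * cores (see pktin_queue_acquire()).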
+		 * DIRECT_RECV-mode: the stash contains pointers to odp_pktin_queue_t:s
+		 * PLAIN_QUEUE-mode: the stash contains odp_queue_t:s
+		 */
+		odp_stash_param_init(&stash_param);
+		stash_param.type = ODP_STASH_TYPE_FIFO;
+		stash_param.put_mode = ODP_STASH_OP_MT;
+		stash_param.get_mode = ODP_STASH_OP_MT;
+		stash_param.num_obj = PKTIO_MAX_IN_QUEUES * IF_MAX_NUM;
+		if (stash_param.num_obj > stash_capa.max_num_obj)
+			APPL_EXIT_FAILURE("Unsupported odp-stash number of objects:%" PRIu64 "",
+					  stash_param.num_obj);
+		stash_param.obj_size = MAX(sizeof(odp_queue_t), sizeof(odp_pktin_queue_t *));
+		if (!POWEROF2(stash_param.obj_size) ||
+		    stash_param.obj_size != sizeof(uintptr_t) ||
+		    stash_param.obj_size > stash_capa.max_obj_size) {
+			APPL_EXIT_FAILURE("Unsupported odp-stash object handle size:%u, max:%u",
+					  stash_param.obj_size, stash_capa.max_obj_size);
+		}
+		stash_param.cache_size = 0; /* No core local caching */
+
+		stash = odp_stash_create("pktin.pktin_queue_stash", &stash_param);
+		if (stash == ODP_STASH_INVALID)
+			APPL_EXIT_FAILURE("odp_stash_create() fails");
+
+		pktio_shm->pktin.pktin_queue_stash = stash;
+	}
+
+	/*
+	 * Create a stash to hold the shared tx-burst buffers,
+	 * used when draining the available tx-burst buffers
+	 */
+	odp_stash_param_init(&stash_param);
+	stash_param.type = ODP_STASH_TYPE_FIFO;
+	stash_param.put_mode = ODP_STASH_OP_MT;
+	stash_param.get_mode = ODP_STASH_OP_MT;
+	stash_param.num_obj = MAX_TX_BURST_BUFS * IF_MAX_NUM;
+	if (stash_param.num_obj > stash_capa.max_num_obj)
+		APPL_EXIT_FAILURE("Unsupported odp-stash number of objects:%" PRIu64 "",
+				  stash_param.num_obj);
+	stash_param.obj_size = sizeof(tx_burst_t *); /* stash pointers */
+	if (!POWEROF2(stash_param.obj_size) ||
+	    stash_param.obj_size != sizeof(uintptr_t) ||
+	    stash_param.obj_size > stash_capa.max_obj_size) {
+		APPL_EXIT_FAILURE("Unsupported odp-stash object handle size:%u",
+				  stash_param.obj_size);
+	}
+	stash_param.cache_size = 0; /* No core local caching */
+
+	stash = odp_stash_create("pktout.tx-burst-stash", &stash_param);
+	if (stash == ODP_STASH_INVALID)
+		APPL_EXIT_FAILURE("odp_stash_create() fails");
+	pktio_shm->pktout.tx_burst_stash = stash;
+
+	/* Misc inits: */
+	for (int i = 0; i < MAX_RX_PKT_QUEUES; i++) {
+		pktio_shm->rx_pkt_queues[i].pos = i;
+		pktio_shm->rx_pkt_queues[i].queue = EM_QUEUE_UNDEF;
+	}
+
+	odp_ticketlock_init(&pktio_shm->tbl_lookup.lock);
+	pktio_shm->tbl_lookup.tbl_idx = 0;
+	pktio_shm->tbl_lookup.ops = odph_cuckoo_table_ops;
+	odp_ticketlock_lock(&pktio_shm->tbl_lookup.lock);
+	pktio_shm->tbl_lookup.tbl =
+	pktio_shm->tbl_lookup.ops.f_create("RX-lookup-tbl", MAX_RX_PKT_QUEUES,
+					   sizeof(pkt_q_hash_key_t),
+					   sizeof(rx_pkt_queue_t));
+	odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock);
+	if (unlikely(pktio_shm->tbl_lookup.tbl == NULL))
+		APPL_EXIT_FAILURE("rx pkt lookup table creation fails");
+}
+
+void pktio_deinit(const appl_conf_t *appl_conf)
+{
+	if (pktin_polled_mode(appl_conf->pktio.in_mode))
+		odp_stash_destroy(pktio_shm->pktin.pktin_queue_stash);
+	odp_stash_destroy(pktio_shm->pktout.tx_burst_stash);
+
+	pktio_shm->tbl_lookup.ops.f_des(pktio_shm->tbl_lookup.tbl);
+}
+
+static void pktio_tx_buffering_create(int if_num)
+{
+	tx_burst_t *tx_burst;
+	odp_queue_param_t queue_param;
+	odp_queue_t odp_queue;
+	int pktout_idx;
+	odp_queue_t pktout_queue;
+	char name[ODP_QUEUE_NAME_LEN];
+
+	const int pktout_num_queues = pktio_shm->pktout.num_queues[if_num];
+
+	for (int i = 0; i < MAX_TX_BURST_BUFS; i++) {
+		tx_burst = &pktio_shm->tx_burst[if_num][i];
+
+		
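/* Init the burst buffer: event counter, lock and buffering queue */
+		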
odp_atomic_init_u64(&tx_burst->cnt, 0); + odp_spinlock_init(&tx_burst->lock); + + odp_queue_param_init(&queue_param); + queue_param.type = ODP_QUEUE_TYPE_PLAIN; + queue_param.enq_mode = ODP_QUEUE_OP_MT; + queue_param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; + /* ignore odp ordering, EM handles output order, just buffer */ + queue_param.order = ODP_QUEUE_ORDER_IGNORE; + + snprintf(name, ODP_QUEUE_NAME_LEN, "tx-burst-if%d-%03d", + if_num, i); + name[ODP_QUEUE_NAME_LEN - 1] = '\0'; + + odp_queue = odp_queue_create(name, &queue_param); + if (unlikely(odp_queue == ODP_QUEUE_INVALID)) + APPL_EXIT_FAILURE("odp_queue_create() fails:if=%d(%d)", + if_num, i); + tx_burst->queue = odp_queue; + tx_burst->if_port = if_num; + + pktout_idx = i % pktout_num_queues; + pktout_queue = pktio_shm->pktout.queues[if_num][pktout_idx]; + tx_burst->pktout_queue = pktout_queue; + + /* + * Store each tx burst into the tx_burst_stash, stash used when + * draining the available tx-burst buffers. + */ + uintptr_t tx_burst_uintptr = (uintptr_t)tx_burst; + int ret = odp_stash_put_ptr(pktio_shm->pktout.tx_burst_stash, + &tx_burst_uintptr, 1); + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("enqueue fails"); + } +} + +static void pktio_tx_buffering_destroy(void) +{ + tx_burst_t *tx_burst; + int num; + + while ((tx_burst = tx_drain_burst_acquire()) != NULL) { + do { + num = odp_queue_deq_multi(tx_burst->queue, + pktio_locm.ev_burst, + MAX_PKT_BURST_TX); + if (unlikely(num <= 0)) + break; + + odp_atomic_sub_u64(&tx_burst->cnt, (uint64_t)num); + odp_event_free_multi(pktio_locm.ev_burst, num); + } while (num > 0); + + odp_queue_destroy(tx_burst->queue); + } +} + +static inline void +pktin_queue_stashing_create(int if_num, pktin_mode_t in_mode) +{ + int num_rx = pktio_shm->pktin.num_queues[if_num]; + uintptr_t uintptr; + int ret; + + for (int i = 0; i < num_rx; i++) { + if (in_mode == PLAIN_QUEUE) { + odp_queue_t queue; + + queue = pktio_shm->pktin.plain_queues[if_num][i]; + uintptr = (uintptr_t)queue; + } else /* DIRECT_RECV*/ { + odp_pktin_queue_t *pktin_qptr; + + pktin_qptr = &pktio_shm->pktin.pktin_queues[if_num][i]; + uintptr = (uintptr_t)pktin_qptr; + } + + /* + * Store the queue or the pktin_queue-ptr as an 'uintptr_t' + * in the stash. + */ + ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, + &uintptr, 1); + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("stash-put fails:%d", ret); + } +} + +static inline void +pktin_queue_queueing_destroy(void) +{ + pktin_mode_t in_mode = pktio_shm->pktin.in_mode; + + if (in_mode == PLAIN_QUEUE) { + while (plain_queue_acquire() != ODP_QUEUE_INVALID) + ; /* empty stash */ + } else if (in_mode == DIRECT_RECV) { + odp_pktin_queue_t *pktin_queue_ptr; + + while (pktin_queue_acquire(&pktin_queue_ptr) == 0) + ; /* empty stash */ + } +} + +static void +set_pktin_vector_params(odp_pktin_queue_param_t *pktin_queue_param, + odp_pool_t vec_pool, + const odp_pktio_capability_t *pktio_capa) +{ + uint32_t vec_size = PKTIO_VEC_SIZE; + uint64_t vec_tmo_ns = PKTIO_VEC_TMO; + + pktin_queue_param->vector.enable = true; + pktin_queue_param->vector.pool = vec_pool; + + if (vec_size > pktio_capa->vector.max_size || + vec_size < pktio_capa->vector.min_size) { + vec_size = (vec_size > pktio_capa->vector.max_size) ? 
+				pktio_capa->vector.max_size : pktio_capa->vector.min_size;
+		APPL_PRINT("\nWarning: Modified vector size to %u\n\n", vec_size);
+	}
+	pktin_queue_param->vector.max_size = vec_size;
+
+	if (vec_tmo_ns > pktio_capa->vector.max_tmo_ns ||
+	    vec_tmo_ns < pktio_capa->vector.min_tmo_ns) {
+		vec_tmo_ns = (vec_tmo_ns > pktio_capa->vector.max_tmo_ns) ?
+				pktio_capa->vector.max_tmo_ns : pktio_capa->vector.min_tmo_ns;
+		APPL_PRINT("\nWarning: Modified vector timeout to %" PRIu64 "\n\n", vec_tmo_ns);
+	}
+	pktin_queue_param->vector.max_tmo_ns = vec_tmo_ns;
+}
+
+/** Helper to pktio_create() for packet input configuration */
+static void pktin_config(const char *dev, int if_idx, odp_pktio_t pktio,
+			 const odp_pktio_capability_t *pktio_capa,
+			 int if_count, int num_workers, pktin_mode_t in_mode,
+			 bool pktin_vector)
+{
+	odp_pktin_queue_param_t pktin_queue_param;
+	int num_rx, max;
+	int ret;
+
+	odp_pktin_queue_param_init(&pktin_queue_param);
+
+	max = MIN((int)pktio_capa->max_input_queues, PKTIO_MAX_IN_QUEUES);
+	num_rx = 2 * (ROUND_UP(num_workers, if_count) / if_count);
+	num_rx = MIN(max, num_rx);
+
+	APPL_PRINT("\tmax number of pktio dev:'%s' input queues:%d, using:%d\n",
+		   dev, pktio_capa->max_input_queues, num_rx);
+
+	pktin_queue_param.hash_enable = 1;
+	pktin_queue_param.classifier_enable = 0;
+	pktin_queue_param.hash_proto.proto.ipv4_udp = 1;
+	pktin_queue_param.num_queues = num_rx;
+
+	if (pktin_polled_mode(in_mode)) {
+		pktin_queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+	} else if (pktin_sched_mode(in_mode)) {
+		pktin_queue_param.queue_param.type = ODP_QUEUE_TYPE_SCHED;
+		pktin_queue_param.queue_param.sched.prio = odp_schedule_default_prio();
+		if (in_mode == SCHED_PARALLEL)
+			pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+		else if (in_mode == SCHED_ATOMIC)
+			pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+		else /* in_mode == SCHED_ORDERED */
+			pktin_queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+
+		pktin_queue_param.queue_param.sched.group = em_odp_qgrp2odp(EM_QUEUE_GROUP_DEFAULT);
+
+		if (pktin_vector) {
+			if (!pktio_capa->vector.supported)
+				APPL_EXIT_FAILURE("pktin, dev:'%s': input vectors not supported",
+						  dev);
+			set_pktin_vector_params(&pktin_queue_param,
+						pktio_shm->pools.vecpool_odp,
+						pktio_capa);
+		}
+	}
+
+	ret = odp_pktin_queue_config(pktio, &pktin_queue_param);
+	if (ret < 0)
+		APPL_EXIT_FAILURE("pktin, dev:'%s': input queue config failed: %d",
+				  dev, ret);
+
+	if (in_mode == PLAIN_QUEUE) {
+		ret = odp_pktin_event_queue(pktio, pktio_shm->pktin.plain_queues[if_idx]/*out*/,
+					    num_rx);
+		if (ret != num_rx)
+			APPL_EXIT_FAILURE("pktin, dev:'%s': plain event queue query failed: %d",
+					  dev, ret);
+	} else if (pktin_sched_mode(in_mode)) {
+		odp_queue_t *pktin_sched_queues = &pktio_shm->pktin.sched_queues[if_idx][0];
+		em_queue_t *pktin_sched_em_queues = &pktio_shm->pktin.sched_em_queues[if_idx][0];
+
+		ret = odp_pktin_event_queue(pktio, pktin_sched_queues/*[out]*/, num_rx);
+		if (ret != num_rx)
+			APPL_EXIT_FAILURE("pktin, dev:'%s': odp_pktin_event_queue():%d",
+					  dev, ret);
+		/*
+		 * Create EM queues mapped to the ODP scheduled pktin event queues
+		 */
+		ret = em_odp_pktin_event_queues2em(pktin_sched_queues/*[in]*/,
+						   pktin_sched_em_queues/*[out]*/,
+						   num_rx);
+		if (ret != num_rx)
+			APPL_EXIT_FAILURE("pktin, dev:'%s': em_odp_pktin_event_queues2em():%d",
+					  dev, ret);
+	} else /* DIRECT_RECV */ {
+		ret = odp_pktin_queue(pktio, pktio_shm->pktin.pktin_queues[if_idx]/*[out]*/,
+				      num_rx);
+		if (ret != num_rx)
+			APPL_EXIT_FAILURE("pktin, dev:'%s': direct queue query failed: %d",
+					  dev, ret);
+	}
+
+	pktio_shm->pktin.num_queues[if_idx] = num_rx;
+
+	if (pktin_polled_mode(in_mode)) {
+		/*
+		 * Store all pktin queues in a stash - each core acquires
+		 * a pktin queue to use from this stash.
+		 */
+		pktin_queue_stashing_create(if_idx, in_mode);
+	}
+}
+
+/** Helper to pktio_create() for packet output configuration */
+static void pktout_config(const char *dev, int if_idx, odp_pktio_t pktio,
+			  const odp_pktio_capability_t *pktio_capa,
+			  int num_workers)
+{
+	odp_pktout_queue_param_t pktout_queue_param;
+	odp_pktio_op_mode_t mode_tx;
+	int num_tx, max;
+	int ret;
+
+	odp_pktout_queue_param_init(&pktout_queue_param);
+	mode_tx = ODP_PKTIO_OP_MT;
+	max = MIN((int)pktio_capa->max_output_queues, PKTIO_MAX_OUT_QUEUES);
+	num_tx = MIN(2 * num_workers, max);
+	APPL_PRINT("\tmax number of pktio dev:'%s' output queues:%d, using:%d\n",
+		   dev, pktio_capa->max_output_queues, num_tx);
+
+	pktout_queue_param.num_queues = num_tx;
+	pktout_queue_param.op_mode = mode_tx;
+
+	ret = odp_pktout_queue_config(pktio, &pktout_queue_param);
+	if (ret < 0)
+		APPL_EXIT_FAILURE("pktio output queue config failed dev:'%s' (%d)",
+				  dev, ret);
+
+	ret = odp_pktout_event_queue(pktio, pktio_shm->pktout.queues[if_idx],
+				     num_tx);
+	if (ret != num_tx || ret > PKTIO_MAX_OUT_QUEUES)
+		APPL_EXIT_FAILURE("pktio pktout queue query failed dev:'%s' (%d)",
+				  dev, ret);
+	pktio_shm->pktout.num_queues[if_idx] = num_tx;
+
+	/* Create Tx buffers */
+	pktio_tx_buffering_create(if_idx);
+}
+
+int /* if_id */
+pktio_create(const char *dev, pktin_mode_t in_mode, bool pktin_vector,
+	     int if_count, int num_workers)
+{
+	int if_idx = -1; /* return value */
+	odp_pktio_param_t pktio_param;
+	odp_pktio_t pktio;
+	odp_pktio_capability_t pktio_capa;
+	odp_pktio_config_t pktio_config;
+	odp_pktio_info_t info;
+	int ret;
+
+	odp_pktio_param_init(&pktio_param);
+
+	/* Packet input mode */
+	if (in_mode == DIRECT_RECV)
+		pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+	else if (in_mode == PLAIN_QUEUE)
+		pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
+	else if (pktin_sched_mode(in_mode))
+		pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+	else
+		APPL_EXIT_FAILURE("dev:'%s': unsupported pktin-mode:%d\n",
+				  dev, in_mode);
+
+	/* Packet output mode: QUEUE mode to preserve packet order if needed */
+	pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE;
+
+	pktio = odp_pktio_open(dev, pktio_shm->pools.pktpool_odp, &pktio_param);
+	if (pktio == ODP_PKTIO_INVALID)
+		APPL_EXIT_FAILURE("pktio create failed for dev:'%s'\n", dev);
+
+	if (odp_pktio_info(pktio, &info))
+		APPL_EXIT_FAILURE("pktio info failed dev:'%s'", dev);
+
+	if_idx = odp_pktio_index(pktio);
+	if (if_idx < 0 || if_idx >= IF_MAX_NUM)
+		APPL_EXIT_FAILURE("invalid pktio index:%d, dev:'%s'",
+				  if_idx, dev);
+
+	APPL_PRINT("\n%s(dev=%s):\n", __func__, dev);
+	APPL_PRINT("\tcreated pktio:%" PRIu64 " idx:%d, dev:'%s', drv:%s\n",
+		   odp_pktio_to_u64(pktio), if_idx, dev, info.drv_name);
+
+	ret = odp_pktio_capability(pktio, &pktio_capa);
+	if (ret != 0)
+		APPL_EXIT_FAILURE("pktio capability query failed: dev:'%s' (%d)",
+				  dev, ret);
+
+	odp_pktio_config_init(&pktio_config);
+	pktio_config.parser.layer = ODP_PROTO_LAYER_NONE;
+	/* Provide hint to pktio that packet references are not used */
+	pktio_config.pktout.bit.no_packet_refs = 1;
+
+	ret = odp_pktio_config(pktio, &pktio_config);
+	if (ret != 0)
+		APPL_EXIT_FAILURE("pktio config failed: dev:'%s' (%d)",
+				  dev, ret);
+
+	/* Pktin (Rx) config */
+	pktin_config(dev, if_idx, pktio, &pktio_capa,
+		     if_count, 
num_workers, in_mode, pktin_vector); + + /* Pktout (Tx) config */ + pktout_config(dev, if_idx, pktio, &pktio_capa, num_workers); + + /* Start the pktio to complete configuration... */ + ret = odp_pktio_start(pktio); + if (ret != 0) + APPL_EXIT_FAILURE("Unable to start dev:'%s'", dev); + /* + * ...and stop it immediately to block odp_pktin_recv() from receiving + * pkts until application setup is ready. + * The application will start pktio when ready through pktio_start(). + */ + ret = odp_pktio_stop(pktio); + if (ret != 0) + APPL_EXIT_FAILURE("Unable to stop dev:'%s'", dev); + + APPL_PRINT("\tcreated pktio dev:'%s' - input mode:%s, output mode:QUEUE", + dev, pktin_mode_str(in_mode)); + odp_pktio_print(pktio); + + pktio_shm->ifs.idx[pktio_shm->ifs.num_created] = if_idx; + pktio_shm->ifs.pktio_hdl[if_idx] = pktio; + pktio_shm->ifs.num_created++; + + return if_idx; +} + +void +pktio_start(void) +{ + if (pktio_shm->ifs.num_created != pktio_shm->ifs.count) + APPL_EXIT_FAILURE("Pktio IFs created:%d != IF count:%d", + pktio_shm->ifs.num_created, + pktio_shm->ifs.count); + + for (int i = 0; i < pktio_shm->ifs.count; i++) { + int if_idx = pktio_shm->ifs.idx[i]; + odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; + int ret = odp_pktio_start(pktio); + + if (unlikely(ret != 0)) + APPL_EXIT_FAILURE("Unable to start if:%d", if_idx); + APPL_PRINT("%s(): if:%d\n", __func__, if_idx); + } + + odp_mb_full(); + pktio_shm->pktio_started = 1; +} + +void pktio_halt(void) +{ + pktio_shm->pktio_started = 0; + odp_mb_full(); + APPL_PRINT("\n%s() on EM-core %d\n", __func__, em_core_id()); +} + +void pktio_stop(void) +{ + for (int i = 0; i < pktio_shm->ifs.count; i++) { + int if_idx = pktio_shm->ifs.idx[i]; + odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; + int ret = odp_pktio_stop(pktio); + + if (unlikely(ret != 0)) + APPL_EXIT_FAILURE("Unable to stop if:%d", if_idx); + APPL_PRINT("%s(): if:%d\n", __func__, if_idx); + } +} + +void pktio_close(void) +{ + for (int i = 0; i < pktio_shm->ifs.count; i++) { + int if_idx = pktio_shm->ifs.idx[i]; + odp_pktio_t pktio = pktio_shm->ifs.pktio_hdl[if_idx]; + int ret = odp_pktio_close(pktio); + + if (unlikely(ret != 0)) + APPL_EXIT_FAILURE("pktio close failed for if:%d", if_idx); + + pktio_shm->ifs.pktio_hdl[if_idx] = ODP_PKTIO_INVALID; + } + + if (pktin_polled_mode(pktio_shm->pktin.in_mode)) + pktin_queue_queueing_destroy(); + pktio_tx_buffering_destroy(); +} + +static inline int +pktin_queue_acquire(odp_pktin_queue_t **pktin_queue_ptr /*out*/) +{ + odp_pktin_queue_t *pktin_qptr; + uintptr_t pktin_qptr_uintptr; + + int ret = odp_stash_get_ptr(pktio_shm->pktin.pktin_queue_stash, + &pktin_qptr_uintptr, 1); + + if (unlikely(ret != 1)) + return -1; + + pktin_qptr = (odp_pktin_queue_t *)pktin_qptr_uintptr; + + *pktin_queue_ptr = pktin_qptr; + return 0; +} + +static inline void +pktin_queue_release(odp_pktin_queue_t *pktin_queue_ptr) +{ + uintptr_t pktin_qptr_uintptr; + + /* store the pointer as an 'uintptr_t' in the stash */ + pktin_qptr_uintptr = (uintptr_t)pktin_queue_ptr; + + int ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash, + &pktin_qptr_uintptr, 1); + if (unlikely(ret != 1)) + APPL_EXIT_FAILURE("stash-put fails:%d", ret); +} + +static inline odp_queue_t +plain_queue_acquire(void) +{ + odp_queue_t queue; + uintptr_t queue_uintptr; + + int ret = odp_stash_get_ptr(pktio_shm->pktin.pktin_queue_stash, + &queue_uintptr, 1); + if (unlikely(ret != 1)) + return ODP_QUEUE_INVALID; + + queue = (odp_queue_t)queue_uintptr; + + return queue; +} + +static inline void 
+plain_queue_release(odp_queue_t queue)
+{
+	uintptr_t queue_uintptr;
+
+	/* store the queue as an 'uintptr_t' in the stash */
+	queue_uintptr = (uintptr_t)queue;
+
+	int ret = odp_stash_put_ptr(pktio_shm->pktin.pktin_queue_stash,
+				    &queue_uintptr, 1);
+	if (unlikely(ret != 1))
+		APPL_EXIT_FAILURE("stash-put fails:%d", ret);
+}
+
+/*
+ * Helper to the pktin_pollfn_...() functions.
+ */
+static inline int /* nbr of pkts enqueued */
+pktin_lookup_enqueue(odp_packet_t pkt_tbl[], int pkts)
+{
+	const odph_table_get_value f_get = pktio_shm->tbl_lookup.ops.f_get;
+	rx_queue_burst_t *const rx_qbursts = pktio_locm.rx_qbursts;
+	int pkts_enqueued = 0; /* return value */
+	int valid_pkts = 0;
+
+	for (int i = 0; i < pkts; i++) {
+		const odp_packet_t pkt = pkt_tbl[i];
+		void *const pkt_data = odp_packet_data(pkt);
+
+		/*
+		 * If 'pktio_config.parser.layer =
+		 * ODP_PROTO_LAYER_L4;' is set, then the following
+		 * more robust checks can be used (though slower):
+		 * if (unlikely(!odp_packet_has_udp(pkt))) {
+		 *	odp_packet_free(pkt);
+		 *	continue;
+		 * }
+		 *
+		 * pkt_data = odp_packet_data(pkt);
+		 * ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data +
+		 *			   odp_packet_l3_offset(pkt));
+		 * udp = (odph_udphdr_t *)((uintptr_t)pkt_data +
+		 *			    odp_packet_l4_offset(pkt));
+		 */
+
+		/* Note: no actual checks if the headers are present */
+		odph_ipv4hdr_t *const ip = (odph_ipv4hdr_t *)
+			((uintptr_t)pkt_data + sizeof(odph_ethhdr_t));
+		odph_udphdr_t *const udp = (odph_udphdr_t *)
+			((uintptr_t)ip + sizeof(odph_ipv4hdr_t));
+		/*
+		 * NOTE! network-to-CPU conversion not needed here.
+		 * Setup stores network-order in hash to avoid
+		 * conversion for every packet.
+		 */
+		pktio_locm.keys[i].ip_dst = ip->dst_addr;
+		pktio_locm.keys[i].proto = ip->proto;
+		pktio_locm.keys[i].port_dst =
+			likely(ip->proto == ODPH_IPPROTO_UDP ||
+			       ip->proto == ODPH_IPPROTO_TCP) ?
+			udp->dst_port : 0;
+	}
+
+	for (int i = 0; i < pkts; i++) {
+		const odp_packet_t pkt = pkt_tbl[i];
+		rx_pkt_queue_t rx_pkt_queue;
+		em_queue_t queue;
+		int pos;
+
+		/* table(hash) lookup to find queue */
+		int ret = f_get(pktio_shm->tbl_lookup.tbl,
+				&pktio_locm.keys[i],
+				&rx_pkt_queue, sizeof(rx_pkt_queue_t));
+		if (likely(ret == 0)) {
+			/* found */
+			pos = rx_pkt_queue.pos;
+			queue = rx_pkt_queue.queue;
+		} else {
+			/* not found, use default queue if set */
+			pos = MAX_RX_PKT_QUEUES; /* reserved space, +1 */
+			queue = pktio_shm->default_queue;
+			if (unlikely(queue == EM_QUEUE_UNDEF)) {
+				odp_packet_free(pkt);
+				continue;
+			}
+		}
+
+		pktio_locm.positions[valid_pkts++] = pos;
+		rx_qbursts[pos].sent = 0;
+		rx_qbursts[pos].queue = queue;
+		rx_qbursts[pos].pkt_tbl[rx_qbursts[pos].pkt_cnt++] = pkt;
+	}
+
+	for (int i = 0; i < valid_pkts; i++) {
+		const int pos = pktio_locm.positions[i];
+
+		if (rx_qbursts[pos].sent)
+			continue;
+
+		const int num = rx_qbursts[pos].pkt_cnt;
+		const em_queue_t queue = rx_qbursts[pos].queue;
+
+		/* Enqueue pkts into em-odp */
+		pkts_enqueued += pkt_enqueue(rx_qbursts[pos].pkt_tbl,
+					     num, queue);
+		rx_qbursts[pos].sent = 1;
+		rx_qbursts[pos].pkt_cnt = 0;
+	}
+
+	return pkts_enqueued;
+}
+
+/*
+ * User provided function to poll for packet input in DIRECT_RECV-mode,
+ * given to EM via 'em_conf.input.input_poll_fn = pktin_pollfn_direct;'
+ * The function is of type 'em_input_poll_func_t'. See .h file.
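+ * EM calls this function on each core in the dispatch loop; the return value
+ * is the number of events enqueued into EM (0 when pktio has not yet been
+ * started or when no input queue/packets were available).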
+ */ +int pktin_pollfn_direct(void) +{ + odp_pktin_queue_t *pktin_queue_ptr; + odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; + int ret, pkts; + int poll_rounds = 0; + int pkts_enqueued = 0; /* return value */ + + if (unlikely(!pktio_shm->pktio_started)) + return 0; + + ret = pktin_queue_acquire(&pktin_queue_ptr /*out*/); + if (unlikely(ret != 0)) + return 0; + + do { + pkts = odp_pktin_recv(*pktin_queue_ptr, pkt_tbl, MAX_PKT_BURST_RX); + if (unlikely(pkts <= 0)) + goto pktin_poll_end; + + pkts_enqueued += pktin_lookup_enqueue(pkt_tbl, pkts); + + } while (pkts == MAX_PKT_BURST_RX && + ++poll_rounds < MAX_RX_POLL_ROUNDS); + +pktin_poll_end: + pktin_queue_release(pktin_queue_ptr); + + return pkts_enqueued; +} + +/* + * User provided function to poll for packet input in PLAIN_QUEUE-mode, + * given to EM via 'em_conf.input.input_poll_fn = pktin_pollfn_plainqueue;' + * The function is of type 'em_input_poll_func_t'. See .h file. + */ +int pktin_pollfn_plainqueue(void) +{ + odp_queue_t plain_queue; + odp_event_t ev_tbl[MAX_PKT_BURST_RX]; + odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; + int pkts; + int poll_rounds = 0; + int pkts_enqueued = 0; /* return value */ + + if (unlikely(!pktio_shm->pktio_started)) + return 0; + + plain_queue = plain_queue_acquire(); + if (unlikely(plain_queue == ODP_QUEUE_INVALID)) + return 0; + + do { + pkts = odp_queue_deq_multi(plain_queue, ev_tbl, MAX_PKT_BURST_RX); + if (unlikely(pkts <= 0)) + goto pktin_poll_end; + + odp_packet_from_event_multi(pkt_tbl, ev_tbl, pkts); + + pkts_enqueued += pktin_lookup_enqueue(pkt_tbl, pkts); + + } while (pkts == MAX_PKT_BURST_RX && + ++poll_rounds < MAX_RX_POLL_ROUNDS); + +pktin_poll_end: + plain_queue_release(plain_queue); + + return pkts_enqueued; +} + +static inline int +pktio_tx_burst(tx_burst_t *const tx_burst) +{ + if (odp_spinlock_is_locked(&tx_burst->lock) || + odp_spinlock_trylock(&tx_burst->lock) == 0) + return 0; + + const int num = odp_queue_deq_multi(tx_burst->queue, + pktio_locm.ev_burst, + MAX_PKT_BURST_TX); + if (unlikely(num <= 0)) { + odp_spinlock_unlock(&tx_burst->lock); + return 0; + } + + odp_atomic_sub_u64(&tx_burst->cnt, (uint64_t)num); + + const odp_queue_t pktout_queue = tx_burst->pktout_queue; + /* Enqueue a tx burst onto the pktio queue for transmission */ + int ret = odp_queue_enq_multi(pktout_queue, pktio_locm.ev_burst, num); + + odp_spinlock_unlock(&tx_burst->lock); + + if (unlikely(ret != num)) { + if (ret < 0) + ret = 0; + odp_event_free_multi(&pktio_locm.ev_burst[ret], num - ret); + } + + return ret; +} + +/** + * @brief User provided output-queue callback function (em_output_func_t). + * + * Transmit events(pkts) via Eth Tx queues. 
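+ *
+ * Registration sketch (illustrative, 'output_conf' is not a name from this
+ * file): set 'output_conf.output_fn = pktio_tx' and let
+ * 'output_conf.output_fn_args' point to a pktio_tx_fn_args_t in the
+ * em_output_queue_conf_t passed to em_queue_create() for an
+ * EM_QUEUE_TYPE_OUTPUT queue.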
+ *
+ * @return The number of events actually transmitted (<= num)
+ */
+int pktio_tx(const em_event_t events[], const unsigned int num,
+	     const em_queue_t output_queue, void *output_fn_args)
+{
+	/* Create idx to select tx-burst, always same idx for same em queue */
+	const int burst_idx = (int)((uintptr_t)output_queue %
+				    MAX_TX_BURST_BUFS);
+	pktio_tx_fn_args_t *const args = output_fn_args;
+	const int if_port = (int)(args->if_id % IF_MAX_NUM);
+	/* Select tx-burst onto which to temporarily store pkt/event until tx */
+	tx_burst_t *const tx_burst = &pktio_shm->tx_burst[if_port][burst_idx];
+	uint64_t prev_cnt;
+	int ret;
+
+	if (unlikely(num == 0 || !pktio_shm->pktio_started))
+		return 0;
+
+	/* Convert into ODP-events */
+	odp_event_t odp_events[num];
+
+	em_odp_events2odp(events, odp_events, num);
+
+	/*
+	 * Mark all events as "free" from EM point of view - ODP will transmit
+	 * and free the events (=odp-pkts).
+	 */
+	em_event_mark_free_multi(events, num);
+
+	/*
+	 * 'sched_ctx_type = em_sched_context_type_current(&src_sched_queue)'
+	 * could be used to determine the need for maintaining event order for
+	 * output. Also em_queue_get_type(src_sched_queue) could further be used
+	 * if not caring about a potentially ended sched-context caused by an
+	 * earlier call to em_atomic/ordered_processing_end().
+	 * Here, none of this is done, since every event will be buffered and
+	 * sent out in order regardless of sched context type or queue type.
+	 */
+
+	ret = odp_queue_enq_multi(tx_burst->queue, odp_events, num);
+	if (unlikely(ret < 0)) {
+		/* failure: don't return yet - a full burst may still be transmitted below */
+		ret = 0;
+	}
+
+	prev_cnt = odp_atomic_fetch_add_u64(&tx_burst->cnt, ret);
+	if (prev_cnt >= MAX_PKT_BURST_TX - 1)
+		(void)pktio_tx_burst(tx_burst);
+
+	if (unlikely(ret < (int)num))
+		em_event_unmark_free_multi(&events[ret], num - ret);
+
+	return ret;
+}
+
+static inline tx_burst_t *
+tx_drain_burst_acquire(void)
+{
+	tx_burst_t *tx_burst;
+	uintptr_t tx_burst_uintptr;
+
+	int ret = odp_stash_get_ptr(pktio_shm->pktout.tx_burst_stash,
+				    &tx_burst_uintptr, 1);
+	if (unlikely(ret != 1))
+		return NULL;
+
+	tx_burst = (tx_burst_t *)tx_burst_uintptr;
+	return tx_burst;
+}
+
+static inline void
+tx_drain_burst_release(tx_burst_t *tx_burst)
+{
+	uintptr_t tx_burst_uintptr = (uintptr_t)tx_burst;
+
+	int ret = odp_stash_put_ptr(pktio_shm->pktout.tx_burst_stash,
+				    &tx_burst_uintptr, 1);
+	if (unlikely(ret != 1))
+		APPL_EXIT_FAILURE("stash-put fails:%d", ret);
+}
+
+/*
+ * User provided function to drain buffered output,
+ * given to EM via 'em_conf.output.output_drain_fn = pktout_drainfn;'
+ * The function is of type 'em_output_drain_func_t'
+ */
+int pktout_drainfn(void)
+{
+	const uint64_t curr = odp_cpu_cycles(); /* core-local timestamp */
+	const uint64_t prev = pktio_locm.tx_prev_cycles;
+	const uint64_t diff = likely(curr >= prev) ?
+ curr - prev : UINT64_MAX - prev + curr + 1; + int ret = 0; + + /* TX burst queue drain */ + if (unlikely(diff > BURST_TX_DRAIN)) { + tx_burst_t *tx_drain_burst = tx_drain_burst_acquire(); + + if (tx_drain_burst) { + ret = pktio_tx_burst(tx_drain_burst); + /* Update timestamp for next round */ + pktio_locm.tx_prev_cycles = curr; + tx_drain_burst_release(tx_drain_burst); + } + } + + return ret; +} + +void pktio_add_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t port_dst, + em_queue_t queue) +{ + pkt_q_hash_key_t key; + int ret, idx; + + /* Store in network format to avoid conversion during Rx lookup */ + key.ip_dst = htonl(ipv4_dst); + key.port_dst = htons(port_dst); + key.proto = proto; + + odp_ticketlock_lock(&pktio_shm->tbl_lookup.lock); + + idx = pktio_shm->tbl_lookup.tbl_idx; + if (unlikely(idx != pktio_shm->rx_pkt_queues[idx].pos)) { + odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); + APPL_EXIT_FAILURE("tbl insertion failed, idx(%d) != pos(%d)", + idx, pktio_shm->rx_pkt_queues[idx].pos); + return; + } + + if (unlikely(em_queue_get_type(queue) == EM_QUEUE_TYPE_UNDEF)) { + odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); + APPL_EXIT_FAILURE("Invalid queue:%" PRI_QUEUE "", queue); + return; + } + + pktio_shm->rx_pkt_queues[idx].queue = queue; + + ret = pktio_shm->tbl_lookup.ops.f_put(pktio_shm->tbl_lookup.tbl, &key, + &pktio_shm->rx_pkt_queues[idx]); + if (likely(ret == 0)) + pktio_shm->tbl_lookup.tbl_idx++; + + odp_ticketlock_unlock(&pktio_shm->tbl_lookup.lock); + + if (unlikely(ret != 0)) + APPL_EXIT_FAILURE("tbl insertion failed"); +} + +int pktio_default_queue(em_queue_t queue) +{ + if (unlikely(em_queue_get_type(queue) == EM_QUEUE_TYPE_UNDEF)) { + APPL_EXIT_FAILURE("Invalid queue:%" PRI_QUEUE "", queue); + return -1; + } + + pktio_shm->default_queue = queue; + + return 0; +} + +em_queue_t pktio_lookup_sw(uint8_t proto, uint32_t ipv4_dst, uint16_t port_dst) +{ + em_queue_t queue; + rx_pkt_queue_t rx_pkt_queue; + int ret, pos; + /* Store in network format to avoid conversion during Rx lookup */ + pkt_q_hash_key_t key = {.ip_dst = htonl(ipv4_dst), + .port_dst = htons(port_dst), + .proto = proto}; + + /* table(hash) lookup to find queue */ + ret = pktio_shm->tbl_lookup.ops.f_get(pktio_shm->tbl_lookup.tbl, + &key, &rx_pkt_queue, + sizeof(rx_pkt_queue_t)); + + if (likely(ret == 0)) { + /* found */ + pos = rx_pkt_queue.pos; + queue = rx_pkt_queue.queue; + if (unlikely(queue != pktio_shm->rx_pkt_queues[pos].queue)) { + APPL_EXIT_FAILURE("%" PRI_QUEUE "!= %" PRI_QUEUE "", + queue, + pktio_shm->rx_pkt_queues[pos].queue); + return EM_QUEUE_UNDEF; + } + } else { + queue = EM_QUEUE_UNDEF; + } + + return queue; +} + +odp_pool_t pktio_pool_get(void) +{ + return pktio_shm->pools.pktpool_odp; +} diff --git a/programs/common/cm_pktio.h b/programs/common/cm_pktio.h index ddcd3f6c..b59223ff 100644 --- a/programs/common/cm_pktio.h +++ b/programs/common/cm_pktio.h @@ -1,584 +1,620 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef CM_PKTIO_H -#define CM_PKTIO_H - -#ifdef __cplusplus -extern "C" { -#endif - -#include -#include -#include -#include - -#include -#include -#include - -#define IPV4_PROTO_UDP ODPH_IPPROTO_UDP - -/** - * @def PKTIO_MAX_IN_QUEUES - * @brief Maximum number of odp pktio input queues per interface - */ -#define PKTIO_MAX_IN_QUEUES 32 - -/** - * @def PKTIO_MAX_OUT_QUEUES - * @brief Maximum number of odp pktio output queues per interface - */ -#define PKTIO_MAX_OUT_QUEUES 16 - -/** - * @def MAX_PKT_BURST_RX - * @brief Maximum number of packets received from a pktio input queue - * in one burst - */ -#define MAX_PKT_BURST_RX 32 - -/** - * @def MAX_PKT_BURST_TX - * @brief Maximum number of packets bursted onto a pktout queue - */ -#define MAX_PKT_BURST_TX 32 - -/** - * @def MAX_TX_BURST_BUFS - * @brief Maximum number of tx burst buffers per interface - * - * Store Tx pkts in output buffers until a buffer has 'MAX_PKT_BURST_TX' pkts, - * then transmit the whole burst of pkts instead of one by one. - */ -#define MAX_TX_BURST_BUFS EM_MAX_CORES - -/** - * @def MAX_RX_PKT_QUEUES - * @brief - */ -#define MAX_RX_PKT_QUEUES (4 * 64) - -/** - * @def MAX_RX_POLL_ROUNDS - * @brief - */ -#define MAX_RX_POLL_ROUNDS 4 - -/** - * @def BURST_TX_DRAIN - * @brief The number of core cycles between timed TX buf drain operations - */ -#define BURST_TX_DRAIN (400000ULL) /* around 200us at 2 Ghz */ - -/** Ethernet MAC address */ -typedef union { - uint8_t u8[6]; - uint16_t u16[3]; -} mac_addr_t; - -/** IPv4 address */ -typedef union { - uint8_t u8[4]; - uint16_t u16[2]; - uint32_t u32; -} ipv4_addr_t; - -/** - * @brief pkt header fields to use as hash key - * - * Fields from incoming packets used for destination em-odp queue lookup. 
- */ -struct pkt_dst_tuple { - /* uint32_t ip_src;*/ - uint32_t ip_dst; - /* uint16_t port_src;*/ - uint16_t port_dst; - uint16_t proto; -} __attribute__((__packed__)); - -/** Use the struct pkt_dst_tuple as hash key for em-odp queue lookups */ -typedef struct pkt_dst_tuple pkt_q_hash_key_t; - -/* Keep size multiple of 32-bits for faster hash-crc32 calculation*/ -ODP_STATIC_ASSERT(sizeof(pkt_q_hash_key_t) % sizeof(uint32_t) == 0, - "HASH_KEY_NOT_MULTIP_OF_32__ERROR"); - -/** - * @brief Info about em-odp queue to use, returned by hash lookup - * - * Information about an em-odp queue used for pktio, stored in a hash table and - * used when doing a tbl lookup to determine the destination em-odp queue - * for a received packet. - */ -typedef struct { - int pos; - em_queue_t queue; -} rx_pkt_queue_t; - -/** - * @brief Tx pkt burst buffer - * - * Buffer up to 'MAX_PKT_BURST_TX' pkts before bursting them all onto - * the associated 'pktout_queue' at once. - */ -typedef struct tx_burst { - /** store tx pkts temporaily in 'queue' before bursting onto tx */ - odp_queue_t queue ODP_ALIGNED_CACHE; - /** count the number of events in 'queue', updated atomically */ - odp_atomic_u64_t cnt; - /** lock needed when dequeueing from 'queue' */ - odp_spinlock_t lock; - /** store the output interface port also here for easy access */ - int if_port; - /** Transmit burst using this pktout_queue */ - odp_queue_t pktout_queue; -} tx_burst_t; - -/** - * @brief Rx pkt storage for pkts destined to the same em-odp queue - * - * Temporary storage for events to be enqueued onto the _same_ queue - * after receiving a packet burst on Rx - */ -typedef struct { - int sent; - int pkt_cnt; - em_queue_t queue; - odp_packet_t pkt_tbl[MAX_PKT_BURST_RX]; -} rx_queue_burst_t; - -/** - * @brief Pktio shared memory - * - * Collection of shared data used by pktio Rx&Tx - */ -typedef struct { - /** flag set after pktio_start() - prevent pkio rx&tx before started */ - int pktio_started; - - /** Default queue to use for incoming pkts without a dedicated queue */ - em_queue_t default_queue; - - struct { - /** EM pool for pktio, only used with '--pktpool-em' option */ - em_pool_t pktpool_em; - - /** ODP pool for pktio: - * 1. Subpool of 'pktpool_em' when using '--pktpool-em' option - * or - * 2. Direct ODP pkt pool when using '--pktpool-odp' option - */ - odp_pool_t pktpool_odp; - } pools; - - /** Packet I/O Interfaces */ - struct { - /** The number of pktio interfaces used */ - int count; - /** Interfaces created so far (up to '.count'), startup only */ - int num_created; - /** Interface indexes used */ - int idx[IF_MAX_NUM]; - /** ODP pktio handles, .pktio_hdl[idx] corresponds to idx=.idx[i] */ - odp_pktio_t pktio_hdl[IF_MAX_NUM]; - } ifs; - - /** Packet input and related resources */ - struct { - /* Packet input mode */ - pktin_mode_t in_mode; - - /** Number of input queues per interface */ - int num_queues[IF_MAX_NUM]; - - /** pktin queues used in DIRECT_RECV-mode, per interface */ - odp_pktin_queue_t pktin_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; - - /** plain event queues used in PLAIN_QUEUE-mode, per interface */ - odp_queue_t plain_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; - - /** A queue that contains pointers to the shared - * pktin_queues[][] in DIRECT_RECV-mode or to the shared - * plain_queues[][] in PLAIN_QUEUE-mode. - * Each core needs to dequeue one packet input queue to be - * able to use it to receive packets. 
- */ - odp_stash_t pktin_queue_stash; - } pktin; - - /** Packet output and related resources */ - struct { - /** Number of pktio output queues per interface */ - int num_queues[IF_MAX_NUM]; - - /** All pktio output queues used, per interface */ - odp_queue_t queues[IF_MAX_NUM][PKTIO_MAX_OUT_QUEUES]; - - /** A stash that contains the shared tx_burst[][] entries. - * Used when draining the available tx-burst buffers - */ - odp_stash_t tx_burst_stash; - } pktout; - - /** Info about the em-odp queues configured for pktio, store in hash */ - rx_pkt_queue_t rx_pkt_queues[MAX_RX_PKT_QUEUES]; - - /** Pkt lookup table, lookup destination em-odp queue for Rx pkts */ - struct { - odph_table_ops_t ops; - odph_table_t tbl; - int tbl_idx; - odp_ticketlock_t lock; - } tbl_lookup; - - /** Tx burst buffers per interface */ - tx_burst_t tx_burst[IF_MAX_NUM][MAX_TX_BURST_BUFS] ODP_ALIGNED_CACHE; -} pktio_shm_t; - -/** - * @brief Pktio core-local memory - * - * Collection of core local (not shared) data used by pktio Rx&Tx - */ -typedef struct { - /** Event contains the currently used pktio input queue */ - odp_event_t pktin_queue_event; - /** Determine need for timed drain of pktio Tx queues */ - uint64_t tx_prev_cycles; - /** Array of hash keys for the current received Rx pkt burst */ - pkt_q_hash_key_t keys[MAX_PKT_BURST_RX]; - /** Array of positions into rx_qbursts[], filled from hash lookup */ - int positions[MAX_PKT_BURST_RX]; - /** Grouping of Rx pkts per destination em-odp queue */ - rx_queue_burst_t rx_qbursts[MAX_RX_PKT_QUEUES + 1]; /* +1=default Q */ - /** Temporary storage of Tx pkt burst */ - odp_event_t ev_burst[MAX_PKT_BURST_TX]; -} pktio_locm_t; - -/** - * Reserve shared memory for pktio - * - * Must be called once at startup. Additionally each EM-core needs to call the - * pktio_mem_lookup() function before using any further pktio resources. - */ -void pktio_mem_reserve(void); -/** - * Lookup shared memory for pktio - * - * Must be called once by each EM-core before using any further pktio resources. - * - * @param is_thread_per_core true: EM running in thread-per-core mode - * false: EM running in process-per-core mode - */ -void pktio_mem_lookup(bool is_thread_per_core); - -void pktio_mem_free(void); - -void pktio_pool_create(int if_count, bool pktpool_em); -void pktio_pool_destroy(bool pktpool_em); - -void pktio_init(const appl_conf_t *appl_conf); -void pktio_deinit(const appl_conf_t *appl_conf); - -int pktio_create(const char *dev, int num_workers, pktin_mode_t in_mode); -void pktio_start(void); -void pktio_halt(void); -void pktio_stop(void); -void pktio_close(void); - -/** - * @brief Poll input resources for pkts/events in DIRECT_RECV-mode - * and enqueue into EM queues. - * - * Given to EM via 'em_conf.input.input_poll_fn' - EM will call this on - * each core in the dispatch loop. - * The function is of type 'em_input_poll_func_t' - * - * @return number of pkts/events received from input and enqueued into EM - */ -int pktin_pollfn_direct(void); - -/** - * @brief Poll input resources for pkts/events in PLAIN_QUEUE-mode - * and enqueue into EM queues. - * - * Given to EM via 'em_conf.input.input_poll_fn' - EM will call this on - * each core in the dispatch loop. - * The function is of type 'em_input_poll_func_t' - * - * @return number of pkts/events received from input and enqueued into EM - */ -int pktin_pollfn_plainqueue(void); - -/** - * @brief Drain buffered output - ensure low rate flows are also sent out. 
- * - * Useful in situations where output is buffered and sent out in bursts when - * enough output has been gathered - single events or low rate flows may, - * without this function, never be sent out (or too late) if the buffering - * threshold has not been reached. - * - * Given to EM via 'em_conf.output.output_drain_fn' - EM will call this on - * each core in the dispatch loop. - * The function is of type 'em_output_drain_func_t' - * - * @return number of events successfully drained and sent for output - */ -int pktout_drainfn(void); - -/** - * @brief User provided EM output-queue callback function ('em_output_func_t') - * - * Transmit events(pkts) using the given config onto Eth-tx - * - * Buffers the given 'events' in a Tx burst buffer and when full transmits - * the whole burst from the buffer at once. - * - * @param events[] Events to be sent - * @param num Number of entries in 'events[]' - * @param output_queue EM output queue the events were sent into (em_send*()) - * @param output_fn_args Function args specific to the output-queue - * Note: here it will be a 'pktio_tx_fn_args_t' pointer - * - * @return number of events successfully sent (equal to num if all successful) - */ -int pktio_tx(const em_event_t events[], const unsigned int num, - const em_queue_t output_queue, void *output_fn_args); -/** - * @typedef pktio_tx_fn_args_t - * User defined arguments to the EM output queue callback function - */ -typedef struct { - /** Pktio Tx interface ID */ - int if_id; - /* add more if needed */ -} pktio_tx_fn_args_t; - -/** - * Associate an EM-queue with a packet-I/O flow. - * - * Received packets matching the set destination IP-addr/port - * will end up in the EM-queue 'queue'. - */ -void pktio_add_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t l4_port_dst, - em_queue_t queue); - -/** - * Remove the association between a packet-IO flow and an EM-queue. - * - * No further received frames will end up in the EM-queue 'queue' - */ -void pktio_rem_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t l4_port_dst, - em_queue_t queue); - -/** - * Set the default EM-queue for packet I/O - */ -int pktio_default_queue(em_queue_t queue); - -/** - * Provide applications a way to do a hash-lookup (e.g. sanity check etc.) - */ -em_queue_t pktio_lookup_sw(uint8_t proto, uint32_t ipv4_dst, - uint16_t l4_port_dst); - -odp_pool_t pktio_pool_get(void); - -static inline odp_packet_t -pktio_odp_packet_get(em_event_t em_event) -{ - return odp_packet_from_event(em_odp_event2odp(em_event)); -} - -static inline em_event_t -pktio_em_event_get(odp_packet_t odp_pkt) -{ - return em_odp_event2em(odp_packet_to_event(odp_pkt)); -} - -static inline uint8_t * -pktio_get_frame(em_event_t event) -{ - odp_packet_t pkt = pktio_odp_packet_get(event); - - return odp_packet_data(pkt); -} - -static inline uint32_t -pktio_get_frame_len(em_event_t event) -{ - odp_packet_t pkt = pktio_odp_packet_get(event); - - return odp_packet_len(pkt); -} - -static inline int -pktio_input_port(em_event_t event) -{ - odp_packet_t pkt = pktio_odp_packet_get(event); - int input_port = odp_packet_input_index(pkt); - - if (unlikely(input_port < 0)) - return 0; - - return input_port; -} - -/** - * Get the protocol, IPv4 destination address and destination L4 port the - * packet-event was sent to. 
- */ -static inline void -pktio_get_dst(em_event_t event, uint8_t *proto__out, - uint32_t *ipv4_dst__out, uint16_t *l4_port_dst__out) -{ - odp_packet_t pkt = pktio_odp_packet_get(event); - void *pkt_data; - odph_ipv4hdr_t *ip; - odph_udphdr_t *udp; - - /* if (odp_packet_has_ipv4(pkt)) { - * ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL); - * *proto__out = ip->proto; - * *ipv4_dst__out = ntohl(ip->dst_addr); - * } else { - * *proto__out = 0; - * *ipv4_dst__out = 0; - * } - * - * if (odp_packet_has_udp(pkt)) { - * udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL); - * *port_dst__out = ntohs(udp->dst_port); - * } else { - * *port_dst__out = 0; - * } - */ - - /* Note: no actual checks if the headers are present */ - pkt_data = odp_packet_data(pkt); - ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + - sizeof(odph_ethhdr_t)); - udp = (odph_udphdr_t *)((uintptr_t)ip + - sizeof(odph_ipv4hdr_t)); - - *proto__out = ip->proto; - *ipv4_dst__out = ntohl(ip->dst_addr); - *l4_port_dst__out = ntohs(udp->dst_port); -} - -static inline void -pktio_swap_addrs(em_event_t event) -{ - odp_packet_t pkt = pktio_odp_packet_get(event); - void *pkt_data; - odph_ethhdr_t *eth; - odph_ethaddr_t eth_tmp_addr; - odph_ipv4hdr_t *ip; - odp_u32be_t ip_tmp_addr; - odph_udphdr_t *udp; - odp_u16be_t udp_tmp_port; - - /* - * if (odp_packet_has_eth(pkt)) { - * eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL); - * eth_tmp_addr = eth->dst; - * eth->dst = eth->src; - * eth->src = eth_tmp_addr; - * } - * - * if (odp_packet_has_ipv4(pkt)) { - * ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL); - * ip_tmp_addr = ip->src_addr; - * ip->src_addr = ip->dst_addr; - * ip->dst_addr = ip_tmp_addr; - * } - * - * if (odp_packet_has_udp(pkt)) { - * udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL); - * udp_tmp_port = udp->src_port; - * udp->src_port = udp->dst_port; - * udp->dst_port = udp_tmp_port; - * } - */ - - /* Note: no actual checks if headers are present */ - pkt_data = odp_packet_data(pkt); - eth = (odph_ethhdr_t *)pkt_data; - ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + - sizeof(odph_ethhdr_t)); - udp = (odph_udphdr_t *)((uintptr_t)ip + - sizeof(odph_ipv4hdr_t)); - eth_tmp_addr = eth->dst; - eth->dst = eth->src; - eth->src = eth_tmp_addr; - - ip_tmp_addr = ip->src_addr; - ip->src_addr = ip->dst_addr; - ip->dst_addr = ip_tmp_addr; - - udp_tmp_port = udp->src_port; - udp->src_port = udp->dst_port; - udp->dst_port = udp_tmp_port; -} - -static inline em_event_t -pktio_copy_event(em_event_t event) -{ - return em_event_clone(event, EM_POOL_UNDEF); -} - -/** - * Convert an IP-address to ascii string format. - */ -static inline void -ipaddr_tostr(uint32_t ip_addr, char *const ip_addr_str__out, int strlen) -{ - unsigned char *const ucp = (unsigned char *)&ip_addr; - -#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN - snprintf(ip_addr_str__out, strlen, "%d.%d.%d.%d", - ucp[3] & 0xff, ucp[2] & 0xff, ucp[1] & 0xff, ucp[0] & 0xff); -#elif ODP_BYTE_ORDER == ODP_BIG_ENDIAN - snprintf(ip_addr_str__out, strlen, "%d.%d.%d.%d", - ucp[0] & 0xff, ucp[1] & 0xff, ucp[2] & 0xff, ucp[3] & 0xff); -#else - #error ODP_BYTE_ORDER invalid -#endif - - ip_addr_str__out[strlen - 1] = '\0'; -} - -#ifdef __cplusplus -} -#endif - -#endif /* CM_PKTIO_H */ +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CM_PKTIO_H
+#define CM_PKTIO_H
+
+#ifdef __cplusplus
extern "C" {
+#endif
+
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#define IPV4_PROTO_UDP ODPH_IPPROTO_UDP
+
+/**
+ * @def PKTIO_MAX_IN_QUEUES
+ * @brief Maximum number of odp pktio input queues per interface
+ */
+#define PKTIO_MAX_IN_QUEUES 32
+
+/**
+ * @def PKTIO_MAX_OUT_QUEUES
+ * @brief Maximum number of odp pktio output queues per interface
+ */
+#define PKTIO_MAX_OUT_QUEUES 16
+
+/**
+ * @def MAX_PKT_BURST_RX
+ * @brief Maximum number of packets received from a pktio input queue
+ *        in one burst in polled pktin-mode (DIRECT_RECV, PLAIN_QUEUE)
+ */
+#define MAX_PKT_BURST_RX 32
+
+/**
+ * @def MAX_PKT_BURST_TX
+ * @brief Maximum number of packets bursted onto a pktout queue
+ */
+#define MAX_PKT_BURST_TX 32
+
+/**
+ * @def MAX_TX_BURST_BUFS
+ * @brief Maximum number of tx burst buffers per interface
+ *
+ * Store Tx pkts in output buffers until a buffer has 'MAX_PKT_BURST_TX' pkts,
+ * then transmit the whole burst of pkts instead of one by one.
+ */
+#define MAX_TX_BURST_BUFS EM_MAX_CORES
+
+/**
+ * @def MAX_RX_PKT_QUEUES
+ * @brief Maximum number of EM queues that Rx pkts can be directed to,
+ *        i.e. the max number of entries in the Rx pkt lookup table
+ */
+#define MAX_RX_PKT_QUEUES (4 * 64)
+
+/**
+ * @def MAX_RX_POLL_ROUNDS
+ * @brief Maximum number of Rx burst-receive rounds per input poll operation
+ */
+#define MAX_RX_POLL_ROUNDS 4
+
+/**
+ * @def BURST_TX_DRAIN
+ * @brief The number of core cycles between timed TX buf drain operations
+ */
+#define BURST_TX_DRAIN (400000ULL) /* around 200us at 2 GHz */
+
+/** Ethernet MAC address */
+typedef union {
+	uint8_t u8[6];
+	uint16_t u16[3];
+} mac_addr_t;
+
+/** IPv4 address */
+typedef union {
+	uint8_t u8[4];
+	uint16_t u16[2];
+	uint32_t u32;
+} ipv4_addr_t;
+
+/**
+ * @brief pkt header fields to use as hash key
+ *
+ * Fields from incoming packets used for destination em-odp queue lookup.
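+ * The key fields are stored in network byte order (see pktio_add_queue() in
+ * cm_pktio.c), so the Rx lookup needs no per-packet byte order conversion.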
+ */
+struct pkt_dst_tuple {
+	/* uint32_t ip_src;*/
+	uint32_t ip_dst;
+	/* uint16_t port_src;*/
+	uint16_t port_dst;
+	uint16_t proto;
+} __attribute__((__packed__));
+
+/** Use the struct pkt_dst_tuple as hash key for em-odp queue lookups */
+typedef struct pkt_dst_tuple pkt_q_hash_key_t;
+
+/* Keep size multiple of 32-bits for faster hash-crc32 calculation*/
+ODP_STATIC_ASSERT(sizeof(pkt_q_hash_key_t) % sizeof(uint32_t) == 0,
+		  "HASH_KEY_NOT_MULTIP_OF_32__ERROR");
+
+/**
+ * @brief Info about em-odp queue to use, returned by hash lookup
+ *
+ * Information about an em-odp queue used for pktio, stored in a hash table and
+ * used when doing a tbl lookup to determine the destination em-odp queue
+ * for a received packet.
+ */
+typedef struct {
+	int pos;
+	em_queue_t queue;
+} rx_pkt_queue_t;
+
+/**
+ * @brief Tx pkt burst buffer
+ *
+ * Buffer up to 'MAX_PKT_BURST_TX' pkts before bursting them all onto
+ * the associated 'pktout_queue' at once.
+ */
+typedef struct tx_burst {
+	/** store tx pkts temporarily in 'queue' before bursting onto tx */
+	odp_queue_t queue ODP_ALIGNED_CACHE;
+	/** count the number of events in 'queue', updated atomically */
+	odp_atomic_u64_t cnt;
+	/** lock needed when dequeueing from 'queue' */
+	odp_spinlock_t lock;
+	/** store the output interface port also here for easy access */
+	int if_port;
+	/** Transmit burst using this pktout_queue */
+	odp_queue_t pktout_queue;
+} tx_burst_t;
+
+/**
+ * @brief Rx pkt storage for pkts destined to the same em-odp queue
+ *
+ * Temporary storage for events to be enqueued onto the _same_ queue
+ * after receiving a packet burst on Rx
+ */
+typedef struct {
+	int sent;
+	int pkt_cnt;
+	em_queue_t queue;
+	odp_packet_t pkt_tbl[MAX_PKT_BURST_RX];
+} rx_queue_burst_t;
+
+/**
+ * @brief Pktio shared memory
+ *
+ * Collection of shared data used by pktio Rx&Tx
+ */
+typedef struct {
+	/** flag set after pktio_start() - prevent pktio rx&tx before started */
+	int pktio_started;
+
+	/** Default queue to use for incoming pkts without a dedicated queue */
+	em_queue_t default_queue;
+
+	struct {
+		/** EM pool for pktio, only used with '--pktpool-em' option */
+		em_pool_t pktpool_em;
+
+		/** ODP pool for pktio:
+		 *  1. Subpool of 'pktpool_em' when using '--pktpool-em' option
+		 *     or
+		 *  2. Direct ODP pkt pool when using '--pktpool-odp' option
+		 */
+		odp_pool_t pktpool_odp;
+
+		/** EM vector pool for pktio, only used with '--pktin-vector' option */
+		em_pool_t vecpool_em;
+		/** ODP vector pool for pktio:
+		 *  1. Subpool of 'vecpool_em' when using '--vecpool-em' option
+		 *     or
+		 *  2. 
Direct ODP vector pool when using '--vecpool-odp' option + */ + odp_pool_t vecpool_odp; + } pools; + + /** Packet I/O Interfaces */ + struct { + /** The number of pktio interfaces used */ + int count; + /** Interfaces created so far (up to '.count'), startup only */ + int num_created; + /** Interface indexes used */ + int idx[IF_MAX_NUM]; + /** ODP pktio handles, .pktio_hdl[idx] corresponds to idx=.idx[i] */ + odp_pktio_t pktio_hdl[IF_MAX_NUM]; + } ifs; + + /** Packet input and related resources */ + struct { + /* Packet input mode */ + pktin_mode_t in_mode; + + /** Number of input queues per interface */ + int num_queues[IF_MAX_NUM]; + + /** pktin queues used in DIRECT_RECV-mode, per interface */ + odp_pktin_queue_t pktin_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; + + /** plain event queues used in PLAIN_QUEUE-mode, per interface */ + odp_queue_t plain_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; + + /** scheduled event queues used in SCHED_...-mode, per interface */ + odp_queue_t sched_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; + /** scheduled EM event queues created from sched_queues[][] above */ + em_queue_t sched_em_queues[IF_MAX_NUM][PKTIO_MAX_IN_QUEUES]; + + /** A queue that contains pointers to the shared + * pktin_queues[][] in DIRECT_RECV-mode or to the shared + * plain_queues[][] in PLAIN_QUEUE-mode. + * Each core needs to dequeue one packet input queue to be + * able to use it to receive packets. + */ + odp_stash_t pktin_queue_stash; + } pktin; + + /** Packet output and related resources */ + struct { + /** Number of pktio output queues per interface */ + int num_queues[IF_MAX_NUM]; + + /** All pktio output queues used, per interface */ + odp_queue_t queues[IF_MAX_NUM][PKTIO_MAX_OUT_QUEUES]; + + /** A stash that contains the shared tx_burst[][] entries. + * Used when draining the available tx-burst buffers + */ + odp_stash_t tx_burst_stash; + } pktout; + + /** Info about the em-odp queues configured for pktio, store in hash */ + rx_pkt_queue_t rx_pkt_queues[MAX_RX_PKT_QUEUES]; + + /** Pkt lookup table, lookup destination em-odp queue for Rx pkts */ + struct { + odph_table_ops_t ops; + odph_table_t tbl; + int tbl_idx; + odp_ticketlock_t lock; + } tbl_lookup; + + /** Tx burst buffers per interface */ + tx_burst_t tx_burst[IF_MAX_NUM][MAX_TX_BURST_BUFS] ODP_ALIGNED_CACHE; +} pktio_shm_t; + +/** + * @brief Pktio core-local memory + * + * Collection of core local (not shared) data used by pktio Rx&Tx + */ +typedef struct { + /** Event contains the currently used pktio input queue */ + odp_event_t pktin_queue_event; + /** Determine need for timed drain of pktio Tx queues */ + uint64_t tx_prev_cycles; + /** Array of hash keys for the current received Rx pkt burst */ + pkt_q_hash_key_t keys[MAX_PKT_BURST_RX]; + /** Array of positions into rx_qbursts[], filled from hash lookup */ + int positions[MAX_PKT_BURST_RX]; + /** Grouping of Rx pkts per destination em-odp queue */ + rx_queue_burst_t rx_qbursts[MAX_RX_PKT_QUEUES + 1]; /* +1=default Q */ + /** Temporary storage of Tx pkt burst */ + odp_event_t ev_burst[MAX_PKT_BURST_TX]; +} pktio_locm_t; + +/** + * Reserve shared memory for pktio + * + * Must be called once at startup. Additionally each EM-core needs to call the + * pktio_mem_lookup() function before using any further pktio resources. + */ +void pktio_mem_reserve(void); + +/** + * Lookup shared memory for pktio + * + * Must be called once by each EM-core before using any further pktio resources. 
+ * + * @param is_thread_per_core true: EM running in thread-per-core mode + * false: EM running in process-per-core mode + */ +void pktio_mem_lookup(bool is_thread_per_core); + +void pktio_mem_free(void); + +void pktio_pool_create(int if_count, bool pktpool_em, + bool pktin_vector, bool vecpool_em); +void pktio_pool_destroy(bool pktpool_em, bool pktin_vector, bool vecpool_em); + +void pktio_init(const appl_conf_t *appl_conf); +void pktio_deinit(const appl_conf_t *appl_conf); + +int pktio_create(const char *dev, pktin_mode_t in_mode, bool pktin_vector, + int if_count, int num_workers); +void pktio_start(void); +void pktio_halt(void); +void pktio_stop(void); +void pktio_close(void); + +const char *pktin_mode_str(pktin_mode_t in_mode); +bool pktin_polled_mode(pktin_mode_t in_mode); +bool pktin_sched_mode(pktin_mode_t in_mode); + +/** + * @brief Poll input resources for pkts/events in DIRECT_RECV-mode + * and enqueue into EM queues. + * + * Given to EM via 'em_conf.input.input_poll_fn' - EM will call this on + * each core in the dispatch loop. + * The function is of type 'em_input_poll_func_t' + * + * @return number of pkts/events received from input and enqueued into EM + */ +int pktin_pollfn_direct(void); + +/** + * @brief Poll input resources for pkts/events in PLAIN_QUEUE-mode + * and enqueue into EM queues. + * + * Given to EM via 'em_conf.input.input_poll_fn' - EM will call this on + * each core in the dispatch loop. + * The function is of type 'em_input_poll_func_t' + * + * @return number of pkts/events received from input and enqueued into EM + */ +int pktin_pollfn_plainqueue(void); + +/** + * @brief Drain buffered output - ensure low rate flows are also sent out. + * + * Useful in situations where output is buffered and sent out in bursts when + * enough output has been gathered - single events or low rate flows may, + * without this function, never be sent out (or too late) if the buffering + * threshold has not been reached. + * + * Given to EM via 'em_conf.output.output_drain_fn' - EM will call this on + * each core in the dispatch loop. + * The function is of type 'em_output_drain_func_t' + * + * @return number of events successfully drained and sent for output + */ +int pktout_drainfn(void); + +/** + * @brief User provided EM output-queue callback function ('em_output_func_t') + * + * Transmit events(pkts) using the given config onto Eth-tx + * + * Buffers the given 'events' in a Tx burst buffer and when full transmits + * the whole burst from the buffer at once. + * + * @param events[] Events to be sent + * @param num Number of entries in 'events[]' + * @param output_queue EM output queue the events were sent into (em_send*()) + * @param output_fn_args Function args specific to the output-queue + * Note: here it will be a 'pktio_tx_fn_args_t' pointer + * + * @return number of events successfully sent (equal to num if all successful) + */ +int pktio_tx(const em_event_t events[], const unsigned int num, + const em_queue_t output_queue, void *output_fn_args); +/** + * @typedef pktio_tx_fn_args_t + * User defined arguments to the EM output queue callback function + */ +typedef struct { + /** Pktio Tx interface ID */ + int if_id; + /* add more if needed */ +} pktio_tx_fn_args_t; + +/** + * Associate an EM-queue with a packet-I/O flow. + * + * Received packets matching the set destination IP-addr/port + * will end up in the EM-queue 'queue'. 
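+ *
+ * Example (illustrative values):
+ *   pktio_add_queue(IPV4_PROTO_UDP, ipv4_dst, udp_port, queue);
+ * where 'ipv4_dst' and 'udp_port' are given in host byte order.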
+ */
+void pktio_add_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t l4_port_dst,
+		     em_queue_t queue);
+
+/**
+ * Remove the association between a packet-I/O flow and an EM-queue.
+ *
+ * No further received frames will end up in the EM-queue 'queue'.
+ */
+void pktio_rem_queue(uint8_t proto, uint32_t ipv4_dst, uint16_t l4_port_dst,
+		     em_queue_t queue);
+
+/**
+ * Set the default EM-queue for packet I/O
+ */
+int pktio_default_queue(em_queue_t queue);
+
+/**
+ * Provide applications a way to do a hash-lookup (e.g. for sanity checks)
+ */
+em_queue_t pktio_lookup_sw(uint8_t proto, uint32_t ipv4_dst,
+			   uint16_t l4_port_dst);
+
+odp_pool_t pktio_pool_get(void);
+
+static inline odp_packet_t
+pktio_odp_packet_get(em_event_t em_event)
+{
+	return odp_packet_from_event(em_odp_event2odp(em_event));
+}
+
+static inline em_event_t
+pktio_em_event_get(odp_packet_t odp_pkt)
+{
+	return em_odp_event2em(odp_packet_to_event(odp_pkt));
+}
+
+static inline uint8_t *
+pktio_get_frame(em_event_t event)
+{
+	odp_packet_t pkt = pktio_odp_packet_get(event);
+
+	return odp_packet_data(pkt);
+}
+
+static inline uint32_t
+pktio_get_frame_len(em_event_t event)
+{
+	odp_packet_t pkt = pktio_odp_packet_get(event);
+
+	return odp_packet_len(pkt);
+}
+
+static inline int
+pktio_input_port(em_event_t event)
+{
+	odp_packet_t pkt = pktio_odp_packet_get(event);
+	int input_port = odp_packet_input_index(pkt);
+
+	if (unlikely(input_port < 0))
+		return 0;
+
+	return input_port;
+}
+
+/**
+ * Get the protocol, IPv4 destination address and destination L4 port the
+ * packet-event was sent to.
+ */
+static inline void
+pktio_get_dst(em_event_t event, uint8_t *proto__out,
+	      uint32_t *ipv4_dst__out, uint16_t *l4_port_dst__out)
+{
+	odp_packet_t pkt = pktio_odp_packet_get(event);
+	void *pkt_data;
+	odph_ipv4hdr_t *ip;
+	odph_udphdr_t *udp;
+
+	/* if (odp_packet_has_ipv4(pkt)) {
+	 *	ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+	 *	*proto__out = ip->proto;
+	 *	*ipv4_dst__out = ntohl(ip->dst_addr);
+	 * } else {
+	 *	*proto__out = 0;
+	 *	*ipv4_dst__out = 0;
+	 * }
+	 *
+	 * if (odp_packet_has_udp(pkt)) {
+	 *	udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+	 *	*l4_port_dst__out = ntohs(udp->dst_port);
+	 * } else {
+	 *	*l4_port_dst__out = 0;
+	 * }
+	 */
+
+	/* Note: no actual checks whether the headers are present */
+	pkt_data = odp_packet_data(pkt);
+	ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data +
+				sizeof(odph_ethhdr_t));
+	udp = (odph_udphdr_t *)((uintptr_t)ip +
+				sizeof(odph_ipv4hdr_t));
+
+	*proto__out = ip->proto;
+	*ipv4_dst__out = ntohl(ip->dst_addr);
+	*l4_port_dst__out = ntohs(udp->dst_port);
+}
+
+static inline void
+pktio_swap_eth_addrs(em_event_t event)
+{
+	odp_packet_t pkt = pktio_odp_packet_get(event);
+
+	odph_ethhdr_t *const eth = odp_packet_data(pkt);
+	const odph_ethaddr_t eth_tmp_addr = eth->dst;
+
+	eth->dst = eth->src;
+	eth->src = eth_tmp_addr;
+}
+
+static inline void
+pktio_swap_addrs(em_event_t event)
+{
+	odp_packet_t pkt = pktio_odp_packet_get(event);
+	void *pkt_data;
+	odph_ethhdr_t *eth;
+	odph_ethaddr_t eth_tmp_addr;
+	odph_ipv4hdr_t *ip;
+	odp_u32be_t ip_tmp_addr;
+	odph_udphdr_t *udp;
+	odp_u16be_t udp_tmp_port;
+
+	/*
+	 * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L2
+	 * if (odp_packet_has_eth(pkt)) {
+	 *	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	 *	eth_tmp_addr = eth->dst;
+	 *	eth->dst = eth->src;
+	 *	eth->src = eth_tmp_addr;
+	 * }
+	 *
+	 * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L3
+	 * if (odp_packet_has_ipv4(pkt)) {
+	 *	ip = (odph_ipv4hdr_t
*)odp_packet_l3_ptr(pkt, NULL); + * ip_tmp_addr = ip->src_addr; + * ip->src_addr = ip->dst_addr; + * ip->dst_addr = ip_tmp_addr; + * } + * + * Needs odp_pktio_config_t::parser.layer = ODP_PROTO_LAYER_L4 + * if (odp_packet_has_udp(pkt)) { + * udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL); + * udp_tmp_port = udp->src_port; + * udp->src_port = udp->dst_port; + * udp->dst_port = udp_tmp_port; + * } + */ + + /* Note: no actual checks if headers are present */ + pkt_data = odp_packet_data(pkt); + eth = (odph_ethhdr_t *)pkt_data; + ip = (odph_ipv4hdr_t *)((uintptr_t)pkt_data + + sizeof(odph_ethhdr_t)); + udp = (odph_udphdr_t *)((uintptr_t)ip + + sizeof(odph_ipv4hdr_t)); + eth_tmp_addr = eth->dst; + eth->dst = eth->src; + eth->src = eth_tmp_addr; + + ip_tmp_addr = ip->src_addr; + ip->src_addr = ip->dst_addr; + ip->dst_addr = ip_tmp_addr; + + udp_tmp_port = udp->src_port; + udp->src_port = udp->dst_port; + udp->dst_port = udp_tmp_port; +} + +static inline em_event_t +pktio_copy_event(em_event_t event) +{ + return em_event_clone(event, EM_POOL_UNDEF); +} + +/** + * Convert an IP-address to ascii string format. + */ +static inline void +ipaddr_tostr(uint32_t ip_addr, char *const ip_addr_str__out, int strlen) +{ + unsigned char *const ucp = (unsigned char *)&ip_addr; + +#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN + snprintf(ip_addr_str__out, strlen, "%d.%d.%d.%d", + ucp[3] & 0xff, ucp[2] & 0xff, ucp[1] & 0xff, ucp[0] & 0xff); +#elif ODP_BYTE_ORDER == ODP_BIG_ENDIAN + snprintf(ip_addr_str__out, strlen, "%d.%d.%d.%d", + ucp[0] & 0xff, ucp[1] & 0xff, ucp[2] & 0xff, ucp[3] & 0xff); +#else + #error ODP_BYTE_ORDER invalid +#endif + + ip_addr_str__out[strlen - 1] = '\0'; +} + +#ifdef __cplusplus +} +#endif + +#endif /* CM_PKTIO_H */ diff --git a/programs/common/cm_pool_config.h b/programs/common/cm_pool_config.h index 5b9c4c73..c5e33627 100644 --- a/programs/common/cm_pool_config.h +++ b/programs/common/cm_pool_config.h @@ -1,57 +1,56 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2017, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef CM_POOL_CONFIG_H -#define CM_POOL_CONFIG_H - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * Note: DEFAULT_POOL_ID = EM_POOL_DEFAULT, - * configuration given in em_conf_t to em_init(). - * The name of the default EM event pool is EM_POOL_DEFAULT_NAME (="default") - */ - -#define APPL_POOL_1 ((em_pool_t)2) -#define APPL_POOL_1_NAME "appl_pool_1" - -/* - * #define APPL_POOL_2 ((em_pool_t)3) - * #define APPL_POOL_2_NAME "appl_pool_2" - */ - -#ifdef __cplusplus -} -#endif - -#endif /* CM_POOL_CONFIG_H */ +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2017, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CM_POOL_CONFIG_H +#define CM_POOL_CONFIG_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Note: DEFAULT_POOL_ID = EM_POOL_DEFAULT, + * configuration given in em_conf_t to em_init(). + * The name of the default EM event pool is EM_POOL_DEFAULT_NAME (="default") + */ + +#define APPL_POOL_1_NAME "appl_pool_1" + +/* + * #define APPL_POOL_2 ((em_pool_t)3) + * #define APPL_POOL_2_NAME "appl_pool_2" + */ + +#ifdef __cplusplus +} +#endif + +#endif /* CM_POOL_CONFIG_H */ diff --git a/programs/common/cm_setup.c b/programs/common/cm_setup.c index 87a7bb6f..c6c38c18 100644 --- a/programs/common/cm_setup.c +++ b/programs/common/cm_setup.c @@ -1,1532 +1,1608 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine common initialization functions - * - */ - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "cm_setup.h" -#include "cm_pool_config.h" -#include "cm_pktio.h" - -/** - * @def USAGE_FMT - * Usage help string - */ -#define USAGE_FMT \ -"\n" \ -"Usage: %s APPL&EM-OPTIONS\n" \ -" E.g. 
%s -c 0xfe -p\n" \ -"\n" \ -"Event Machine (EM) example application.\n" \ -"\n" \ -"Mandatory EM-OPTIONS:\n" \ -" -c, --coremask Select the cores to use, hexadecimal\n" \ -" -p, --process-per-core Running EM with one process per core.\n" \ -" -t, --thread-per-core Running EM with one thread per core.\n" \ -" Select EITHER -p OR -t, but not both!\n" \ -"\n" \ -"Optional [APPL&EM-OPTIONS]\n" \ -" -d, --device-id Device-id, hexadecimal (default: 0x0)\n" \ -" -r, --dispatch-rounds Number of dispatch rounds (for testing)\n" \ -" -s, --startup-mode Application startup mode (for testing):\n" \ -" 0: Start-up & init all EM cores before appl-setup (default)\n" \ -" 1: Start-up & init only one EM core before appl-setup,\n" \ -" the rest of the EM-cores are init only after that.\n" \ -"Packet-IO\n" \ -" -m, --pktin-mode Select the packet-input mode to use:\n" \ -" 0: Direct mode: PKTIN_MODE_DIRECT (default)\n" \ -" 1: Plain queue mode: PKTIN_MODE_QUEUE\n" \ -" -i, --eth-interface Select the ethernet interface(s) to use\n" \ -" -e, --pktpool-em Packet-io pool is an EM-pool (default)\n" \ -" -o, --pktpool-odp Packet-io pool is an ODP-pool\n" \ -" Select EITHER -e OR -o, but not both!\n" \ -"Help\n" \ -" -h, --help Display help and exit.\n" \ -"\n" - -/** - * Stored command line arguments given at startup - * - * @see USAGE_FMT - */ -typedef struct { - /** EM cmd line args */ - struct { - /** EM device id */ - uint16_t device_id; - /** RunMode: EM run with a thread per core */ - int thread_per_core; - /** RunMode: EM run with a process per core */ - int process_per_core; - /** Number of EM-cores (== nbr of EM-threads or EM-processes) */ - int core_count; - /** Physical core mask, exact listing of cores for EM */ - em_core_mask_t phys_mask; - } args_em; - - /** Application cmd line args */ - struct { - /** Application name */ - char name[APPL_NAME_LEN]; - /** Start-up mode */ - startup_mode_t startup_mode; - /** Dispatch rounds before returning */ - uint32_t dispatch_rounds; - /** Packet I/O parameters */ - struct { - /** Packet input mode */ - pktin_mode_t in_mode; - /** Interface count */ - int if_count; - /** Interface names + placeholder for '\0' */ - char if_name[IF_MAX_NUM][IF_NAME_LEN + 1]; - /** Pktio is setup with an EM event-pool */ - bool pktpool_em; - /** Pktio is setup with an ODP pkt-pool */ - bool pktpool_odp; - } pktio; - } args_appl; -} parse_args_t; - -/** - * CPU config to be used - */ -typedef struct { - /** Number of CPUs to run EM-cores */ - int num_worker; - /** Worker_mask specifying cores for EM */ - odp_cpumask_t worker_mask; -} cpu_conf_t; - -/** - * Dispatch rounds for em_dispatch() during start-up to properly sync the - * cores to enter the main dispatch loop at roughly the same time. - */ -#define STARTUP_DISPATCH_ROUNDS 16 - -/** - * Dispatch rounds for em_dispatch() during program execution to regularly - * return from dipatch and inspect the 'appl_shm->exit_flag' value. Program - * termination will begin once a set 'appl_shm->exit_flags' has been noticed. - */ -#define EXIT_CHECK_DISPATCH_ROUNDS 20000 - -/** - * Dispatch rounds for em_dispatch() during termination to properly sync the - * cores and shutdown actions and allow for a graceful shutdown. 
- */ -#define TERM_DISPATCH_ROUNDS 16 - -static void -parse_args(int argc, char *argv[], parse_args_t *parse_args /* out */); - -static void -verify_cpu_setup(const parse_args_t *parsed, - cpu_conf_t *cpu_conf /* out */); - -static odp_instance_t -init_odp(const parse_args_t *parsed, const cpu_conf_t *cpu_conf); - -static void -init_sync(sync_t *const sync, int num_cpus); - -static void -init_em(const parse_args_t *parsed, em_conf_t *em_conf /* out */); - -static void -init_appl_conf(const parse_args_t *parsed, appl_conf_t *appl_conf /* out */); - -static void -create_pktio(appl_conf_t *appl_conf/*in/out*/, const cpu_conf_t *cpu_conf); -static void -term_pktio(const appl_conf_t *appl_conf); - -static int -create_odp_threads(odp_instance_t instance, - const parse_args_t *parsed, const cpu_conf_t *cpu_conf, - int (*start_fn)(void *fn_arg), void *fn_arg, - odph_thread_t thread_tbl[/*out*/]); -static int -run_core_fn(void *arg); - -static void -install_sig_handler(int signum, void (*sig_handler)(int), int flags); - -static void -sigchld_handler(int sig ODP_UNUSED); -static void -sigint_handler(int signo ODP_UNUSED); - -static void -usage(char *progname) -{ - APPL_PRINT(USAGE_FMT, NO_PATH(progname), NO_PATH(progname)); -} - -/** - * Global pointer to common application shared memory - */ -appl_shm_t *appl_shm; - -/** - * Common setup function for em-odp example programs - */ -int cm_setup(int argc, char *argv[]) -{ - /* use unbuffered stdout */ - if (setvbuf(stdout, NULL, _IONBF, 0) != 0) - APPL_EXIT_FAILURE("setvbuf() fails (errno(%i)=%s)", - errno, strerror(errno)); - - /* - * Parse the command line arguments - */ - parse_args_t parsed; /* filled during cmd line arg parsing */ - - memset(&parsed, 0, sizeof(parsed)); - parse_args(argc, argv, &parsed/* out */); - - /* - * Verify the cpu setup and extract the cpu config - */ - cpu_conf_t cpu_conf; - - memset(&cpu_conf, 0, sizeof(cpu_conf)); - verify_cpu_setup(&parsed, &cpu_conf/* out */); - - /* - * Init ODP with given args and cpu setup - * - * Calls odp_init_global() and odp_init_local() for this thread - * before returning. - */ - odp_instance_t instance; - - instance = init_odp(&parsed, &cpu_conf); - - APPL_PRINT("\n" - "*********************************************************\n" - "Setting up EM on ODP-version:\n" - "%s\n" - "*********************************************************\n" - "\n", - odp_version_impl_str()); - - /* - * Setup shared memory - * - * Reserve application shared memory in one chunk. - */ - uint32_t flags = 0; - -#if ODP_VERSION_API_NUM(1, 33, 0) > ODP_VERSION_API - flags |= ODP_SHM_SINGLE_VA; -#else - odp_shm_capability_t shm_capa; - int err = odp_shm_capability(&shm_capa); - - if (unlikely(err)) - APPL_EXIT_FAILURE("shm capability error:%d", err); - - if (shm_capa.flags & ODP_SHM_SINGLE_VA) - flags |= ODP_SHM_SINGLE_VA; -#endif - odp_shm_t shm = odp_shm_reserve("appl_shm", sizeof(appl_shm_t), - ODP_CACHE_LINE_SIZE, flags); - if (unlikely(shm == ODP_SHM_INVALID)) - APPL_EXIT_FAILURE("appl shared mem reservation failed"); - appl_shm = odp_shm_addr(shm); - if (unlikely(appl_shm == NULL)) - APPL_EXIT_FAILURE("obtaining shared mem addr failed"); - memset(appl_shm, 0, sizeof(appl_shm_t)); - - /* - * Initialize application start-up & exit synchronization - */ - sync_t *const sync = &appl_shm->sync; - - init_sync(sync, cpu_conf.num_worker); - - /* - * Init EM with given args - * - * Calls em_init() before returning. 
- */ - em_conf_t *const em_conf = &appl_shm->em_conf; - - init_em(&parsed, em_conf); - - /* - * Set application conf based on parsed cmd line arguments - */ - appl_conf_t *const appl_conf = &appl_shm->appl_conf; - - init_appl_conf(&parsed, appl_conf); - - /* - * Create packet-I/O, if requested - */ - if (appl_conf->pktio.if_count > 0) - create_pktio(appl_conf/*in/out*/, &cpu_conf); - - /* - * Signal handler for SIGCHLD in process-per-core mode - * - * Create a signal handler for the SIGCHLD signal that is sent - * to the parent process when a forked child process dies. - */ - if (em_conf->process_per_core) - install_sig_handler(SIGCHLD, sigchld_handler, 0); - - /* - * Signal handler for SIGINT (Ctrl-C) - * - * Create a signal handler for the SIGINT (Ctrl-C) signal to flag - * program termination. - * Set the 'SA_RESETHAND'-flag to reset the SIGINT handler to its - * default disposition after the first handling to be able to stop - * execution if the application misbehaves. - */ - install_sig_handler(SIGINT, sigint_handler, SA_RESETHAND); - - /* - * Create the odp-threads to use as EM-cores - * - * Create the odp-threads / EM-cores. Each EM-core will run the - * 'run_core_fn(appl_shm)' function in a thread pinned to a single cpu - * as specified by 'cpu_conf'. - */ - odph_thread_t *const thread_tbl = appl_shm->thread_tbl; - int ret = create_odp_threads(instance, &parsed, &cpu_conf, - run_core_fn /*fn*/, appl_shm /*fn_arg*/, - thread_tbl /*out*/); - if (ret != cpu_conf.num_worker) - APPL_EXIT_FAILURE("ODP thread creation failed:%d", ret); - - /* - * Wait for the created odp-threads / EM-cores to return - */ - ret = odph_thread_join(thread_tbl, cpu_conf.num_worker); - if (ret != cpu_conf.num_worker) - APPL_EXIT_FAILURE("ODP thread join failed:%d", ret); - - /* - * Teardown the application after all the odp-threads / EM-cores - * have ended: - */ - - /* - * Terminate packet-I/O, if set up - */ - if (appl_conf->pktio.if_count > 0) - term_pktio(appl_conf); - - /* - * Terminate EM - * - * All EM-cores have already run em_term_core() - */ - em_status_t stat = em_term(em_conf); - - if (stat != EM_OK) - APPL_EXIT_FAILURE("em_term():%" PRI_STAT "", stat); - - /* - * Free shared memory - */ - ret = odp_shm_free(shm); - if (ret != 0) - APPL_EXIT_FAILURE("appl shared mem free failed:%d", ret); - - /** - * Terminate ODP - */ - ret = odp_term_local(); - if (ret != 0) - APPL_EXIT_FAILURE("Last ODP local term failed:%d", ret); - ret = odp_term_global(instance); - if (ret != 0) - APPL_EXIT_FAILURE("odp_term_global() failed:%d", ret); - - APPL_PRINT("\nDone - exit\n\n"); - - return EXIT_SUCCESS; -} - -static odp_instance_t -init_odp(const parse_args_t *parsed, const cpu_conf_t *cpu_conf) -{ - odp_init_t init_params; - odp_instance_t instance; - int ret; - - /* Initialize the odp init params with 'default' values */ - odp_init_param_init(&init_params); - - /* Restrict odp worker threads to cores set in the 'worker_mask' */ - init_params.num_worker = cpu_conf->num_worker; - init_params.worker_cpus = &cpu_conf->worker_mask; - - /** - * Leave "init_params.control_cpus" unset to use odp default control - * cpus, which are the rest of installed cpus excluding worker cpus - * and CPU 0 when worker cpus don't have CPU 1 set. But if worker cpus - * have CPU 1 set, CPU 0 will be set as a control cpu. - */ - - /* - * List odp features not to be used in the examples. This may optimize - * performance. Note that a real application might need to change this! 
- */ - init_params.not_used.feat.cls = 1; /* don't use the odp classifier */ - init_params.not_used.feat.compress = 1; /* don't use the odp compress */ - init_params.not_used.feat.crypto = 1; /* don't use odp crypto */ - init_params.not_used.feat.ipsec = 1; /* don't use odp ipsec */ - init_params.not_used.feat.tm = 1; /* don't use the odp traffic manager*/ - - /* - * Set the memory model to use for odp: thread or process. - * parse_args() has verified .thread_per_core vs .process_per_core - */ - if (parsed->args_em.thread_per_core) - init_params.mem_model = ODP_MEM_MODEL_THREAD; - else - init_params.mem_model = ODP_MEM_MODEL_PROCESS; - - ret = odp_init_global(&instance, &init_params, NULL); - if (ret != 0) - APPL_EXIT_FAILURE("ODP global init failed:%d", ret); - - ret = odp_init_local(instance, ODP_THREAD_CONTROL); - if (ret != 0) - APPL_EXIT_FAILURE("ODP local init failed:%d", ret); - - /* Configure the scheduler */ - odp_schedule_config_t sched_config; - - odp_schedule_config_init(&sched_config); - /* EM does not need the ODP predefined scheduling groups */ - sched_config.sched_group.all = 0; - sched_config.sched_group.control = 0; - sched_config.sched_group.worker = 0; - ret = odp_schedule_config(&sched_config); - if (ret != 0) - APPL_EXIT_FAILURE("ODP schedule config failed:%d", ret); - - /* Print ODP system info */ - odp_sys_info_print(); - - return instance; -} - -static void -init_sync(sync_t *const sync, int num_cpus) -{ - odp_barrier_init(&sync->start_barrier, num_cpus); - odp_barrier_init(&sync->exit_barrier, num_cpus); - env_atomic64_init(&sync->exit_count); - env_atomic64_init(&sync->enter_count); -} - -static void -init_em(const parse_args_t *parsed, em_conf_t *em_conf /* out */) -{ - em_status_t stat; - - em_conf_init(em_conf); - - /* Set EM conf based on parsed cmd line arguments */ - em_conf->device_id = parsed->args_em.device_id; - em_conf->thread_per_core = parsed->args_em.thread_per_core; - em_conf->process_per_core = parsed->args_em.process_per_core; - em_conf->core_count = parsed->args_em.core_count; - em_conf->phys_mask = parsed->args_em.phys_mask; - - /* Event-Timer: disable=0, enable=1 */ - em_conf->event_timer = 1; - - /* - * Set the default pool config in em_conf, needed internally by EM - * at startup. 
- */ - em_pool_cfg_t default_pool_cfg; - - em_pool_cfg_init(&default_pool_cfg); /* mandatory */ - default_pool_cfg.event_type = EM_EVENT_TYPE_SW; - default_pool_cfg.align_offset.in_use = true; /* override config file */ - default_pool_cfg.align_offset.value = 0; /* set explicit '0 bytes' */ - default_pool_cfg.user_area.in_use = true; /* override config file */ - default_pool_cfg.user_area.size = 0; /* set explicit '0 bytes' */ - default_pool_cfg.num_subpools = 4; - default_pool_cfg.subpool[0].size = 256; - default_pool_cfg.subpool[0].num = 16384; - default_pool_cfg.subpool[0].cache_size = 64; - default_pool_cfg.subpool[1].size = 512; - default_pool_cfg.subpool[1].num = 1024; - default_pool_cfg.subpool[1].cache_size = 32; - default_pool_cfg.subpool[2].size = 1024; - default_pool_cfg.subpool[2].num = 1024; - default_pool_cfg.subpool[2].cache_size = 16; - default_pool_cfg.subpool[3].size = 2048; - default_pool_cfg.subpool[3].num = 1024; - default_pool_cfg.subpool[3].cache_size = 8; - - em_conf->default_pool_cfg = default_pool_cfg; - - /* - * User can override the EM default log functions by giving logging - * funcs of their own - here we just use the default (shown explicitly) - */ - em_conf->log.log_fn = NULL; - em_conf->log.vlog_fn = NULL; - - /* Packet-I/O */ - if (parsed->args_appl.pktio.if_count > 0) { - /* - * Request EM to poll input for pkts in the dispatch loop - */ - if (parsed->args_appl.pktio.in_mode == PLAIN_QUEUE) - em_conf->input.input_poll_fn = pktin_pollfn_plainqueue; - else /* DIRECT_RECV */ - em_conf->input.input_poll_fn = pktin_pollfn_direct; - - /* - * Request EM to drain buffered output in the dispatch loop - */ - em_conf->output.output_drain_fn = pktout_drainfn; /* user fn*/ - } - - /* - * Initialize the Event Machine. Every EM core still needs to call - * em_init_core() later. - * Note: the EM default pool config MUST be included in em_conf! - */ - stat = em_init(em_conf); - if (stat != EM_OK) - APPL_EXIT_FAILURE("em_init(), EM error:%" PRI_STAT "", stat); -} - -static void -init_appl_conf(const parse_args_t *parsed, appl_conf_t *appl_conf /* out */) -{ - size_t len = sizeof(appl_conf->name); - - memcpy(appl_conf->name, parsed->args_appl.name, len); - appl_conf->name[len - 1] = '\0'; - - if (parsed->args_em.thread_per_core) { - appl_conf->num_procs = 1; - appl_conf->num_threads = parsed->args_em.core_count; - } else { - appl_conf->num_procs = parsed->args_em.core_count; - appl_conf->num_threads = parsed->args_em.core_count; - } - - appl_conf->dispatch_rounds = parsed->args_appl.dispatch_rounds; - appl_conf->startup_mode = parsed->args_appl.startup_mode; - - /* - * Create the other event pools used by the application. - * Note that em_term() will delete all remaining pools during - * termination. 
- */ - em_pool_cfg_t appl_pool_1_cfg; - - em_pool_cfg_init(&appl_pool_1_cfg); /* mandatory */ - appl_pool_1_cfg.event_type = EM_EVENT_TYPE_PACKET; - appl_pool_1_cfg.num_subpools = 4; - - appl_pool_1_cfg.subpool[0].size = 256; - appl_pool_1_cfg.subpool[0].num = 16384; - appl_pool_1_cfg.subpool[0].cache_size = 128; - - appl_pool_1_cfg.subpool[1].size = 512; - appl_pool_1_cfg.subpool[1].num = 1024; - appl_pool_1_cfg.subpool[1].cache_size = 64; - - appl_pool_1_cfg.subpool[2].size = 1024; - appl_pool_1_cfg.subpool[2].num = 1024; - appl_pool_1_cfg.subpool[2].cache_size = 32; - - appl_pool_1_cfg.subpool[3].size = 2048; - appl_pool_1_cfg.subpool[3].num = 1024; - appl_pool_1_cfg.subpool[3].cache_size = 16; - - em_pool_t appl_pool = em_pool_create(APPL_POOL_1_NAME, APPL_POOL_1, - &appl_pool_1_cfg); - if (appl_pool == EM_POOL_UNDEF || appl_pool != APPL_POOL_1) - APPL_EXIT_FAILURE("appl pool:%s(%" PRI_POOL ") create failed", - APPL_POOL_1_NAME, APPL_POOL_1); - appl_conf->pools[0] = appl_pool; - appl_conf->num_pools = 1; - - appl_conf->pktio.in_mode = parsed->args_appl.pktio.in_mode; - appl_conf->pktio.if_count = parsed->args_appl.pktio.if_count; - for (int i = 0; i < parsed->args_appl.pktio.if_count; i++) { - memcpy(appl_conf->pktio.if_name[i], - parsed->args_appl.pktio.if_name[i], IF_NAME_LEN + 1); - } - - appl_conf->pktio.pktpool_em = parsed->args_appl.pktio.pktpool_em; -} - -static void -create_pktio(appl_conf_t *appl_conf/*in/out*/, const cpu_conf_t *cpu_conf) -{ - pktio_mem_reserve(); - pktio_pool_create(appl_conf->pktio.if_count, - appl_conf->pktio.pktpool_em); - pktio_init(appl_conf); - /* Create a pktio instance for each interface */ - for (int i = 0; i < appl_conf->pktio.if_count; i++) { - int if_id = pktio_create(appl_conf->pktio.if_name[i], - cpu_conf->num_worker, - appl_conf->pktio.in_mode); - if (unlikely(if_id < 0)) - APPL_EXIT_FAILURE("Cannot create pktio if:%s", - appl_conf->pktio.if_name[i]); - /* Store the interface id */ - appl_conf->pktio.if_ids[i] = if_id; - } -} - -static void -term_pktio(const appl_conf_t *appl_conf) -{ - /* Stop, close and free the pktio resources */ - pktio_stop(); - pktio_close(); - pktio_deinit(appl_conf); - pktio_pool_destroy(appl_conf->pktio.pktpool_em); - pktio_mem_free(); -} - -static int -create_odp_threads(odp_instance_t instance, - const parse_args_t *parsed, const cpu_conf_t *cpu_conf, - int (*start_fn)(void *fn_arg), void *fn_arg, - odph_thread_t thread_tbl[/*out*/]) -{ - odph_thread_common_param_t thr_common; - odph_thread_param_t thr_param; /* same for all thrs */ - int ret; - - /* - * Generate a thread summary for the user - */ - char cpumaskstr[ODP_CPUMASK_STR_SIZE]; - - APPL_PRINT("num worker: %i\n", cpu_conf->num_worker); - - odp_cpumask_to_str(&cpu_conf->worker_mask, cpumaskstr, - sizeof(cpumaskstr)); - APPL_PRINT("worker thread mask: %s\n", cpumaskstr); - - odph_thread_common_param_init(&thr_common); - thr_common.instance = instance; - thr_common.cpumask = &cpu_conf->worker_mask; - /* - * Select between pthreads and processes, - * parse_args() has verified .thread_per_core vs .process_per_core - */ - if (parsed->args_em.thread_per_core) - thr_common.thread_model = 0; /* pthreads */ - else - thr_common.thread_model = 1; /* processes */ - thr_common.sync = 1; /* Synchronize thread start up */ - thr_common.share_param = 1; /* same 'thr_param' for all threads */ - - odph_thread_param_init(&thr_param); - thr_param.start = start_fn; - thr_param.arg = fn_arg; - thr_param.thr_type = ODP_THREAD_WORKER; - - /* - * Create odp worker threads to run as 
EM-cores - */ - ret = odph_thread_create(thread_tbl /*out*/, - &thr_common, &thr_param, - cpu_conf->num_worker); - return ret; -} - -/** - * @brief Helper to run_core_fn(): - * Start-up and init all EM-cores before application setup - * - * @param sync Application start-up and tear-down synchronization vars - * @param appl_conf Application configuration - */ -static void startup_all_cores(sync_t *sync, appl_conf_t *appl_conf, - bool is_thread_per_core) -{ - em_status_t stat; - int core_id; - uint64_t cores; - - /* - * Initialize this thread of execution (proc, thread), i.e. EM-core - */ - stat = em_init_core(); - if (stat != EM_OK) - APPL_EXIT_FAILURE("em_init_core():%" PRI_STAT ", EM-core:%02d", - stat, em_core_id()); - - odp_barrier_wait(&sync->start_barrier); - - if (appl_conf->pktio.if_count > 0) - pktio_mem_lookup(is_thread_per_core); - - odp_barrier_wait(&sync->start_barrier); - - /* - * EM is ready on this EM-core (= proc, thread or core) - * It is now OK to start creating EOs, queues etc. - * - * Note that only one core needs to create the shared memory, EO's, - * queues etc. needed by the application, all other cores need only - * look up the shared mem and go directly into the em_dispatch()-loop, - * where they are ready to process events as soon as the EOs have been - * started and queues enabled. - */ - core_id = em_core_id(); - cores = (uint64_t)em_core_count(); - - /* Ensure all EM cores can find the default event pool */ - if (em_pool_find(EM_POOL_DEFAULT_NAME) != EM_POOL_DEFAULT) - APPL_EXIT_FAILURE("em_pool_find(%s) c:%d", - EM_POOL_DEFAULT_NAME, core_id); - - if (core_id == 0) { - /* - * Initialize the application and allocate shared memory. - */ - test_init(); - } - - odp_barrier_wait(&sync->start_barrier); - - if (core_id != 0) { - /* Look up the shared memory */ - test_init(); - } - - const char *str = appl_conf->dispatch_rounds == 0 ? - "forever" : "rounds"; - - APPL_PRINT("Entering the event dispatch loop(%s=%d) on EM-core:%02d\n", - str, appl_conf->dispatch_rounds, core_id); - - odp_barrier_wait(&sync->start_barrier); /* to print pretty */ - - /* - * Don't use barriers to sync the cores after this! - * EM synchronous API funcs (e.g. em_eo_start_sync()) blocks until the - * function has completed on all cores - a barrier might hinder a core - * from completing an operation. - */ - - if (core_id == 0) { - /* - * Create and start application EOs, pass the appl_conf. - */ - test_start(appl_conf); - } - - /* - * Keep all cores dispatching until 'test_start()' has been - * completed in order to handle sync-API function calls and to enter - * the main dispatch loop almost at the same time. - */ - env_atomic64_inc(&sync->enter_count); - do { - em_dispatch(STARTUP_DISPATCH_ROUNDS); - if (core_id == 0) { - /* Start pktio if configured */ - if (appl_conf->pktio.if_count > 0) - pktio_start(); - env_atomic64_inc(&sync->enter_count); - } - } while (env_atomic64_get(&sync->enter_count) <= cores); -} - -/** - * @brief Helper to run_core_fn(): - * Start-up and init only one EM-core before application setup. The rest - * of the EM-cores are init only after that. 
- * - * @param sync Application start-up and tear-down synchronization vars - * @param appl_conf Application configuration - */ -static void startup_one_core_first(sync_t *sync, appl_conf_t *appl_conf, - bool is_thread_per_core) -{ - em_status_t stat; - uint64_t enter_count = env_atomic64_return_add(&sync->enter_count, 1); - - if (enter_count == 0) { - /* - * Initialize first EM-core - */ - stat = em_init_core(); - if (stat != EM_OK) - APPL_EXIT_FAILURE("em_init_core():%" PRI_STAT ", EM-core:%02d", - stat, em_core_id()); - if (appl_conf->pktio.if_count > 0) - pktio_mem_lookup(is_thread_per_core); - - /* Ensure all EM cores can find the default event pool */ - if (em_pool_find(EM_POOL_DEFAULT_NAME) != EM_POOL_DEFAULT) - APPL_EXIT_FAILURE("em_pool_find(%s) c:%d", - EM_POOL_DEFAULT_NAME, em_core_id()); - - /* - * Don't use barriers to sync the cores after this! - * EM synchronous API funcs (e.g. em_eo_start_sync()) blocks until the - * function has completed on all cores - a barrier might hinder a core - * from completing an operation. - */ - } - - odp_barrier_wait(&sync->start_barrier); - - if (enter_count == 0) { - /* - * Initialize the application and allocate shared memory. - */ - test_init(); - /* - * Create and start application EOs, pass the appl_conf. - */ - test_start(appl_conf); - } else { - /* - * Sleep until the first EM-core has completed test_init() and - * test_start() to set up the application. - * Use sleep instead of barrier or lock etc. to avoid deadlock - * in case the first core is using sync-APIs and is waiting for - * completion by the other EM-cores, we need to go into dispatch - * (this is for testing only and NOT the most elegant start-up). - */ - sleep(1); - - /* - * Initialize this thread of execution (proc, thread), i.e. EM-core - */ - stat = em_init_core(); - if (stat != EM_OK) - APPL_EXIT_FAILURE("em_init_core():%" PRI_STAT ", EM-core:%02d", - stat, em_core_id()); - - if (appl_conf->pktio.if_count > 0) - pktio_mem_lookup(is_thread_per_core); - - /* - * EM is ready on this EM-core (= proc, thread or core) - * It is now OK to start creating EOs, queues etc. - * - * Note that only one core needs to create the shared memory, EO's, - * queues etc. needed by the application, all other cores need only - * look up the shared mem and go directly into the em_dispatch()-loop, - * where they are ready to process events as soon as the EOs have been - * started and queues enabled. - */ - - /* Ensure all EM cores can find the default event pool */ - if (em_pool_find(EM_POOL_DEFAULT_NAME) != EM_POOL_DEFAULT) - APPL_EXIT_FAILURE("em_pool_find(%s) c:%d", - EM_POOL_DEFAULT_NAME, em_core_id()); - - /* Look up the shared memory */ - test_init(); - } - - const int core_id = em_core_id(); - const uint64_t cores = (uint64_t)em_core_count(); - - const char *str = appl_conf->dispatch_rounds == 0 ? - "forever" : "rounds"; - - APPL_PRINT("Entering the event dispatch loop(%s=%d) on EM-core:%02d\n", - str, appl_conf->dispatch_rounds, core_id); - - /* - * Keep all cores dispatching until 'test_start()' has been - * completed in order to handle sync-API function calls and to enter - * the main dispatch loop almost at the same time. 
- */ - env_atomic64_inc(&sync->enter_count); - do { - em_dispatch(STARTUP_DISPATCH_ROUNDS); - if (core_id == 0) { - /* Start pktio if configured */ - if (appl_conf->pktio.if_count > 0) - pktio_start(); - env_atomic64_inc(&sync->enter_count); - } - } while (env_atomic64_get(&sync->enter_count) <= 2 * cores); -} - -/** - * Core runner - application entry on each EM-core - * - * Application setup and event dispatch loop run by each EM-core. - * A call to em_init_core() MUST be made on each EM-core before using other - * EM API functions to create EOs, queues etc. or calling em_dispatch(). - * - * @param arg passed arg actually of type 'appl_shm_t *', i.e. appl shared mem - */ -static int -run_core_fn(void *arg) -{ - odp_shm_t shm; - appl_shm_t *appl_shm; - void *shm_addr; - appl_conf_t *appl_conf; - sync_t *sync; - em_status_t stat; - bool is_thread_per_core; - - /* thread: depend on the odp helper to call odp_init_local */ - /* process: parent called odp_init_local, fork creates copy for child */ - - appl_shm = (appl_shm_t *)arg; - - /* Look up the appl shared memory - sanity check */ - shm = odp_shm_lookup("appl_shm"); - if (unlikely(shm == ODP_SHM_INVALID)) - APPL_EXIT_FAILURE("appl_shm lookup failed"); - shm_addr = odp_shm_addr(shm); - if (unlikely(shm_addr == NULL || shm_addr != (void *)appl_shm)) - APPL_EXIT_FAILURE("obtaining shared mem addr failed:\n" - "shm_addr:%p appl_shm:%p", - shm_addr, appl_shm); - - appl_conf = &appl_shm->appl_conf; - sync = &appl_shm->sync; - is_thread_per_core = appl_shm->em_conf.thread_per_core ? true : false; - - /* - * Allow testing different startup scenarios: - */ - switch (appl_conf->startup_mode) { - case STARTUP_ALL_CORES: - /* - * All EM-cores start-up and init before appication setup - */ - startup_all_cores(sync, appl_conf, is_thread_per_core); - break; - case STARTUP_ONE_CORE_FIRST: - /* - * Only one EM-core start-up and init before application setup, - * the rest of the EM-cores are init after that. - */ - startup_one_core_first(sync, appl_conf, is_thread_per_core); - break; - default: - APPL_EXIT_FAILURE("Unsupported startup-mode:%d", - appl_conf->startup_mode); - break; - } - - APPL_PRINT("%s() on EM-core:%02d\n", __func__, em_core_id()); - - /* - * Enter the EM event dispatch loop (0==forever) on this EM-core. - */ - int core_id = em_core_id(); - uint64_t cores = (uint64_t)em_core_count(); - uint32_t dispatch_rounds = appl_conf->dispatch_rounds; - uint32_t exit_check_rounds = EXIT_CHECK_DISPATCH_ROUNDS; - uint32_t rounds; - - if (dispatch_rounds == 0) { - /* - * Dispatch forever, in chunks of 'exit_check_rounds', - * or until 'exit_flag' is set by SIGINT (CTRL-C). - */ - while (!appl_shm->exit_flag) - em_dispatch(exit_check_rounds); - } else { - /* - * Dispatch for 'dispatch_rounds' in chunks of 'rounds', - * or until 'exit_flag' is set by SIGINT (CTRL-C). - */ - rounds = MIN(dispatch_rounds, exit_check_rounds); - do { - em_dispatch(rounds); - dispatch_rounds -= rounds; - } while (dispatch_rounds > rounds && !appl_shm->exit_flag); - - if (dispatch_rounds > 0) { - rounds = MIN(dispatch_rounds, rounds); - em_dispatch(rounds); - } - } - /* - * Allow apps one more round with 'exit_flag' set to flush events from - * the sched queues etc. 
- */ - if (!appl_shm->exit_flag) - appl_shm->exit_flag = 1; /* potential race with SIGINT-handler*/ - em_dispatch(exit_check_rounds); - - /* - * Dispatch-loop done for application, prepare for controlled shutdown - */ - - uint64_t exit_count = env_atomic64_return_add(&sync->exit_count, 1); - - /* First core to exit dispatch stops the application */ - if (exit_count == 0) { - if (appl_conf->pktio.if_count > 0) { - /* halt further pktio rx & tx */ - pktio_halt(); - /* dispatch with pktio stopped before test_stop()*/ - em_dispatch(TERM_DISPATCH_ROUNDS); - } - /* - * Stop and delete created application EOs - */ - test_stop(appl_conf); - } - - /* - * Continue dispatching until all cores have exited the dispatch loop - * and until 'test_stop()' has been completed, the cores might have to - * react to teardown related events such as EM function completion - * events & notifs. - */ - do { - em_dispatch(TERM_DISPATCH_ROUNDS); - if (exit_count == 0) { - /* - * First core to exit increments 'exit_count' twice - - * this ensures that all other cores will stay in this - * dispatch loop until the first core reaches the loop. - */ - env_atomic64_inc(&sync->exit_count); - } - exit_count = env_atomic64_get(&sync->exit_count); - } while (exit_count <= cores); - - /* - * Proper application teardown should have been completed on all cores, - * still do some 'empty' dispatch rounds to drain all possibly - * remaining events in the system. - */ - while (em_dispatch(TERM_DISPATCH_ROUNDS) > 0) - ; - - APPL_PRINT("Left the event dispatch loop on EM-core:%02d\n", core_id); - - odp_barrier_wait(&sync->exit_barrier); - - if (core_id == 0) { - /* - * Free allocated test resources - */ - test_term(); - } - - odp_barrier_wait(&sync->exit_barrier); - - stat = em_term_core(); - if (stat != EM_OK) - APPL_EXIT_FAILURE("em_term_core(%d):%" PRI_STAT "", - core_id, stat); - - odp_barrier_wait(&sync->exit_barrier); - - /* depend on the odp helper to call odp_term_local */ - - return 0; -} - -/** - * Parse and store relevant command line arguments. Set config options for both - * application and EM. - * - * EM options are stored into em_conf and application specific options into - * appl_conf. Note that both application and EM parsing is done here since EM - * should not, by design, be concerned with the parsing of options, instead - * em_conf_t specifies the options needed by the EM-implementation (HW, device - * and env specific). 
- * - * @param argc Command line argument count - * @param argv[] Command line arguments - * @param em_conf EM config options parsed from argv[] - * @param appl_conf Application config options parsed from argv[] - */ -static void -parse_args(int argc, char *argv[], parse_args_t *parsed /* out param */) -{ - static const struct option longopts[] = { - {"coremask", required_argument, NULL, 'c'}, - {"process-per-core", no_argument, NULL, 'p'}, - {"thread-per-core", no_argument, NULL, 't'}, - {"device-id", required_argument, NULL, 'd'}, - {"dispatch-rounds", required_argument, NULL, 'r'}, - {"eth-interface", required_argument, NULL, 'i'}, - {"pktpool-em", no_argument, NULL, 'e'}, - {"pktpool-odp", no_argument, NULL, 'o'}, - {"pktin-mode", required_argument, NULL, 'm'}, - {"startup-mode", required_argument, NULL, 's'}, - {"help", no_argument, NULL, 'h'}, - {NULL, 0, NULL, 0} - }; - static const char *shortopts = "+c:ptd:r:i:oem:s:h"; - long device_id = -1; - - /* set defaults: */ - parsed->args_appl.pktio.in_mode = DIRECT_RECV; - parsed->args_appl.startup_mode = STARTUP_ALL_CORES; - - opterr = 0; /* don't complain about unknown options here */ - - APPL_PRINT("EM application options:\n"); - - /* - * Parse the application & EM arguments and save core mask. - * Note: Use '+' at the beginning of optstring: - * - don't permute the contents of argv[]. - * Note: Stops at "--" - */ - while (1) { - int opt; - int long_index; - - opt = getopt_long(argc, argv, shortopts, longopts, &long_index); - - if (opt == -1) - break; /* No more options */ - - switch (opt) { - case 'c': { - char *mask_str = optarg; - char tmp_str[EM_CORE_MASK_STRLEN]; - int err; - - /* - * Store the core mask for EM - usage depends on the - * process-per-core or thread-per-core mode selected. - */ - em_core_mask_zero(&parsed->args_em.phys_mask); - err = em_core_mask_set_str(mask_str, - &parsed->args_em.phys_mask); - if (err) - APPL_EXIT_FAILURE("Invalid coremask(%s) given", - mask_str); - - parsed->args_em.core_count = - em_core_mask_count(&parsed->args_em.phys_mask); - - em_core_mask_tostr(tmp_str, sizeof(tmp_str), - &parsed->args_em.phys_mask); - APPL_PRINT(" Coremask: %s\n" - " Core Count: %i\n", - tmp_str, parsed->args_em.core_count); - } - break; - - case 'p': - parsed->args_em.process_per_core = 1; - break; - - case 't': - parsed->args_em.thread_per_core = 1; - break; - - case 'd': { - char *endptr; - - device_id = strtol(optarg, &endptr, 0); - - if (*endptr != '\0' || - (uint64_t)device_id > UINT16_MAX) - APPL_EXIT_FAILURE("Invalid device-id:%s", - optarg); - - parsed->args_em.device_id = (uint16_t)(device_id & 0xffff); - } - break; - - case 'r': - parsed->args_appl.dispatch_rounds = atoi(optarg); - if (atoi(optarg) < 0) - APPL_EXIT_FAILURE("Invalid dispatch-rounds:%s", - optarg); - break; - - case 'i': { - int i; - size_t len, max; - char *name; - - name = strtok(optarg, ","); - for (i = 0; name != NULL; i++) { - if (i > IF_MAX_NUM - 1) - APPL_EXIT_FAILURE("Too many if's:%d", - i + 1); - max = sizeof(parsed->args_appl.pktio.if_name[i]); - len = strnlen(name, max); - if (len + 1 > max) - APPL_EXIT_FAILURE("Invalid if name:%s", - name); - - strncpy(parsed->args_appl.pktio.if_name[i], name, len); - parsed->args_appl.pktio.if_name[i][len + 1] = '\0'; - - name = strtok(NULL, ","); - } - parsed->args_appl.pktio.if_count = i; - } - break; - - case 'e': - parsed->args_appl.pktio.pktpool_em = true; - break; - - case 'o': - parsed->args_appl.pktio.pktpool_odp = true; - break; - - case 'm': { - int mode = atoi(optarg); - - if (mode == 0) 
{ - parsed->args_appl.pktio.in_mode = DIRECT_RECV; - } else if (mode == 1) { - parsed->args_appl.pktio.in_mode = PLAIN_QUEUE; - } else if (mode == 2) { - parsed->args_appl.pktio.in_mode = SCHED_PARALLEL; - } else if (mode == 3) { - parsed->args_appl.pktio.in_mode = SCHED_ATOMIC; - } else if (mode == 4) { - parsed->args_appl.pktio.in_mode = SCHED_ORDERED; - } else { - usage(argv[0]); - APPL_EXIT_FAILURE("Unknown value: -m, --pktin-mode = %d", mode); - } - } - break; - - case 's': { - int mode = atoi(optarg); - - if (mode == 0) { - parsed->args_appl.startup_mode = STARTUP_ALL_CORES; - } else if (mode == 1) { - parsed->args_appl.startup_mode = STARTUP_ONE_CORE_FIRST; - } else { - usage(argv[0]); - APPL_EXIT_FAILURE("Unknown value: -s, --startup-mode = %d", mode); - } - } - break; - - case 'h': - usage(argv[0]); - exit(EXIT_SUCCESS); - break; - - default: - usage(argv[0]); - APPL_EXIT_FAILURE("Unknown option!"); - break; - } - } - - optind = 1; /* reset 'extern optind' from the getopt lib */ - - /* Sanity check: */ - if (!parsed->args_em.core_count) { - usage(argv[0]); - APPL_EXIT_FAILURE("Give mandatory coremask!"); - } - - /* Check if a device-id was given, if not use the default '0' */ - if (device_id == -1) /* not set */ - parsed->args_em.device_id = 0; - APPL_PRINT(" Device-id: 0x%" PRIX16 "\n", parsed->args_em.device_id); - - /* Sanity checks: */ - if (!(parsed->args_em.process_per_core ^ parsed->args_em.thread_per_core)) { - usage(argv[0]); - APPL_EXIT_FAILURE("Select EITHER process-per-core(-p) OR thread-per-core(-t)!"); - } - if (parsed->args_em.thread_per_core) - APPL_PRINT(" EM mode: Thread-per-core\n"); - else - APPL_PRINT(" EM mode: Process-per-core\n"); - - if (parsed->args_appl.pktio.if_count > 0) { - if (parsed->args_appl.pktio.pktpool_em && parsed->args_appl.pktio.pktpool_odp) { - usage(argv[0]); - APPL_EXIT_FAILURE("Select EITHER pktpool-em(-e) OR pktpool-odp(-o)!"); - } - if (!parsed->args_appl.pktio.pktpool_em && !parsed->args_appl.pktio.pktpool_odp) - parsed->args_appl.pktio.pktpool_em = true; /* default if none given */ - - if (parsed->args_appl.pktio.pktpool_em) - APPL_PRINT(" Pktio pool: EM event-pool\n"); - else - APPL_PRINT(" Pktio pool: ODP pkt-pool\n"); - } else { - parsed->args_appl.pktio.pktpool_em = false; - parsed->args_appl.pktio.pktpool_odp = false; - } - - const char *startup_mode_str = ""; - - /* Startup-mode */ - if (parsed->args_appl.startup_mode == STARTUP_ALL_CORES) - startup_mode_str = "All EM-cores before application"; - else if (parsed->args_appl.startup_mode == STARTUP_ONE_CORE_FIRST) - startup_mode_str = "One EM-core before application (then the rest)"; - /* other values are reported as errors earlier in parsing */ - - APPL_PRINT(" Startup-mode: %s\n", startup_mode_str); - - /* Store the application name */ - size_t len = sizeof(parsed->args_appl.name); - - strncpy(parsed->args_appl.name, NO_PATH(argv[0]), len); - parsed->args_appl.name[len - 1] = '\0'; -} - -/** - * Verify the cpu setup - sanity check and store cpus to use - * - * Verify the cpu count and mask against system values - */ -static void -verify_cpu_setup(const parse_args_t *parsed, - cpu_conf_t *cpu_conf /* out */) -{ - odp_cpumask_t invalid_mask; - odp_cpumask_t check_mask; - odp_cpumask_t zero_mask; - int usable_cpus; - cpu_set_t cpuset; - int ret; - - const odp_cpumask_t *cpu_mask = &parsed->args_em.phys_mask.odp_cpumask; - int num_cpus = parsed->args_em.core_count; - - /* - * Verify cpu setup - */ - if (num_cpus > MAX_THREADS) - APPL_EXIT_FAILURE("Setup configured for max %d cores, 
not %d", - MAX_THREADS, num_cpus); - - odp_cpumask_zero(&invalid_mask); - odp_cpumask_zero(&check_mask); - odp_cpumask_zero(&zero_mask); - usable_cpus = 0; - - CPU_ZERO(&cpuset); - /* get the cpus/cores available to this application */ - ret = sched_getaffinity(0, sizeof(cpuset), &cpuset); - if (ret < 0) - APPL_EXIT_FAILURE("sched_getaffinity:%d errno(%d):%s", - ret, errno, strerror(errno)); - - /* count the usable cpus and also record the invalid cpus */ - for (int i = 0; i < CPU_SETSIZE - 1; i++) { - if (CPU_ISSET(i, &cpuset)) - usable_cpus++; - else - odp_cpumask_set(&invalid_mask, i); - } - - /* - * Make sure no cpu in the cpu_mask is set in the invalid_mask. - * For a valid setup check_mask will be all-zero, otherwise it - * will contain the invalid cpus. - */ - odp_cpumask_and(&check_mask, &invalid_mask, cpu_mask); - if (!odp_cpumask_equal(&zero_mask, &check_mask) || - num_cpus > usable_cpus) { - char cpus_str[ODP_CPUMASK_STR_SIZE]; - char check_str[ODP_CPUMASK_STR_SIZE]; - - memset(cpus_str, '\0', sizeof(cpus_str)); - memset(check_str, '\0', sizeof(check_str)); - odp_cpumask_to_str(cpu_mask, cpus_str, sizeof(cpus_str)); - odp_cpumask_to_str(&check_mask, check_str, sizeof(check_str)); - - APPL_EXIT_FAILURE("Invalid cpus - requested:%d available:%d\n" - "cpu_mask:%s of which invalid-cpus:%s", - num_cpus, usable_cpus, cpus_str, check_str); - } - - /* - * Store the cpu conf to be set up for ODP - */ - odp_cpumask_copy(&cpu_conf->worker_mask, - &parsed->args_em.phys_mask.odp_cpumask); - cpu_conf->num_worker = parsed->args_em.core_count; -} - -/** - * Install a signal handler - */ -static void -install_sig_handler(int signum, void (*sig_handler)(int), int flags) -{ - struct sigaction sa; - - sigemptyset(&sa.sa_mask); - - sa.sa_flags = SA_RESTART; /* restart interrupted system calls */ - sa.sa_flags |= flags; - sa.sa_handler = sig_handler; - - if (sigaction(signum, &sa, NULL) == -1) - APPL_EXIT_FAILURE("sigaction() fails (errno(%i)=%s)", - errno, strerror(errno)); -} - -/** - * Signal handler for SIGINT (e.g. Ctrl-C to stop the program) - */ -static void -sigint_handler(int signo ODP_UNUSED) -{ - if (appl_shm == NULL) - return; - appl_shm->exit_flag = 1; -} - -/** - * Signal handler for SIGCHLD (parent receives when child process dies). - */ -static void -sigchld_handler(int sig ODP_UNUSED) -{ - int status; - pid_t child; - - /* Child-process termination requested, normal tear-down, just return */ - if (appl_shm->exit_flag) - return; - - /* Nonblocking waits until no more dead children are found */ - do { - child = waitpid(-1, &status, WNOHANG); - } while (child > 0); - - if (child == -1 && errno != ECHILD) - _exit(EXIT_FAILURE); - - /* - * Exit the parent process - triggers SIGTERM in the remaining children - * (set by prctl(PR_SET_PDEATHSIG, SIGTERM)). - */ - _exit(EXIT_SUCCESS); -} - -__attribute__((format(printf, 2, 0))) -int appl_vlog(em_log_level_t level, const char *fmt, va_list args) -{ - int r; - FILE *logfd; - - switch (level) { - case EM_LOG_DBG: - case EM_LOG_PRINT: - logfd = stdout; - break; - case EM_LOG_ERR: - default: - logfd = stderr; - break; - } - - r = vfprintf(logfd, fmt, args); - return r; -} - -__attribute__((format(printf, 2, 3))) -int appl_log(em_log_level_t level, const char *fmt, ...) 
-{ - va_list args; - int r; - - va_start(args, fmt); - r = appl_vlog(level, fmt, args); - va_end(args); - - return r; -} - -/** - * Delay spinloop - */ -void delay_spin(const uint64_t spin_count) -{ - env_atomic64_t dummy; /* use atomic to avoid optimization */ - uint64_t i; - - env_atomic64_init(&dummy); - - if (likely(appl_shm)) { - for (i = 0; i < spin_count && !appl_shm->exit_flag; i++) - env_atomic64_inc(&dummy); - } else { - for (i = 0; i < spin_count; i++) - env_atomic64_inc(&dummy); - } -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine common initialization functions + * + */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "cm_setup.h" +#include "cm_pool_config.h" +#include "cm_pktio.h" +#include "cm_error_handler.h" + +/** + * @def USAGE_FMT + * Usage help string + */ +#define USAGE_FMT \ +"\n" \ +"Usage: %s APPL&EM-OPTIONS\n" \ +" E.g. 
%s -c 0xfe -p\n" \ +"\n" \ +"Event Machine (EM) example application.\n" \ +"\n" \ +"Mandatory EM-OPTIONS:\n" \ +" -c, --coremask Select the cores to use, hexadecimal\n" \ +" -p, --process-per-core Running EM with one process per core.\n" \ +" -t, --thread-per-core Running EM with one thread per core.\n" \ +" Select EITHER -p OR -t, but not both!\n" \ +"\n" \ +"Optional [APPL&EM-OPTIONS]\n" \ +" -d, --device-id Device-id, hexadecimal (default: 0x0)\n" \ +" -r, --dispatch-rounds Number of dispatch rounds (default: 0=forever)\n"\ +" -s, --startup-mode Application startup mode:\n" \ +" 0: Start-up & init all EM cores before appl-setup (default)\n" \ +" 1: Start-up & init only one EM core before appl-setup,\n" \ +" the rest of the EM-cores are init only after that.\n" \ +"Packet-IO\n" \ +" -m, --pktin-mode Select the packet-input mode to use:\n" \ +" 0: Direct mode: PKTIN_MODE_DIRECT (default)\n" \ +" 1: Plain queue mode: PKTIN_MODE_QUEUE\n" \ +" 2: Scheduler mode with parallel queues:\n" \ +" PKTIN_MODE_SCHED + SCHED_SYNC_PARALLEL\n" \ +" 3: Scheduler mode with atomic queues:\n" \ +" PKTIN_MODE_SCHED + SCHED_SYNC_ATOMIC\n" \ +" 4: Scheduler mode with ordered queues:\n" \ +" PKTIN_MODE_SCHED + SCHED_SYNC_ORDERED\n" \ +" -v, --pktin-vector Enable vector-mode for packet-input (default: disabled)\n"\ +" Supported with --pktin-mode:s 2, 3, 4\n" \ +" -i, --eth-interface Select the ethernet interface(s) to use\n" \ +" -e, --pktpool-em Packet-io pool is an EM-pool (default)\n" \ +" -o, --pktpool-odp Packet-io pool is an ODP-pool\n" \ +" -x, --vecpool-em Packet-io vector pool is an EM-pool (default)\n" \ +" -y, --vecpool-odp Packet-io vector pool is an ODP-pool\n" \ +" Select EITHER -e OR -o, but not both!\n" \ +"Help\n" \ +" -h, --help Display help and exit.\n" \ +"\n" + +/** + * Stored command line arguments given at startup + * + * @see USAGE_FMT + */ +typedef struct { + /** EM cmd line args */ + struct { + /** EM device id */ + uint16_t device_id; + /** RunMode: EM run with a thread per core */ + int thread_per_core; + /** RunMode: EM run with a process per core */ + int process_per_core; + /** Number of EM-cores (== nbr of EM-threads or EM-processes) */ + int core_count; + /** Physical core mask, exact listing of cores for EM */ + em_core_mask_t phys_mask; + } args_em; + + /** Application cmd line args */ + struct { + /** Application name */ + char name[APPL_NAME_LEN]; + /** Start-up mode */ + startup_mode_t startup_mode; + /** Dispatch rounds before returning */ + uint32_t dispatch_rounds; + /** Packet I/O parameters */ + struct { + /** Packet input mode */ + pktin_mode_t in_mode; + /** Packet input vectors enabled (true/false) */ + bool pktin_vector; + /** Interface count */ + int if_count; + /** Interface names + placeholder for '\0' */ + char if_name[IF_MAX_NUM][IF_NAME_LEN + 1]; + /** Pktio is setup with an EM event-pool (true/false) */ + bool pktpool_em; + /** Pktio is setup with an ODP pkt-pool (true/false) */ + bool pktpool_odp; + /** Pktio is setup with an EM vector pool (if pkt-input vectors enabled) */ + bool vecpool_em; + /** Pktio is setup with an ODP vector pool (if pkt-input vectors enabled) */ + bool vecpool_odp; + } pktio; + } args_appl; +} parse_args_t; + +/** + * CPU config to be used + */ +typedef struct { + /** Number of CPUs to run EM-cores */ + int num_worker; + /** Worker_mask specifying cores for EM */ + odp_cpumask_t worker_mask; +} cpu_conf_t; + +/** + * Dispatch rounds for em_dispatch() during start-up to properly sync the + * cores to enter the main dispatch loop at 
roughly the same time.
+ */
+#define STARTUP_DISPATCH_ROUNDS 16

+/**
+ * Dispatch rounds for em_dispatch() during program execution to regularly
+ * return from dispatch and inspect the 'appl_shm->exit_flag' value. Program
+ * termination will begin once a set 'appl_shm->exit_flag' has been noticed.
+ */
+#define EXIT_CHECK_DISPATCH_ROUNDS 20000
+
+/**
+ * Dispatch rounds for em_dispatch() during termination to properly sync the
+ * cores, complete the shutdown actions and allow for a graceful shutdown.
+ */
+#define TERM_DISPATCH_ROUNDS 16
+
+static void
+parse_args(int argc, char *argv[], parse_args_t *parsed /* out */);
+
+static void
+verify_cpu_setup(const parse_args_t *parsed,
+                 cpu_conf_t *cpu_conf /* out */);
+
+static odp_instance_t
+init_odp(const parse_args_t *parsed, const cpu_conf_t *cpu_conf);
+
+static void
+init_sync(sync_t *const sync, int num_cpus);
+
+static void
+init_em(const parse_args_t *parsed, em_conf_t *em_conf /* out */);
+
+static void
+init_appl_conf(const parse_args_t *parsed, appl_conf_t *appl_conf /* out */);
+
+static void
+create_pktio(appl_conf_t *appl_conf/*in/out*/, const cpu_conf_t *cpu_conf);
+static void
+term_pktio(const appl_conf_t *appl_conf);
+
+static int
+create_odp_threads(odp_instance_t instance,
+                   const parse_args_t *parsed, const cpu_conf_t *cpu_conf,
+                   int (*start_fn)(void *fn_arg), void *fn_arg,
+                   odph_thread_t thread_tbl[/*out*/]);
+static int
+run_core_fn(void *arg);
+
+static void
+install_sig_handler(int signum, void (*sig_handler)(int), int flags);
+
+static void
+sigchld_handler(int sig ODP_UNUSED);
+static void
+sigint_handler(int signo ODP_UNUSED);
+
+static void
+usage(char *progname)
+{
+    APPL_PRINT(USAGE_FMT, NO_PATH(progname), NO_PATH(progname));
+}
+
+/**
+ * Global pointer to common application shared memory
+ */
+appl_shm_t *appl_shm;
+
+/**
+ * Common setup function for em-odp example programs
+ */
+int cm_setup(int argc, char *argv[])
+{
+    /* use unbuffered stdout */
+    if (setvbuf(stdout, NULL, _IONBF, 0) != 0)
+        APPL_EXIT_FAILURE("setvbuf() fails (errno(%i)=%s)",
+                          errno, strerror(errno));
+
+    /*
+     * Parse the command line arguments
+     */
+    parse_args_t parsed; /* filled during cmd line arg parsing */
+
+    memset(&parsed, 0, sizeof(parsed));
+    parse_args(argc, argv, &parsed/* out */);
+
+    /*
+     * Verify the cpu setup and extract the cpu config
+     */
+    cpu_conf_t cpu_conf;
+
+    memset(&cpu_conf, 0, sizeof(cpu_conf));
+    verify_cpu_setup(&parsed, &cpu_conf/* out */);
+
+    /*
+     * Init ODP with given args and cpu setup
+     *
+     * Calls odp_init_global() and odp_init_local() for this thread
+     * before returning.
+     */
+    odp_instance_t instance;
+
+    instance = init_odp(&parsed, &cpu_conf);
+
+    APPL_PRINT("\n"
+               "*********************************************************\n"
+               "Setting up EM on ODP-version:\n"
+               "%s\n"
+               "*********************************************************\n"
+               "\n",
+               odp_version_impl_str());
+
+    /*
+     * Setup shared memory
+     *
+     * Reserve application shared memory in one chunk. 
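+     *
+     * Any other EM-core can attach to this same region later on, e.g.
+     * (a sketch of the lookup done in run_core_fn() further below):
+     *
+     *   odp_shm_t shm_lu = odp_shm_lookup("appl_shm");
+     *   appl_shm_t *shm_ptr = odp_shm_addr(shm_lu);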
+ */ + uint32_t flags = 0; + +#if ODP_VERSION_API_NUM(1, 33, 0) > ODP_VERSION_API + flags |= ODP_SHM_SINGLE_VA; +#else + odp_shm_capability_t shm_capa; + int err = odp_shm_capability(&shm_capa); + + if (unlikely(err)) + APPL_EXIT_FAILURE("shm capability error:%d", err); + + if (shm_capa.flags & ODP_SHM_SINGLE_VA) + flags |= ODP_SHM_SINGLE_VA; +#endif + odp_shm_t shm = odp_shm_reserve("appl_shm", sizeof(appl_shm_t), + ODP_CACHE_LINE_SIZE, flags); + if (unlikely(shm == ODP_SHM_INVALID)) + APPL_EXIT_FAILURE("appl shared mem reservation failed"); + appl_shm = odp_shm_addr(shm); + if (unlikely(appl_shm == NULL)) + APPL_EXIT_FAILURE("obtaining shared mem addr failed"); + memset(appl_shm, 0, sizeof(appl_shm_t)); + + /* + * Initialize application start-up & exit synchronization + */ + sync_t *const sync = &appl_shm->sync; + + init_sync(sync, cpu_conf.num_worker); + + /* + * Init EM with given args + * + * Calls em_init() before returning. + */ + em_conf_t *const em_conf = &appl_shm->em_conf; + + init_em(&parsed, em_conf); + + /* + * Set application conf based on parsed cmd line arguments + */ + appl_conf_t *const appl_conf = &appl_shm->appl_conf; + + init_appl_conf(&parsed, appl_conf); + + /* + * Create packet-I/O, if requested + */ + if (appl_conf->pktio.if_count > 0) + create_pktio(appl_conf/*in/out*/, &cpu_conf); + + /* + * Signal handler for SIGCHLD in process-per-core mode + * + * Create a signal handler for the SIGCHLD signal that is sent + * to the parent process when a forked child process dies. + */ + if (em_conf->process_per_core) + install_sig_handler(SIGCHLD, sigchld_handler, 0); + + /* + * Signal handler for SIGINT (Ctrl-C) + * + * Create a signal handler for the SIGINT (Ctrl-C) signal to flag + * program termination. + * Set the 'SA_RESETHAND'-flag to reset the SIGINT handler to its + * default disposition after the first handling to be able to stop + * execution if the application misbehaves. + */ + install_sig_handler(SIGINT, sigint_handler, SA_RESETHAND); + + /* + * Create the odp-threads to use as EM-cores + * + * Create the odp-threads / EM-cores. Each EM-core will run the + * 'run_core_fn(appl_shm)' function in a thread pinned to a single cpu + * as specified by 'cpu_conf'. 
+ */ + odph_thread_t *const thread_tbl = appl_shm->thread_tbl; + int ret = create_odp_threads(instance, &parsed, &cpu_conf, + run_core_fn /*fn*/, appl_shm /*fn_arg*/, + thread_tbl /*out*/); + if (ret != cpu_conf.num_worker) + APPL_EXIT_FAILURE("ODP thread creation failed:%d", ret); + + /* + * Wait for the created odp-threads / EM-cores to return + */ + ret = odph_thread_join(thread_tbl, cpu_conf.num_worker); + if (ret != cpu_conf.num_worker) + APPL_EXIT_FAILURE("ODP thread join failed:%d", ret); + + /* + * Teardown the application after all the odp-threads / EM-cores + * have ended: + */ + + /* + * Terminate packet-I/O, if set up + */ + if (appl_conf->pktio.if_count > 0) + term_pktio(appl_conf); + + /* + * Terminate EM + * + * All EM-cores have already run em_term_core() + */ + em_status_t stat = em_term(em_conf); + + if (stat != EM_OK) + APPL_EXIT_FAILURE("em_term():%" PRI_STAT "", stat); + + /* + * Free shared memory + */ + ret = odp_shm_free(shm); + if (ret != 0) + APPL_EXIT_FAILURE("appl shared mem free failed:%d", ret); + + /** + * Terminate ODP + */ + ret = odp_term_local(); + if (ret != 0) + APPL_EXIT_FAILURE("Last ODP local term failed:%d", ret); + ret = odp_term_global(instance); + if (ret != 0) + APPL_EXIT_FAILURE("odp_term_global() failed:%d", ret); + + APPL_PRINT("\nDone - exit\n\n"); + + return EXIT_SUCCESS; +} + +static odp_instance_t +init_odp(const parse_args_t *parsed, const cpu_conf_t *cpu_conf) +{ + odp_init_t init_params; + odp_instance_t instance; + int ret; + + /* Initialize the odp init params with 'default' values */ + odp_init_param_init(&init_params); + + /* Restrict odp worker threads to cores set in the 'worker_mask' */ + init_params.num_worker = cpu_conf->num_worker; + init_params.worker_cpus = &cpu_conf->worker_mask; + + /** + * Leave "init_params.control_cpus" unset to use odp default control + * cpus, which are the rest of installed cpus excluding worker cpus + * and CPU 0 when worker cpus don't have CPU 1 set. But if worker cpus + * have CPU 1 set, CPU 0 will be set as a control cpu. + */ + + /* + * List odp features not to be used in the examples. This may optimize + * performance. Note that a real application might need to change this! + */ + init_params.not_used.feat.cls = 1; /* don't use the odp classifier */ + init_params.not_used.feat.compress = 1; /* don't use the odp compress */ + init_params.not_used.feat.crypto = 1; /* don't use odp crypto */ + init_params.not_used.feat.ipsec = 1; /* don't use odp ipsec */ + init_params.not_used.feat.tm = 1; /* don't use the odp traffic manager*/ + + /* + * Set the memory model to use for odp: thread or process. 
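+     * ODP_MEM_MODEL_THREAD: all EM-cores are pthreads of one process
+     * and share the entire address space.
+     * ODP_MEM_MODEL_PROCESS: EM-cores are forked processes that share
+     * only explicitly reserved shared memory (such as 'appl_shm').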
+ * parse_args() has verified .thread_per_core vs .process_per_core + */ + if (parsed->args_em.thread_per_core) + init_params.mem_model = ODP_MEM_MODEL_THREAD; + else + init_params.mem_model = ODP_MEM_MODEL_PROCESS; + + ret = odp_init_global(&instance, &init_params, NULL); + if (ret != 0) + APPL_EXIT_FAILURE("ODP global init failed:%d", ret); + + ret = odp_init_local(instance, ODP_THREAD_CONTROL); + if (ret != 0) + APPL_EXIT_FAILURE("ODP local init failed:%d", ret); + + /* Configure the scheduler */ + odp_schedule_config_t sched_config; + + odp_schedule_config_init(&sched_config); + /* EM does not need the ODP predefined scheduling groups */ + sched_config.sched_group.all = 0; + sched_config.sched_group.control = 0; + sched_config.sched_group.worker = 0; + ret = odp_schedule_config(&sched_config); + if (ret != 0) + APPL_EXIT_FAILURE("ODP schedule config failed:%d", ret); + + /* Print ODP system info */ + odp_sys_info_print(); + + return instance; +} + +static void +init_sync(sync_t *const sync, int num_cpus) +{ + odp_barrier_init(&sync->start_barrier, num_cpus); + odp_barrier_init(&sync->exit_barrier, num_cpus); + env_atomic64_init(&sync->exit_count); + env_atomic64_init(&sync->enter_count); +} + +static void +init_em(const parse_args_t *parsed, em_conf_t *em_conf /* out */) +{ + em_status_t stat; + + em_conf_init(em_conf); + + /* Set EM conf based on parsed cmd line arguments */ + em_conf->device_id = parsed->args_em.device_id; + em_conf->thread_per_core = parsed->args_em.thread_per_core; + em_conf->process_per_core = parsed->args_em.process_per_core; + em_conf->core_count = parsed->args_em.core_count; + em_conf->phys_mask = parsed->args_em.phys_mask; + + /* Event-Timer: disable=0, enable=1 */ + em_conf->event_timer = 1; + + /* + * Set the default pool config in em_conf, needed internally by EM + * at startup. Note that if default pool configuration is provided + * in em-odp.conf at runtime through option 'startup_pools', this + * default pool config will be overridden and thus ignored. 
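+     * Note the '.in_use' flags set below: when 'true', the value given
+     * in this struct is used and the corresponding em-odp.conf file
+     * value is ignored ("override config file"); when left 'false',
+     * the config file value would apply.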
+ */ + em_pool_cfg_t default_pool_cfg; + + em_pool_cfg_init(&default_pool_cfg); /* mandatory */ + default_pool_cfg.event_type = EM_EVENT_TYPE_SW; + default_pool_cfg.align_offset.in_use = true; /* override config file */ + default_pool_cfg.align_offset.value = 0; /* set explicit '0 bytes' */ + default_pool_cfg.user_area.in_use = true; /* override config file */ + default_pool_cfg.user_area.size = 0; /* set explicit '0 bytes' */ + default_pool_cfg.num_subpools = 4; + default_pool_cfg.subpool[0].size = 256; + default_pool_cfg.subpool[0].num = 16384; + default_pool_cfg.subpool[0].cache_size = 64; + default_pool_cfg.subpool[1].size = 512; + default_pool_cfg.subpool[1].num = 1024; + default_pool_cfg.subpool[1].cache_size = 32; + default_pool_cfg.subpool[2].size = 1024; + default_pool_cfg.subpool[2].num = 1024; + default_pool_cfg.subpool[2].cache_size = 16; + default_pool_cfg.subpool[3].size = 2048; + default_pool_cfg.subpool[3].num = 1024; + default_pool_cfg.subpool[3].cache_size = 8; + + em_conf->default_pool_cfg = default_pool_cfg; + + /* + * User can override the EM default log functions by giving logging + * funcs of their own - here we just use the default (shown explicitly) + */ + em_conf->log.log_fn = NULL; + em_conf->log.vlog_fn = NULL; + + /* Packet-I/O */ + if (parsed->args_appl.pktio.if_count > 0) { + /* + * Request EM to poll input for pkts in the dispatch loop + */ + pktin_mode_t in_mode = parsed->args_appl.pktio.in_mode; + + if (in_mode == DIRECT_RECV) + em_conf->input.input_poll_fn = pktin_pollfn_direct; + else if (in_mode == PLAIN_QUEUE) + em_conf->input.input_poll_fn = pktin_pollfn_plainqueue; + /* in_mode: SCHED_... use no input_poll function! */ + + /* + * Request EM to drain buffered output in the dispatch loop + */ + em_conf->output.output_drain_fn = pktout_drainfn; + } + + /* + * Initialize the Event Machine. Every EM core still needs to call + * em_init_core() later. + * Note: the EM default pool config MUST be included in em_conf! + */ + stat = em_init(em_conf); + if (stat != EM_OK) + APPL_EXIT_FAILURE("em_init(), EM error:%" PRI_STAT "", stat); +} + +static void +init_appl_conf(const parse_args_t *parsed, appl_conf_t *appl_conf /* out */) +{ + size_t len = sizeof(appl_conf->name); + + memcpy(appl_conf->name, parsed->args_appl.name, len); + appl_conf->name[len - 1] = '\0'; + + if (parsed->args_em.thread_per_core) { + appl_conf->num_procs = 1; + appl_conf->num_threads = parsed->args_em.core_count; + } else { + appl_conf->num_procs = parsed->args_em.core_count; + appl_conf->num_threads = parsed->args_em.core_count; + } + + appl_conf->dispatch_rounds = parsed->args_appl.dispatch_rounds; + appl_conf->startup_mode = parsed->args_appl.startup_mode; + + /* + * Create the other event pools used by the application. + * Note that em_term() will delete all remaining pools during + * termination. 
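+     *
+     * Events can later be allocated from this pool by handle, e.g.
+     * (sketch): em_alloc(256, EM_EVENT_TYPE_PACKET, appl_conf->pools[0]);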
+ */ + em_pool_cfg_t appl_pool_1_cfg; + + em_pool_cfg_init(&appl_pool_1_cfg); /* mandatory */ + appl_pool_1_cfg.event_type = EM_EVENT_TYPE_PACKET; + appl_pool_1_cfg.num_subpools = 4; + + appl_pool_1_cfg.subpool[0].size = 256; + appl_pool_1_cfg.subpool[0].num = 16384; + appl_pool_1_cfg.subpool[0].cache_size = 128; + + appl_pool_1_cfg.subpool[1].size = 512; + appl_pool_1_cfg.subpool[1].num = 1024; + appl_pool_1_cfg.subpool[1].cache_size = 64; + + appl_pool_1_cfg.subpool[2].size = 1024; + appl_pool_1_cfg.subpool[2].num = 1024; + appl_pool_1_cfg.subpool[2].cache_size = 32; + + appl_pool_1_cfg.subpool[3].size = 2048; + appl_pool_1_cfg.subpool[3].num = 1024; + appl_pool_1_cfg.subpool[3].cache_size = 16; + + em_pool_t appl_pool = em_pool_create(APPL_POOL_1_NAME, EM_POOL_UNDEF, + &appl_pool_1_cfg); + if (appl_pool == EM_POOL_UNDEF) + APPL_EXIT_FAILURE("appl pool:%s create failed", APPL_POOL_1_NAME); + appl_conf->pools[0] = appl_pool; + appl_conf->num_pools = 1; + + appl_conf->pktio.in_mode = parsed->args_appl.pktio.in_mode; + appl_conf->pktio.if_count = parsed->args_appl.pktio.if_count; + for (int i = 0; i < parsed->args_appl.pktio.if_count; i++) { + memcpy(appl_conf->pktio.if_name[i], + parsed->args_appl.pktio.if_name[i], IF_NAME_LEN + 1); + } + + appl_conf->pktio.pktpool_em = parsed->args_appl.pktio.pktpool_em; + appl_conf->pktio.pktin_vector = parsed->args_appl.pktio.pktin_vector; + appl_conf->pktio.vecpool_em = parsed->args_appl.pktio.vecpool_em; +} + +static void +create_pktio(appl_conf_t *appl_conf/*in/out*/, const cpu_conf_t *cpu_conf) +{ + pktio_mem_reserve(); + pktio_pool_create(appl_conf->pktio.if_count, + appl_conf->pktio.pktpool_em, + appl_conf->pktio.pktin_vector, + appl_conf->pktio.vecpool_em); + pktio_init(appl_conf); + /* Create a pktio instance for each interface */ + for (int i = 0; i < appl_conf->pktio.if_count; i++) { + int if_id = pktio_create(appl_conf->pktio.if_name[i], + appl_conf->pktio.in_mode, + appl_conf->pktio.pktin_vector, + appl_conf->pktio.if_count, + cpu_conf->num_worker); + if (unlikely(if_id < 0)) + APPL_EXIT_FAILURE("Cannot create pktio if:%s", + appl_conf->pktio.if_name[i]); + /* Store the interface id */ + appl_conf->pktio.if_ids[i] = if_id; + } +} + +static void +term_pktio(const appl_conf_t *appl_conf) +{ + /* Stop, close and free the pktio resources */ + pktio_stop(); + pktio_close(); + pktio_deinit(appl_conf); + pktio_pool_destroy(appl_conf->pktio.pktpool_em, + appl_conf->pktio.pktin_vector, + appl_conf->pktio.vecpool_em); + pktio_mem_free(); +} + +static int +create_odp_threads(odp_instance_t instance, + const parse_args_t *parsed, const cpu_conf_t *cpu_conf, + int (*start_fn)(void *fn_arg), void *fn_arg, + odph_thread_t thread_tbl[/*out*/]) +{ + odph_thread_common_param_t thr_common; + odph_thread_param_t thr_param; /* same for all thrs */ + int ret; + + /* + * Generate a thread summary for the user + */ + char cpumaskstr[ODP_CPUMASK_STR_SIZE]; + + APPL_PRINT("num worker: %i\n", cpu_conf->num_worker); + + odp_cpumask_to_str(&cpu_conf->worker_mask, cpumaskstr, + sizeof(cpumaskstr)); + APPL_PRINT("worker thread mask: %s\n", cpumaskstr); + + odph_thread_common_param_init(&thr_common); + thr_common.instance = instance; + thr_common.cpumask = &cpu_conf->worker_mask; + /* + * Select between pthreads and processes, + * parse_args() has verified .thread_per_core vs .process_per_core + */ + if (parsed->args_em.thread_per_core) + thr_common.thread_model = 0; /* pthreads */ + else + thr_common.thread_model = 1; /* processes */ + thr_common.sync = 1; /* Synchronize 
thread start up */
+    thr_common.share_param = 1; /* same 'thr_param' for all threads */
+
+    odph_thread_param_init(&thr_param);
+    thr_param.start = start_fn;
+    thr_param.arg = fn_arg;
+    thr_param.thr_type = ODP_THREAD_WORKER;
+
+    /*
+     * Create odp worker threads to run as EM-cores
+     */
+    ret = odph_thread_create(thread_tbl /*out*/,
+                             &thr_common, &thr_param,
+                             cpu_conf->num_worker);
+    return ret;
+}
+
+/**
+ * @brief Helper to run_core_fn():
+ *        Start-up and init all EM-cores before application setup
+ *
+ * @param sync       Application start-up and tear-down synchronization vars
+ * @param appl_conf  Application configuration
+ */
+static void startup_all_cores(sync_t *sync, appl_conf_t *appl_conf,
+                              bool is_thread_per_core)
+{
+    em_status_t stat;
+    int core_id;
+    uint64_t cores;
+
+    /*
+     * Initialize this thread of execution (proc, thread), i.e. EM-core
+     */
+    stat = em_init_core();
+    if (stat != EM_OK)
+        APPL_EXIT_FAILURE("em_init_core():%" PRI_STAT ", EM-core:%02d",
+                          stat, em_core_id());
+
+    odp_barrier_wait(&sync->start_barrier);
+
+    if (appl_conf->pktio.if_count > 0)
+        pktio_mem_lookup(is_thread_per_core);
+
+    odp_barrier_wait(&sync->start_barrier);
+
+    /*
+     * EM is ready on this EM-core (= proc, thread or core)
+     * It is now OK to start creating EOs, queues etc.
+     *
+     * Note that only one core needs to create the shared memory, EOs,
+     * queues etc. needed by the application; all other cores need only
+     * look up the shared mem and go directly into the em_dispatch()-loop,
+     * where they are ready to process events as soon as the EOs have been
+     * started and queues enabled.
+     */
+    core_id = em_core_id();
+    cores = (uint64_t)em_core_count();
+
+    /* Ensure all EM cores can find the default event pool */
+    if (em_pool_find(EM_POOL_DEFAULT_NAME) != EM_POOL_DEFAULT)
+        APPL_EXIT_FAILURE("em_pool_find(%s) c:%d",
+                          EM_POOL_DEFAULT_NAME, core_id);
+
+    if (core_id == 0) {
+        /*
+         * Initialize the application and allocate shared memory.
+         */
+        test_init();
+    }
+
+    odp_barrier_wait(&sync->start_barrier);
+
+    if (core_id != 0) {
+        /* Look up the shared memory */
+        test_init();
+    }
+
+    const char *str = appl_conf->dispatch_rounds == 0 ?
+                      "forever" : "rounds";
+
+    APPL_PRINT("Entering the event dispatch loop(%s=%d) on EM-core:%02d\n",
+               str, appl_conf->dispatch_rounds, core_id);
+
+    odp_barrier_wait(&sync->start_barrier); /* to print pretty */
+
+    /*
+     * Don't use barriers to sync the cores after this!
+     * EM synchronous API funcs (e.g. em_eo_start_sync()) block until the
+     * function has completed on all cores - a barrier might hinder a core
+     * from completing an operation.
+     */
+
+    if (core_id == 0) {
+        /*
+         * Create and start application EOs, pass the appl_conf.
+         */
+        test_start(appl_conf);
+    }
+
+    /*
+     * Keep all cores dispatching until 'test_start()' has been
+     * completed in order to handle sync-API function calls and to enter
+     * the main dispatch loop almost at the same time.
+     */
+    env_atomic64_inc(&sync->enter_count);
+    do {
+        em_dispatch(STARTUP_DISPATCH_ROUNDS);
+        if (core_id == 0) {
+            /* Start pktio if configured */
+            if (appl_conf->pktio.if_count > 0)
+                pktio_start();
+            env_atomic64_inc(&sync->enter_count);
+        }
+    } while (env_atomic64_get(&sync->enter_count) <= cores);
+}
+
+/**
+ * @brief Helper to run_core_fn():
+ *        Start-up and init only one EM-core before application setup. The rest
+ *        of the EM-cores are initialized only after that. 
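+ *
+ * This mimics an application that creates its EOs and queues on a boot
+ * core while the remaining EM-cores are still being brought up.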
+ *
+ * @param sync       Application start-up and tear-down synchronization vars
+ * @param appl_conf  Application configuration
+ */
+static void startup_one_core_first(sync_t *sync, appl_conf_t *appl_conf,
+                                   bool is_thread_per_core)
+{
+    em_status_t stat;
+    uint64_t enter_count = env_atomic64_return_add(&sync->enter_count, 1);
+
+    if (enter_count == 0) {
+        /*
+         * Initialize first EM-core
+         */
+        stat = em_init_core();
+        if (stat != EM_OK)
+            APPL_EXIT_FAILURE("em_init_core():%" PRI_STAT ", EM-core:%02d",
+                              stat, em_core_id());
+        if (appl_conf->pktio.if_count > 0)
+            pktio_mem_lookup(is_thread_per_core);
+
+        /* Ensure all EM cores can find the default event pool */
+        if (em_pool_find(EM_POOL_DEFAULT_NAME) != EM_POOL_DEFAULT)
+            APPL_EXIT_FAILURE("em_pool_find(%s) c:%d",
+                              EM_POOL_DEFAULT_NAME, em_core_id());
+
+        /*
+         * Don't use barriers to sync the cores after this!
+         * EM synchronous API funcs (e.g. em_eo_start_sync()) block until
+         * the function has completed on all cores - a barrier might hinder
+         * a core from completing an operation.
+         */
+    }
+
+    odp_barrier_wait(&sync->start_barrier);
+
+    if (enter_count == 0) {
+        /*
+         * Initialize the application and allocate shared memory.
+         */
+        test_init();
+        /*
+         * Create and start application EOs, pass the appl_conf.
+         */
+        test_start(appl_conf);
+    } else {
+        /*
+         * Sleep until the first EM-core has completed test_init() and
+         * test_start() to set up the application.
+         * Use sleep instead of a barrier or lock etc. to avoid a deadlock:
+         * if the first core is using sync-APIs, it waits for completion by
+         * the other EM-cores - so we need to get into dispatch instead of
+         * blocking (this is for testing only and NOT the most elegant
+         * start-up).
+         */
+        sleep(1);
+
+        /*
+         * Initialize this thread of execution (proc, thread), i.e. EM-core
+         */
+        stat = em_init_core();
+        if (stat != EM_OK)
+            APPL_EXIT_FAILURE("em_init_core():%" PRI_STAT ", EM-core:%02d",
+                              stat, em_core_id());
+
+        if (appl_conf->pktio.if_count > 0)
+            pktio_mem_lookup(is_thread_per_core);
+
+        /*
+         * EM is ready on this EM-core (= proc, thread or core)
+         * It is now OK to start creating EOs, queues etc.
+         *
+         * Note that only one core needs to create the shared memory, EOs,
+         * queues etc. needed by the application; all other cores need only
+         * look up the shared mem and go directly into the
+         * em_dispatch()-loop, where they are ready to process events as
+         * soon as the EOs have been started and queues enabled.
+         */
+
+        /* Ensure all EM cores can find the default event pool */
+        if (em_pool_find(EM_POOL_DEFAULT_NAME) != EM_POOL_DEFAULT)
+            APPL_EXIT_FAILURE("em_pool_find(%s) c:%d",
+                              EM_POOL_DEFAULT_NAME, em_core_id());
+
+        /* Look up the shared memory */
+        test_init();
+    }
+
+    const int core_id = em_core_id();
+    const uint64_t cores = (uint64_t)em_core_count();
+
+    const char *str = appl_conf->dispatch_rounds == 0 ?
+                      "forever" : "rounds";
+
+    APPL_PRINT("Entering the event dispatch loop(%s=%d) on EM-core:%02d\n",
+               str, appl_conf->dispatch_rounds, core_id);
+
+    /*
+     * Keep all cores dispatching until 'test_start()' has been
+     * completed in order to handle sync-API function calls and to enter
+     * the main dispatch loop almost at the same time. 
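+     *
+     * Accounting sketch: every core added 1 to 'enter_count' when entering
+     * this function and adds 1 more below (=> 2 * cores in total); core 0
+     * additionally increments once per dispatch round, so no core can
+     * leave this loop until all cores have reached it and core 0 has
+     * completed at least one round.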
+     */
+    env_atomic64_inc(&sync->enter_count);
+    do {
+        em_dispatch(STARTUP_DISPATCH_ROUNDS);
+        if (core_id == 0) {
+            /* Start pktio if configured */
+            if (appl_conf->pktio.if_count > 0)
+                pktio_start();
+            env_atomic64_inc(&sync->enter_count);
+        }
+    } while (env_atomic64_get(&sync->enter_count) <= 2 * cores);
+}
+
+/**
+ * Core runner - application entry on each EM-core
+ *
+ * Application setup and event dispatch loop run by each EM-core.
+ * A call to em_init_core() MUST be made on each EM-core before using other
+ * EM API functions to create EOs, queues etc. or calling em_dispatch().
+ *
+ * @param arg  passed arg actually of type 'appl_shm_t *', i.e. appl shared mem
+ */
+static int
+run_core_fn(void *arg)
+{
+    odp_shm_t shm;
+    appl_shm_t *appl_shm;
+    void *shm_addr;
+    appl_conf_t *appl_conf;
+    sync_t *sync;
+    em_status_t stat;
+    bool is_thread_per_core;
+
+    /* thread: depend on the odp helper to call odp_init_local */
+    /* process: parent called odp_init_local, fork creates copy for child */
+
+    appl_shm = (appl_shm_t *)arg;
+
+    /* Look up the appl shared memory - sanity check */
+    shm = odp_shm_lookup("appl_shm");
+    if (unlikely(shm == ODP_SHM_INVALID))
+        APPL_EXIT_FAILURE("appl_shm lookup failed");
+    shm_addr = odp_shm_addr(shm);
+    if (unlikely(shm_addr == NULL || shm_addr != (void *)appl_shm))
+        APPL_EXIT_FAILURE("obtaining shared mem addr failed:\n"
+                          "shm_addr:%p appl_shm:%p",
+                          shm_addr, appl_shm);
+
+    appl_conf = &appl_shm->appl_conf;
+    sync = &appl_shm->sync;
+    is_thread_per_core = appl_shm->em_conf.thread_per_core ? true : false;
+
+    /*
+     * Allow testing different startup scenarios:
+     */
+    switch (appl_conf->startup_mode) {
+    case STARTUP_ALL_CORES:
+        /*
+         * All EM-cores start up and init before application setup
+         */
+        startup_all_cores(sync, appl_conf, is_thread_per_core);
+        break;
+    case STARTUP_ONE_CORE_FIRST:
+        /*
+         * Only one EM-core starts up and inits before application setup;
+         * the rest of the EM-cores are initialized after that.
+         */
+        startup_one_core_first(sync, appl_conf, is_thread_per_core);
+        break;
+    default:
+        APPL_EXIT_FAILURE("Unsupported startup-mode:%d",
+                          appl_conf->startup_mode);
+        break;
+    }
+
+    APPL_PRINT("%s() on EM-core:%02d\n", __func__, em_core_id());
+
+    /*
+     * Enter the EM event dispatch loop (0==forever) on this EM-core.
+     */
+    int core_id = em_core_id();
+    uint64_t cores = (uint64_t)em_core_count();
+    uint32_t dispatch_rounds = appl_conf->dispatch_rounds;
+    uint32_t exit_check_rounds = EXIT_CHECK_DISPATCH_ROUNDS;
+    uint32_t rounds;
+
+    if (dispatch_rounds == 0) {
+        /*
+         * Dispatch forever, in chunks of 'exit_check_rounds',
+         * or until 'exit_flag' is set by SIGINT (CTRL-C).
+         */
+        while (!appl_shm->exit_flag)
+            em_dispatch(exit_check_rounds);
+    } else {
+        /*
+         * Dispatch for 'dispatch_rounds' in chunks of 'rounds',
+         * or until 'exit_flag' is set by SIGINT (CTRL-C).
+         */
+        rounds = MIN(dispatch_rounds, exit_check_rounds);
+        do {
+            em_dispatch(rounds);
+            dispatch_rounds -= rounds;
+        } while (dispatch_rounds > rounds && !appl_shm->exit_flag);
+
+        if (dispatch_rounds > 0) {
+            rounds = MIN(dispatch_rounds, rounds);
+            em_dispatch(rounds);
+        }
+    }
+    /*
+     * Allow apps one more round with 'exit_flag' set to flush events from
+     * the sched queues etc. 
+ */ + if (!appl_shm->exit_flag) + appl_shm->exit_flag = 1; /* potential race with SIGINT-handler*/ + em_dispatch(exit_check_rounds); + + /* + * Dispatch-loop done for application, prepare for controlled shutdown + */ + + uint64_t exit_count = env_atomic64_return_add(&sync->exit_count, 1); + + /* First core to exit dispatch stops the application */ + if (exit_count == 0) { + if (appl_conf->pktio.if_count > 0) { + /* halt further pktio rx & tx */ + pktio_halt(); + /* dispatch with pktio stopped before test_stop()*/ + em_dispatch(TERM_DISPATCH_ROUNDS); + } + /* + * Stop and delete created application EOs + */ + test_stop(appl_conf); + } + + /* + * Continue dispatching until all cores have exited the dispatch loop + * and until 'test_stop()' has been completed, the cores might have to + * react to teardown related events such as EM function completion + * events & notifs. + */ + do { + em_dispatch(TERM_DISPATCH_ROUNDS); + if (exit_count == 0) { + /* + * First core to exit increments 'exit_count' twice - + * this ensures that all other cores will stay in this + * dispatch loop until the first core reaches the loop. + */ + env_atomic64_inc(&sync->exit_count); + } + exit_count = env_atomic64_get(&sync->exit_count); + } while (exit_count <= cores); + + /* + * Proper application teardown should have been completed on all cores, + * still do some 'empty' dispatch rounds to drain all possibly + * remaining events in the system. + */ + while (em_dispatch(TERM_DISPATCH_ROUNDS) > 0) + ; + + APPL_PRINT("Left the event dispatch loop on EM-core:%02d\n", core_id); + + odp_barrier_wait(&sync->exit_barrier); + + if (core_id == 0) { + /* + * Free allocated test resources + */ + test_term(); + } + + odp_barrier_wait(&sync->exit_barrier); + + stat = em_term_core(); + if (stat != EM_OK) + APPL_EXIT_FAILURE("em_term_core(%d):%" PRI_STAT "", + core_id, stat); + + odp_barrier_wait(&sync->exit_barrier); + + /* depend on the odp helper to call odp_term_local */ + + return 0; +} + +/** + * Parse and store relevant command line arguments. Set config options for both + * application and EM. + * + * EM options are stored into em_conf and application specific options into + * appl_conf. Note that both application and EM parsing is done here since EM + * should not, by design, be concerned with the parsing of options, instead + * em_conf_t specifies the options needed by the EM-implementation (HW, device + * and env specific). 
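+ *
+ * Example invocations (the interface names are hypothetical):
+ *   app -c 0xe -t               EM-cores on cpus 1-3, thread-per-core
+ *   app -c 0x3 -p -i eth0,eth1 -m 3
+ *                               EM-cores on cpus 0-1, process-per-core,
+ *                               pktio on eth0 & eth1, sched-atomic input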
+ *
+ * @param argc    Command line argument count
+ * @param argv[]  Command line arguments
+ * @param parsed  Parsed EM & application config options (out param),
+ *                filled based on argv[]
+ */
+static void
+parse_args(int argc, char *argv[], parse_args_t *parsed /* out param */)
+{
+    static const struct option longopts[] = {
+        {"coremask",         required_argument, NULL, 'c'},
+        {"process-per-core", no_argument,       NULL, 'p'},
+        {"thread-per-core",  no_argument,       NULL, 't'},
+        {"device-id",        required_argument, NULL, 'd'},
+        {"dispatch-rounds",  required_argument, NULL, 'r'},
+        {"eth-interface",    required_argument, NULL, 'i'},
+        {"pktpool-em",       no_argument,       NULL, 'e'},
+        {"pktpool-odp",      no_argument,       NULL, 'o'},
+        {"pktin-mode",       required_argument, NULL, 'm'},
+        {"pktin-vector",     no_argument,       NULL, 'v'},
+        {"startup-mode",     required_argument, NULL, 's'},
+        {"vecpool-em",       no_argument,       NULL, 'x'},
+        {"vecpool-odp",      no_argument,       NULL, 'y'},
+        {"help",             no_argument,       NULL, 'h'},
+        {NULL, 0, NULL, 0}
+    };
+    static const char *shortopts = "+c:ptd:r:i:oem:vs:xyh";
+    long device_id = -1;
+
+    /* set defaults: */
+    parsed->args_appl.pktio.in_mode = DIRECT_RECV;
+    parsed->args_appl.startup_mode = STARTUP_ALL_CORES;
+
+    opterr = 0; /* don't complain about unknown options here */
+
+    APPL_PRINT("EM application options:\n");
+
+    /*
+     * Parse the application & EM arguments and save core mask.
+     * Note: Use '+' at the beginning of optstring:
+     *       - don't permute the contents of argv[].
+     * Note: Stops at "--"
+     */
+    while (1) {
+        int opt;
+        int long_index;
+
+        opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+        if (opt == -1)
+            break; /* No more options */
+
+        switch (opt) {
+        case 'c': { /* --coremask */
+            char *mask_str = optarg;
+            char tmp_str[EM_CORE_MASK_STRLEN];
+            int err;
+
+            /*
+             * Store the core mask for EM - usage depends on the
+             * process-per-core or thread-per-core mode selected. 
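+             * E.g. "-c 0xfe" gives a phys_mask with cpus 1-7 set
+             * and a core_count of 7.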
+             */
+            em_core_mask_zero(&parsed->args_em.phys_mask);
+            err = em_core_mask_set_str(mask_str,
+                                       &parsed->args_em.phys_mask);
+            if (err)
+                APPL_EXIT_FAILURE("Invalid coremask(%s) given",
+                                  mask_str);
+
+            parsed->args_em.core_count =
+                em_core_mask_count(&parsed->args_em.phys_mask);
+
+            em_core_mask_tostr(tmp_str, sizeof(tmp_str),
+                               &parsed->args_em.phys_mask);
+            APPL_PRINT(" Coremask: %s\n"
+                       " Core Count: %i\n",
+                       tmp_str, parsed->args_em.core_count);
+        }
+        break;
+
+        case 'p': /* --process-per-core */
+            parsed->args_em.process_per_core = 1;
+            break;
+
+        case 't': /* --thread-per-core */
+            parsed->args_em.thread_per_core = 1;
+            break;
+
+        case 'd': { /* --device-id */
+            char *endptr;
+
+            device_id = strtol(optarg, &endptr, 0);
+
+            if (*endptr != '\0' ||
+                (uint64_t)device_id > UINT16_MAX)
+                APPL_EXIT_FAILURE("Invalid device-id:%s",
+                                  optarg);
+
+            parsed->args_em.device_id = (uint16_t)(device_id & 0xffff);
+        }
+        break;
+
+        case 'r': { /* --dispatch-rounds */
+            int rounds = atoi(optarg);
+
+            if (rounds < 0)
+                APPL_EXIT_FAILURE("Invalid dispatch-rounds:%s",
+                                  optarg);
+            parsed->args_appl.dispatch_rounds = (uint32_t)rounds;
+        }
+        break;
+
+        case 'i': { /* --eth-interface */
+            int i;
+            size_t len, max;
+            char *name;
+
+            name = strtok(optarg, ",");
+            for (i = 0; name != NULL; i++) {
+                if (i > IF_MAX_NUM - 1)
+                    APPL_EXIT_FAILURE("Too many if's:%d",
+                                      i + 1);
+                max = sizeof(parsed->args_appl.pktio.if_name[i]);
+                len = strnlen(name, max);
+                if (len + 1 > max)
+                    APPL_EXIT_FAILURE("Invalid if name:%s",
+                                      name);
+
+                strncpy(parsed->args_appl.pktio.if_name[i], name, len);
+                parsed->args_appl.pktio.if_name[i][len] = '\0';
+
+                name = strtok(NULL, ",");
+            }
+            parsed->args_appl.pktio.if_count = i;
+        }
+        break;
+
+        case 'e': /* --pktpool-em */
+            parsed->args_appl.pktio.pktpool_em = true;
+            break;
+
+        case 'o': /* --pktpool-odp */
+            parsed->args_appl.pktio.pktpool_odp = true;
+            break;
+
+        case 'm': { /* --pktin-mode */
+            int mode = atoi(optarg);
+
+            if (mode == 0) {
+                parsed->args_appl.pktio.in_mode = DIRECT_RECV;
+            } else if (mode == 1) {
+                parsed->args_appl.pktio.in_mode = PLAIN_QUEUE;
+            } else if (mode == 2) {
+                parsed->args_appl.pktio.in_mode = SCHED_PARALLEL;
+            } else if (mode == 3) {
+                parsed->args_appl.pktio.in_mode = SCHED_ATOMIC;
+            } else if (mode == 4) {
+                parsed->args_appl.pktio.in_mode = SCHED_ORDERED;
+            } else {
+                usage(argv[0]);
+                APPL_EXIT_FAILURE("Unknown value: -m, --pktin-mode = %d", mode);
+            }
+        }
+        break;
+
+        case 'v': { /* --pktin-vector */
+            parsed->args_appl.pktio.pktin_vector = true;
+        }
+        break;
+
+        case 's': { /* --startup-mode */
+            int mode = atoi(optarg);
+
+            if (mode == 0) {
+                parsed->args_appl.startup_mode = STARTUP_ALL_CORES;
+            } else if (mode == 1) {
+                parsed->args_appl.startup_mode = STARTUP_ONE_CORE_FIRST;
+            } else {
+                usage(argv[0]);
+                APPL_EXIT_FAILURE("Unknown value: -s, --startup-mode = %d", mode);
+            }
+        }
+        break;
+
+        case 'x': /* --vecpool-em, only used if --pktin-vector given */
+            parsed->args_appl.pktio.vecpool_em = true;
+            break;
+
+        case 'y': /* --vecpool-odp, only used if --pktin-vector given */
+            parsed->args_appl.pktio.vecpool_odp = true;
+            break;
+
+        case 'h': /* --help */
+            usage(argv[0]);
+            exit(EXIT_SUCCESS);
+            break;
+
+        default:
+            usage(argv[0]);
+            APPL_EXIT_FAILURE("Unknown option: %c!", opt);
+            break;
+        }
+    }
+
+    optind = 1; /* reset 'extern optind' from the getopt lib */
+
+    /* Sanity check: */
+    if (!parsed->args_em.core_count) {
+        usage(argv[0]);
+        APPL_EXIT_FAILURE("Give mandatory coremask!");
+    }
+
+    /* Check if a device-id was given, if not use the default '0' */
+    if (device_id 
== -1) /* not set */ + parsed->args_em.device_id = 0; + APPL_PRINT(" Device-id: 0x%" PRIX16 "\n", parsed->args_em.device_id); + + /* Sanity checks: */ + if (!(parsed->args_em.process_per_core ^ parsed->args_em.thread_per_core)) { + usage(argv[0]); + APPL_EXIT_FAILURE("Select EITHER:\n" + "process-per-core(-p) OR thread-per-core(-t)!"); + } + if (parsed->args_em.thread_per_core) + APPL_PRINT(" EM mode: Thread-per-core\n"); + else + APPL_PRINT(" EM mode: Process-per-core\n"); + + const char *startup_mode_str = ""; + + /* Startup-mode */ + if (parsed->args_appl.startup_mode == STARTUP_ALL_CORES) + startup_mode_str = "All EM-cores before application"; + else if (parsed->args_appl.startup_mode == STARTUP_ONE_CORE_FIRST) + startup_mode_str = "One EM-core before application (then the rest)"; + /* other values are reported as errors earlier in parsing */ + + APPL_PRINT(" Startup-mode: %s\n", startup_mode_str); + + /* Store the application name */ + size_t len = sizeof(parsed->args_appl.name); + + strncpy(parsed->args_appl.name, NO_PATH(argv[0]), len); + parsed->args_appl.name[len - 1] = '\0'; + + /* Packet I/O */ + if (parsed->args_appl.pktio.if_count > 0) { + if (parsed->args_appl.pktio.pktpool_em && parsed->args_appl.pktio.pktpool_odp) { + usage(argv[0]); + APPL_EXIT_FAILURE("Select EITHER:\n" + "pktpool-em(-e) OR pktpool-odp(-o)!"); + } + if (!parsed->args_appl.pktio.pktpool_em && !parsed->args_appl.pktio.pktpool_odp) + parsed->args_appl.pktio.pktpool_em = true; /* default if none given */ + + if (parsed->args_appl.pktio.pktpool_em) + APPL_PRINT(" Pktio pool: EM event-pool\n"); + else + APPL_PRINT(" Pktio pool: ODP pkt-pool\n"); + + APPL_PRINT(" Pktin-mode: %s\n", + pktin_mode_str(parsed->args_appl.pktio.in_mode)); + + if (parsed->args_appl.pktio.pktin_vector) { + APPL_PRINT(" Pktin-vector: Enabled\n"); + if (parsed->args_appl.pktio.vecpool_em && + parsed->args_appl.pktio.vecpool_odp) { + usage(argv[0]); + APPL_EXIT_FAILURE("Select EITHER:\n" + "vecpool-em(-x) OR vecpool-odp(-y)!"); + } + if (!parsed->args_appl.pktio.vecpool_em && + !parsed->args_appl.pktio.vecpool_odp) + parsed->args_appl.pktio.vecpool_em = true; /* default */ + + if (parsed->args_appl.pktio.vecpool_em) + APPL_PRINT(" Vector pool: EM vector-pool\n"); + else + APPL_PRINT(" Vector pool: ODP vector-pool\n"); + } else { + APPL_PRINT(" Pktin-vector: Disabled\n"); + parsed->args_appl.pktio.vecpool_em = false; + parsed->args_appl.pktio.vecpool_odp = false; + } + } else { + APPL_PRINT(" Pktio: Not used\n"); + parsed->args_appl.pktio.pktpool_em = false; + parsed->args_appl.pktio.pktpool_odp = false; + parsed->args_appl.pktio.vecpool_em = false; + parsed->args_appl.pktio.vecpool_odp = false; + } +} + +/** + * Verify the cpu setup - sanity check and store cpus to use + * + * Verify the cpu count and mask against system values + */ +static void +verify_cpu_setup(const parse_args_t *parsed, + cpu_conf_t *cpu_conf /* out */) +{ + odp_cpumask_t invalid_mask; + odp_cpumask_t check_mask; + odp_cpumask_t zero_mask; + int usable_cpus; + cpu_set_t cpuset; + int ret; + + const odp_cpumask_t *cpu_mask = &parsed->args_em.phys_mask.odp_cpumask; + int num_cpus = parsed->args_em.core_count; + + /* + * Verify cpu setup + */ + if (num_cpus > MAX_THREADS) + APPL_EXIT_FAILURE("Setup configured for max %d cores, not %d", + MAX_THREADS, num_cpus); + + odp_cpumask_zero(&invalid_mask); + odp_cpumask_zero(&check_mask); + odp_cpumask_zero(&zero_mask); + usable_cpus = 0; + + CPU_ZERO(&cpuset); + /* get the cpus/cores available to this application */ + ret = 
sched_getaffinity(0, sizeof(cpuset), &cpuset); + if (ret < 0) + APPL_EXIT_FAILURE("sched_getaffinity:%d errno(%d):%s", + ret, errno, strerror(errno)); + + /* count the usable cpus and also record the invalid cpus */ + for (int i = 0; i < CPU_SETSIZE - 1; i++) { + if (CPU_ISSET(i, &cpuset)) + usable_cpus++; + else + odp_cpumask_set(&invalid_mask, i); + } + + /* + * Make sure no cpu in the cpu_mask is set in the invalid_mask. + * For a valid setup check_mask will be all-zero, otherwise it + * will contain the invalid cpus. + */ + odp_cpumask_and(&check_mask, &invalid_mask, cpu_mask); + if (!odp_cpumask_equal(&zero_mask, &check_mask) || + num_cpus > usable_cpus) { + char cpus_str[ODP_CPUMASK_STR_SIZE]; + char check_str[ODP_CPUMASK_STR_SIZE]; + + memset(cpus_str, '\0', sizeof(cpus_str)); + memset(check_str, '\0', sizeof(check_str)); + odp_cpumask_to_str(cpu_mask, cpus_str, sizeof(cpus_str)); + odp_cpumask_to_str(&check_mask, check_str, sizeof(check_str)); + + APPL_EXIT_FAILURE("Invalid cpus - requested:%d available:%d\n" + "cpu_mask:%s of which invalid-cpus:%s", + num_cpus, usable_cpus, cpus_str, check_str); + } + + /* + * Store the cpu conf to be set up for ODP + */ + odp_cpumask_copy(&cpu_conf->worker_mask, + &parsed->args_em.phys_mask.odp_cpumask); + cpu_conf->num_worker = parsed->args_em.core_count; +} + +/** + * Install a signal handler + */ +static void +install_sig_handler(int signum, void (*sig_handler)(int), int flags) +{ + struct sigaction sa; + + sigemptyset(&sa.sa_mask); + + sa.sa_flags = SA_RESTART; /* restart interrupted system calls */ + sa.sa_flags |= flags; + sa.sa_handler = sig_handler; + + if (sigaction(signum, &sa, NULL) == -1) + APPL_EXIT_FAILURE("sigaction() fails (errno(%i)=%s)", + errno, strerror(errno)); +} + +/** + * Signal handler for SIGINT (e.g. Ctrl-C to stop the program) + */ +static void +sigint_handler(int signo ODP_UNUSED) +{ + if (appl_shm == NULL) + return; + appl_shm->exit_flag = 1; +} + +/** + * Signal handler for SIGCHLD (parent receives when child process dies). + */ +static void +sigchld_handler(int sig ODP_UNUSED) +{ + int status; + pid_t child; + + /* Child-process termination requested, normal tear-down, just return */ + if (appl_shm->exit_flag) + return; + + /* Nonblocking waits until no more dead children are found */ + do { + child = waitpid(-1, &status, WNOHANG); + } while (child > 0); + + if (child == -1 && errno != ECHILD) + _exit(EXIT_FAILURE); + + /* + * Exit the parent process - triggers SIGTERM in the remaining children + * (set by prctl(PR_SET_PDEATHSIG, SIGTERM)). + */ + _exit(EXIT_SUCCESS); +} + +__attribute__((format(printf, 2, 0))) +int appl_vlog(em_log_level_t level, const char *fmt, va_list args) +{ + int r; + FILE *logfd; + + switch (level) { + case EM_LOG_DBG: + case EM_LOG_PRINT: + logfd = stdout; + break; + case EM_LOG_ERR: + default: + logfd = stderr; + break; + } + + r = vfprintf(logfd, fmt, args); + return r; +} + +__attribute__((format(printf, 2, 3))) +int appl_log(em_log_level_t level, const char *fmt, ...) 
+{ + va_list args; + int r; + + va_start(args, fmt); + r = appl_vlog(level, fmt, args); + va_end(args); + + return r; +} + +/** + * Delay spinloop + */ +void delay_spin(const uint64_t spin_count) +{ + env_atomic64_t dummy; /* use atomic to avoid optimization */ + uint64_t i; + + env_atomic64_init(&dummy); + + if (likely(appl_shm)) { + for (i = 0; i < spin_count && !appl_shm->exit_flag; i++) + env_atomic64_inc(&dummy); + } else { + for (i = 0; i < spin_count; i++) + env_atomic64_inc(&dummy); + } +} diff --git a/programs/common/cm_setup.h b/programs/common/cm_setup.h index 3c449894..ec1e4216 100644 --- a/programs/common/cm_setup.h +++ b/programs/common/cm_setup.h @@ -1,222 +1,236 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef CM_SETUP_H -#define CM_SETUP_H - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif -#include -#include -#include -#include -#include - -#define APPL_NAME_LEN (64) - -#define APPL_POOLS_MAX (16) - -#define PLAT_PARAM_SIZE (8) - -#define MAX_THREADS (128) - -#define IF_NAME_LEN (16) - -#define IF_MAX_NUM (8) - -/** Get rid of path in filename - only for unix-type paths using '/' */ -#define NO_PATH(file_name) (strrchr((file_name), '/') ? \ - strrchr((file_name), '/') + 1 : (file_name)) - -#define APPL_LOG(level, ...) appl_log((level), ## __VA_ARGS__) -#define APPL_VLOG(level, fmt, args) appl_vlog((level), (fmt), (args)) -#define APPL_PRINT(...) APPL_LOG(EM_LOG_PRINT, ## __VA_ARGS__) - -/** Simple appl error handling: log & exit */ -#define APPL_EXIT_FAILURE(...) do { \ - appl_log(EM_LOG_ERR, \ - "Appl Error: %s:%i, %s() - ", \ - NO_PATH(__FILE__), __LINE__, __func__); \ - appl_log(EM_LOG_ERR, ## __VA_ARGS__); \ - appl_log(EM_LOG_ERR, "\n\n"); \ - exit(EXIT_FAILURE); \ -} while (0) - -#define APPL_ERROR(...) 
do { \ - appl_log(EM_LOG_ERR, \ - "Appl Error: %s:%i, %s() - ", \ - NO_PATH(__FILE__), __LINE__, __func__); \ - appl_log(EM_LOG_ERR, ## __VA_ARGS__); \ - appl_log(EM_LOG_ERR, "\n\n"); \ -} while (0) - -/** - * Application synchronization - */ -typedef struct { - /** Startup synchronization barrier */ - odp_barrier_t start_barrier; - /** Exit / termination synchronization barrier */ - odp_barrier_t exit_barrier; - /** Enter counter for tracking core / odp-thread startup */ - env_atomic64_t enter_count; - /** Exit counter for tracking core / odp-thread exit */ - env_atomic64_t exit_count; -} sync_t; - -/** - * @brief Application startup mode - * - * Enables testing of different startup scenarios. - */ -typedef enum startup_mode { - /** - * Start up & initialize all EM cores before setting up the - * application using EM APIs. The em_init() function has been run and - * all EM-cores have run em_init_core() before application setup. - * Option: -s, --startup-mode = 0 (All EM-cores before application) - */ - STARTUP_ALL_CORES = 0, - /** - * Start up & initialize only one EM core before setting up the - * application using EM APIs. The em_init() function has been run and - * only one EM-core has run em_init_core() before application setup. - * Option: -s, --startup-mode = 1 (One EM-core before application...)) - */ - STARTUP_ONE_CORE_FIRST -} startup_mode_t; - -/** - * @brief Packet input mode - * - * Enables testing different packet-IO input modes - */ -typedef enum pktin_mode_t { - DIRECT_RECV, - PLAIN_QUEUE, - SCHED_PARALLEL, - SCHED_ATOMIC, - SCHED_ORDERED, -} pktin_mode_t; - -/** - * Application configuration - */ -typedef struct { - /** application name */ - char name[APPL_NAME_LEN]; - /** number of processes */ - unsigned int num_procs; - /** number of threads */ - unsigned int num_threads; - /** dispatch rounds before returning */ - uint32_t dispatch_rounds; - /** Start-up mode */ - startup_mode_t startup_mode; - - /** number of memory pools set up for the application */ - unsigned int num_pools; - /** pool ids of the created application pools */ - em_pool_t pools[APPL_POOLS_MAX]; - - /** Packet I/O parameters */ - struct { - /** Packet input mode */ - pktin_mode_t in_mode; - /** Interface count */ - int if_count; - /** Interface names + placeholder for '\0' */ - char if_name[IF_MAX_NUM][IF_NAME_LEN + 1]; - /** Interface identifiers corresponding to 'if_name[]' */ - int if_ids[IF_MAX_NUM]; - /** - * Pktio is setup with an EM event-pool: 'true' - * Pktio is setup with an ODP pkt-pool: 'false' - */ - bool pktpool_em; - } pktio; -} appl_conf_t; - -/** Application shared memory - allocate in single chunk */ -typedef struct { - /** EM configuration*/ - em_conf_t em_conf; - /** Application configuration */ - appl_conf_t appl_conf; - /** Exit the EM-core dispatch loop if set to 1, set by SIGINT handler */ - sig_atomic_t exit_flag; - /** ODP-thread table (from shared memory for process-per-core mode) */ - odph_thread_t thread_tbl[MAX_THREADS]; - /** Application synchronization vars */ - sync_t sync ENV_CACHE_LINE_ALIGNED; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} appl_shm_t; - -/** - * Global pointer to common application shared memory - */ -extern appl_shm_t *appl_shm; - -/** - * Common setup function for the appliations, - * usually called directly from main(). - */ -int cm_setup(int argc, char *argv[]); - -/** - * All examples implement the test_init(), test_start(), test_stop() and - * test_term() functions to keep common main() function. 
- */
-void test_init(void);
-
-void test_start(appl_conf_t *const appl_conf);
-
-void test_stop(appl_conf_t *const appl_conf);
-
-void test_term(void);
-
-int appl_vlog(em_log_level_t level, const char *fmt, va_list args);
-
-__attribute__((format(printf, 2, 3)))
-int appl_log(em_log_level_t level, const char *fmt, ...);
-
-void delay_spin(const uint64_t spin_count);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
+/*
+ * Copyright (c) 2015, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CM_SETUP_H
+#define CM_SETUP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <signal.h>
+#include <stdarg.h>
+#include <odp/helper/odph_api.h>
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#define APPL_NAME_LEN (64)
+
+#define APPL_POOLS_MAX (16)
+
+#define PLAT_PARAM_SIZE (8)
+
+#define MAX_THREADS (128)
+
+#define IF_NAME_LEN (16)
+
+#define IF_MAX_NUM (8)
+
+/** Get rid of path in filename - only for unix-type paths using '/' */
+#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
+                            strrchr((file_name), '/') + 1 : (file_name))
+
+#define APPL_LOG(level, ...) appl_log((level), ## __VA_ARGS__)
+#define APPL_VLOG(level, fmt, args) appl_vlog((level), (fmt), (args))
+#define APPL_PRINT(...) APPL_LOG(EM_LOG_PRINT, ## __VA_ARGS__)
+
+/** Simple appl error handling: log & exit */
+#define APPL_EXIT_FAILURE(...) do { \
+        appl_log(EM_LOG_ERR, \
+                 "Appl Error: %s:%i, %s() - ", \
+                 NO_PATH(__FILE__), __LINE__, __func__); \
+        appl_log(EM_LOG_ERR, ## __VA_ARGS__); \
+        appl_log(EM_LOG_ERR, "\n\n"); \
+        exit(EXIT_FAILURE); \
+} while (0)
+
+#define APPL_ERROR(...) 
do { \ + appl_log(EM_LOG_ERR, \ + "Appl Error: %s:%i, %s() - ", \ + NO_PATH(__FILE__), __LINE__, __func__); \ + appl_log(EM_LOG_ERR, ## __VA_ARGS__); \ + appl_log(EM_LOG_ERR, "\n\n"); \ +} while (0) + +/** + * Application synchronization + */ +typedef struct { + /** Startup synchronization barrier */ + odp_barrier_t start_barrier; + /** Exit / termination synchronization barrier */ + odp_barrier_t exit_barrier; + /** Enter counter for tracking core / odp-thread startup */ + env_atomic64_t enter_count; + /** Exit counter for tracking core / odp-thread exit */ + env_atomic64_t exit_count; +} sync_t; + +/** + * @brief Application startup mode + * + * Enables testing of different startup scenarios. + */ +typedef enum startup_mode { + /** + * Start up & initialize all EM cores before setting up the + * application using EM APIs. The em_init() function has been run and + * all EM-cores have run em_init_core() before application setup. + * Option: -s, --startup-mode = 0 (All EM-cores before application) + */ + STARTUP_ALL_CORES = 0, + /** + * Start up & initialize only one EM core before setting up the + * application using EM APIs. The em_init() function has been run and + * only one EM-core has run em_init_core() before application setup. + * Option: -s, --startup-mode = 1 (One EM-core before application...)) + */ + STARTUP_ONE_CORE_FIRST +} startup_mode_t; + +/** + * @brief Packet input mode + * + * Enables testing different packet-IO input modes + */ +typedef enum pktin_mode_t { + DIRECT_RECV, + PLAIN_QUEUE, + SCHED_PARALLEL, + SCHED_ATOMIC, + SCHED_ORDERED +} pktin_mode_t; + +/** + * @brief Application packet I/O configuration + */ +typedef struct { + /** Packet input mode */ + pktin_mode_t in_mode; + /** Interface count */ + int if_count; + /** Interface names + placeholder for '\0' */ + char if_name[IF_MAX_NUM][IF_NAME_LEN + 1]; + /** Interface identifiers corresponding to 'if_name[]' */ + int if_ids[IF_MAX_NUM]; + /** + * Pktio is setup with an EM event-pool: 'true' + * Pktio is setup with an ODP pkt-pool: 'false' + */ + bool pktpool_em; + + /** Packet input vectors enabled (true/false) */ + bool pktin_vector; + /** + * If pktin_vector: + * Pktio is setup with an EM vector-pool: 'true' + * Pktio is setup with an ODP vector-pool: 'false' + */ + bool vecpool_em; +} pktio_conf_t; + +/** + * @brief Application configuration + */ +typedef struct { + /** application name */ + char name[APPL_NAME_LEN]; + /** number of processes */ + unsigned int num_procs; + /** number of threads */ + unsigned int num_threads; + /** dispatch rounds before returning */ + uint32_t dispatch_rounds; + /** Start-up mode */ + startup_mode_t startup_mode; + + /** number of memory pools set up for the application */ + unsigned int num_pools; + /** pool ids of the created application pools */ + em_pool_t pools[APPL_POOLS_MAX]; + + /** Packet I/O parameters */ + pktio_conf_t pktio; +} appl_conf_t; + +/** Application shared memory - allocate in single chunk */ +typedef struct { + /** EM configuration*/ + em_conf_t em_conf; + /** Application configuration */ + appl_conf_t appl_conf; + /** Exit the EM-core dispatch loop if set to 1, set by SIGINT handler */ + sig_atomic_t exit_flag; + /** ODP-thread table (from shared memory for process-per-core mode) */ + odph_thread_t thread_tbl[MAX_THREADS]; + /** Application synchronization vars */ + sync_t sync ENV_CACHE_LINE_ALIGNED; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} appl_shm_t; + +/** + * Global pointer to common application 
shared memory
+ */
+extern appl_shm_t *appl_shm;
+
+/**
+ * Common setup function for the applications,
+ * usually called directly from main().
+ */
+int cm_setup(int argc, char *argv[]);
+
+/**
+ * All examples implement the test_init(), test_start(), test_stop() and
+ * test_term() functions to keep a common main() function.
+ */
+void test_init(void);
+
+void test_start(appl_conf_t *const appl_conf);
+
+void test_stop(appl_conf_t *const appl_conf);
+
+void test_term(void);
+
+int appl_vlog(em_log_level_t level, const char *fmt, va_list args);
+
+__attribute__((format(printf, 2, 3)))
+int appl_log(em_log_level_t level, const char *fmt, ...);
+
+void delay_spin(const uint64_t spin_count);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/programs/example/api-hooks/api_hooks.c b/programs/example/api-hooks/api_hooks.c
index 5be123b4..bc98188a 100644
--- a/programs/example/api-hooks/api_hooks.c
+++ b/programs/example/api-hooks/api_hooks.c
@@ -1,640 +1,640 @@
-/*
- * Copyright (c) 2019, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine API callback hooks example.
- *
- * Based on the dispatcher callback example. 
- */ - -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -#define SPIN_COUNT 50000000 - -/** - * Test ping event - */ -typedef struct { - /* Destination queue for the reply event */ - em_queue_t dest; - /* Sequence number */ - unsigned int seq; -} ping_event_t; - -/** - * EO context in the API hooks test - */ -typedef struct { - /* Init before start */ - em_eo_t this_eo; - em_eo_t other_eo; - em_queue_t my_queue; - int is_a; - /* Init in start */ - char name[16]; -} my_eo_context_t; - -/** - * Queue context data - */ -typedef struct { - em_queue_t queue; -} my_queue_context_t; - -/** - * Test shared memory - */ -typedef struct { - /* Event pool used by this application */ - em_pool_t pool; - /* Allocate EO contexts from shared memory region */ - my_eo_context_t eo_context_a; - my_eo_context_t eo_context_b; - /* Queue context */ - my_queue_context_t queue_context_a; - my_queue_context_t queue_context_b; - /* EO A's queue */ - em_queue_t queue_a; - /* EO B's queue */ - em_queue_t queue_b; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} test_shm_t; - -COMPILE_TIME_ASSERT((sizeof(test_shm_t) % ENV_CACHE_LINE_SIZE) == 0, - TEST_SHM_T__SIZE_ERROR); - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL test_shm_t *test_shm; - -static em_status_t -ping_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf); -static em_status_t -ping_stop(void *eo_ctx, em_eo_t eo); -static void -ping_receive(void *eo_ctx, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); - -/* Callback & hook functions */ -static void -enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, - em_queue_t *queue, void **q_ctx); -static void -exit_cb(em_eo_t eo); - -static void -alloc_hook(const em_event_t events[/*num_act*/], int num_act, int num_req, - size_t size, em_event_type_t type, em_pool_t pool); -static void -free_hook(const em_event_t events[], int num); -static void -send_hook(const em_event_t events[], int num, - em_queue_t queue, em_event_group_t event_group); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the API Hooks test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - test_shm = env_shared_reserve("TestSharedMem", - sizeof(test_shm_t)); - em_register_error_handler(test_error_handler); - } else { - test_shm = env_shared_lookup("TestSharedMem"); - } - - if (test_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Dispatcher callback init failed on EM-core: %u\n", - em_core_id()); - } else if (core == 0) { - memset(test_shm, 0, sizeof(test_shm_t)); - } -} - -/** - * Startup of the API Hooks test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo_a, eo_b; - em_status_t ret; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. 
- */ - if (appl_conf->num_pools >= 1) - test_shm->pool = appl_conf->pools[0]; - else - test_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - test_shm->pool); - - test_fatal_if(test_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - /* Create both EOs */ - eo_a = em_eo_create("EO A", ping_start, NULL, ping_stop, NULL, - ping_receive, &test_shm->eo_context_a); - test_fatal_if(eo_a == EM_EO_UNDEF, "EO A creation failed!"); - - eo_b = em_eo_create("EO B", ping_start, NULL, ping_stop, NULL, - ping_receive, &test_shm->eo_context_b); - test_fatal_if(eo_b == EM_EO_UNDEF, "EO B creation failed!"); - - /* Init EO contexts */ - test_shm->eo_context_a.this_eo = eo_a; - test_shm->eo_context_a.other_eo = eo_b; - test_shm->eo_context_a.is_a = 1; - - test_shm->eo_context_b.this_eo = eo_b; - test_shm->eo_context_b.other_eo = eo_a; - test_shm->eo_context_b.is_a = 0; - - /* Register/unregister callback functions. - * - * Callback functions may be registered multiple times and unregister - * function removes only the first matching callback. - * - * Register each callback twice and then remove one - for testing - * purposes only. - */ - ret = em_dispatch_register_enter_cb(enter_cb); - test_fatal_if(ret != EM_OK, "enter_cb() register failed!"); - ret = em_dispatch_register_enter_cb(enter_cb); - test_fatal_if(ret != EM_OK, "enter_cb() register failed!"); - ret = em_dispatch_unregister_enter_cb(enter_cb); - test_fatal_if(ret != EM_OK, "enter_cb() unregister failed!"); - - ret = em_dispatch_register_exit_cb(exit_cb); - test_fatal_if(ret != EM_OK, "exit_cb() register failed!"); - ret = em_dispatch_register_exit_cb(exit_cb); - test_fatal_if(ret != EM_OK, "exit_cb() register failed!"); - ret = em_dispatch_unregister_exit_cb(exit_cb); - test_fatal_if(ret != EM_OK, "exit_cb() unregister failed!"); - - /* - * Register EM API hooks. - * Register each hook twice and then remove one - for testing - * purposes only. 
- */ - ret = em_hooks_register_alloc(alloc_hook); - test_fatal_if(ret != EM_OK, "alloc_hook() register failed!"); - ret = em_hooks_register_alloc(alloc_hook); - test_fatal_if(ret != EM_OK, "alloc_hook() register failed!"); - ret = em_hooks_unregister_alloc(alloc_hook); - test_fatal_if(ret != EM_OK, "alloc_hook() unregister failed!"); - - ret = em_hooks_register_free(free_hook); - test_fatal_if(ret != EM_OK, "free_hook() register failed!"); - ret = em_hooks_register_free(free_hook); - test_fatal_if(ret != EM_OK, "free_hook() register failed!"); - ret = em_hooks_unregister_free(free_hook); - - ret = em_hooks_register_send(send_hook); - test_fatal_if(ret != EM_OK, "send_hook() register failed!"); - ret = em_hooks_register_send(send_hook); - test_fatal_if(ret != EM_OK, "send_hook() register failed!"); - ret = em_hooks_unregister_send(send_hook); - test_fatal_if(ret != EM_OK, "send_hook() unregister failed!"); - - /* Start EO A */ - ret = em_eo_start_sync(eo_a, NULL, NULL); - test_fatal_if(ret != EM_OK, "em_eo_start_sync(eo_a) failed!"); - - /* Start EO B */ - ret = em_eo_start_sync(eo_b, NULL, NULL); - test_fatal_if(ret != EM_OK, "em_eo_start_sync(eo_b) failed!"); - - /* - * Send the first event to EO A's queue. - * Store the following destination queue into the event. - */ - em_event_t event; - ping_event_t *ping; - - event = em_alloc(sizeof(ping_event_t), EM_EVENT_TYPE_SW, - test_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Event allocation failed!"); - - ping = em_event_pointer(event); - ping->dest = test_shm->queue_b; - ping->seq = 0; - - ret = em_send(event, test_shm->queue_a); - test_fatal_if(ret != EM_OK, - "em_send():%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, test_shm->queue_a); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - const em_eo_t eo_a = test_shm->eo_context_a.this_eo; - const em_eo_t eo_b = test_shm->eo_context_b.this_eo; - em_status_t stat; - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - stat = em_dispatch_unregister_enter_cb(enter_cb); - test_fatal_if(stat != EM_OK, "enter_cb() unregister failed!"); - - stat = em_dispatch_unregister_exit_cb(exit_cb); - test_fatal_if(stat != EM_OK, "exit_cb() unregister failed!"); - - stat = em_hooks_unregister_alloc(alloc_hook); - test_fatal_if(stat != EM_OK, "alloc_hook() unregister failed!"); - - stat = em_hooks_unregister_free(free_hook); - test_fatal_if(stat != EM_OK, "free_hook() unregister failed!"); - - stat = em_hooks_unregister_send(send_hook); - test_fatal_if(stat != EM_OK, "send_hook() unregister failed!"); - - stat = em_eo_stop_sync(eo_a); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO A stop failed!"); - stat = em_eo_stop_sync(eo_b); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO B stop failed!"); - - stat = em_eo_delete(eo_a); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO A delete failed!"); - stat = em_eo_delete(eo_b); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO B delete failed!"); -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(test_shm); - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. 
- * - */ -static em_status_t -ping_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf) -{ - my_eo_context_t *const my_eo_ctx = eo_ctx; - em_queue_t queue; - em_status_t status; - my_queue_context_t *my_q_ctx; - const char *queue_name; - - (void)conf; - - /* Copy EO name */ - em_eo_get_name(eo, my_eo_ctx->name, sizeof(my_eo_ctx->name)); - - if (my_eo_ctx->is_a) { - queue_name = "queue A"; - my_q_ctx = &test_shm->queue_context_a; - } else { - queue_name = "queue B"; - my_q_ctx = &test_shm->queue_context_b; - } - - queue = em_queue_create(queue_name, EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_NORMAL, EM_QUEUE_GROUP_DEFAULT, - NULL); - test_fatal_if(queue == EM_QUEUE_UNDEF, - "%s creation failed!", queue_name); - - my_eo_ctx->my_queue = queue; /* for ping_stop() */ - my_q_ctx->queue = queue; - - status = em_queue_set_context(queue, my_q_ctx); - test_fatal_if(status != EM_OK, - "Set queue context:%" PRI_STAT "\n" - "EO:%" PRI_EO " queue:%" PRI_QUEUE "", status, eo, queue); - - status = em_eo_add_queue_sync(eo, queue); - test_fatal_if(status != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", status, eo, queue); - - APPL_PRINT("Test start %s: EO %" PRI_EO ", queue:%" PRI_QUEUE ".\n", - my_eo_ctx->name, eo, queue); - - if (my_eo_ctx->is_a) - test_shm->queue_a = queue; - else - test_shm->queue_b = queue; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - * - */ -static em_status_t -ping_stop(void *eo_ctx, em_eo_t eo) -{ - my_eo_context_t *const my_eo_ctx = eo_ctx; - em_queue_t queue = my_eo_ctx->my_queue; - em_status_t status; - - APPL_PRINT("Dispatcher callback example stop (%s, eo id %" PRI_EO ")\n", - my_eo_ctx->name, eo); - - status = em_eo_remove_queue_sync(eo, queue); - if (status != EM_OK) - return status; - - status = em_queue_delete(queue); - if (status != EM_OK) - return status; - - return EM_OK; -} - -/** - * @private - * - * EO receive function. - * - * Print "Event received" and send back to the sender of the event. 
- * - */ -static void -ping_receive(void *eo_ctx, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx) -{ - my_eo_context_t *const my_eo_ctx = eo_ctx; - em_queue_t dest; - em_status_t status; - ping_event_t *ping, *new_ping; - em_event_t new_event; - (void)type; - (void)q_ctx; - - ping = em_event_pointer(event); - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - dest = ping->dest; - ping->dest = queue; - - APPL_PRINT("** EO-rcv: Ping from EO:'%s'(%" PRI_EO ") on core%02d!\t" - "Queue:%" PRI_QUEUE "\t\t" - "Event:%" PRI_EVENT " Event-seq:%u\n", - my_eo_ctx->name, my_eo_ctx->this_eo, em_core_id(), - queue, event, ping->seq++); - - new_event = em_alloc(sizeof(ping_event_t), EM_EVENT_TYPE_SW, - test_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Event allocation failed!"); - new_ping = em_event_pointer(new_event); - memcpy(new_ping, ping, sizeof(ping_event_t)); - em_free(event); - - delay_spin(SPIN_COUNT); - - status = em_send(new_event, dest); - if (unlikely(status != EM_OK)) { - em_free(new_event); - test_fatal_if(!appl_shm->exit_flag, - "em_send():%" PRI_STAT "EO:%" PRI_EO "\n" - "Rcv-Q:%" PRI_QUEUE " Dst-Q:%" PRI_QUEUE "", - status, my_eo_ctx->this_eo, queue, dest); - } -} - -/** - * Callback functions - */ - -static void -enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, - em_queue_t *queue, void **q_ctx) -{ - my_eo_context_t *my_eo_ctx = *eo_ctx; - my_queue_context_t *my_q_ctx = *q_ctx; - ping_event_t *ping; - em_event_t event = events[0]; - - (void)num; /* 1 event at a time here */ - (void)queue; - - ping = em_event_pointer(event); - - APPL_PRINT("\n" - "+ Dispatch enter callback EO:'%s'(%" PRI_EO ")\t" - "Queue:%" PRI_QUEUE " on core%02i\t" - "Event:%" PRI_EVENT " Event-seq:%u\n", - my_eo_ctx->name, eo, my_q_ctx->queue, em_core_id(), - event, ping->seq); -} - -static void -exit_cb(em_eo_t eo) -{ - my_eo_context_t *my_eo_ctx = em_eo_get_context(eo); - - APPL_PRINT("- Dispatch exit callback EO:'%s'(%" PRI_EO ")\n", - my_eo_ctx->name, eo); -} - -static void -alloc_hook(const em_event_t events[/*num_act*/], int num_act, int num_req, - size_t size, em_event_type_t type, em_pool_t pool) -{ - em_eo_t eo, eo_a, eo_b; - void *eo_ctx; - my_eo_context_t *my_eo_ctx; - - (void)num_req; - - eo = em_eo_current(); - if (unlikely(eo == EM_EO_UNDEF)) - return; - eo_ctx = em_eo_get_context(eo); - if (unlikely(eo_ctx == NULL)) - return; - - /* Only print stuff for this test's EOs */ - if (unlikely(test_shm == NULL)) - return; - eo_a = test_shm->eo_context_a.this_eo; - eo_b = test_shm->eo_context_a.other_eo; - if (eo != eo_a && eo != eo_b) - return; - - my_eo_ctx = eo_ctx; - - APPL_PRINT(" Alloc-hook EO:'%s'(%" PRI_EO ")\t" - "sz:%zu type:0x%x pool:%" PRI_POOL "\t\t" - "Events[%d]:", - my_eo_ctx->name, eo, size, type, pool, - num_act); - for (int i = 0; i < num_act; i++) - APPL_PRINT(" %" PRI_EVENT "", events[i]); - APPL_PRINT("\n"); -} - -static void -free_hook(const em_event_t events[], int num) -{ - em_eo_t eo, eo_a, eo_b; - void *eo_ctx; - my_eo_context_t *my_eo_ctx; - - eo = em_eo_current(); - if (unlikely(eo == EM_EO_UNDEF)) - return; - eo_ctx = em_eo_get_context(eo); - if (unlikely(eo_ctx == NULL)) - return; - - /* Only print stuff for this test's EOs */ - if (unlikely(test_shm == NULL)) - return; - eo_a = test_shm->eo_context_a.this_eo; - eo_b = test_shm->eo_context_a.other_eo; - if (eo != eo_a && eo != eo_b) - return; - - my_eo_ctx = eo_ctx; - - APPL_PRINT(" Free-hook EO:'%s'(%" PRI_EO ")\t\t\t\t\t\t" - "Events[%d]:", 
my_eo_ctx->name, eo, num); - for (int i = 0; i < num; i++) - APPL_PRINT(" %" PRI_EVENT "", events[i]); - APPL_PRINT("\n"); -} - -static void -send_hook(const em_event_t events[], int num, - em_queue_t queue, em_event_group_t event_group) -{ - em_eo_t eo, eo_a, eo_b; - void *eo_ctx; - my_eo_context_t *my_eo_ctx; - - (void)events; - (void)event_group; - - eo = em_eo_current(); - if (unlikely(eo == EM_EO_UNDEF)) - return; - eo_ctx = em_eo_get_context(eo); - if (unlikely(eo_ctx == NULL)) - return; - - /* Only print stuff for this test's EOs */ - if (unlikely(test_shm == NULL)) - return; - eo_a = test_shm->eo_context_a.this_eo; - eo_b = test_shm->eo_context_a.other_eo; - if (eo != eo_a && eo != eo_b) - return; - - my_eo_ctx = eo_ctx; - - APPL_PRINT(" Send-hook EO:'%s'(%" PRI_EO ")\t" - "%d event(s)\tQueue:%" PRI_QUEUE " ==> %" PRI_QUEUE "\t" - "Events[%d]:", - my_eo_ctx->name, eo, num, em_queue_current(), queue, num); - for (int i = 0; i < num; i++) - APPL_PRINT(" %" PRI_EVENT "", events[i]); - APPL_PRINT("\n"); -} +/* + * Copyright (c) 2019, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine API callback hooks example. + * + * Based on the dispatcher callback example. 
+ */ + +#include +#include +#include + +#include +#include + +#include "cm_setup.h" +#include "cm_error_handler.h" + +#define SPIN_COUNT 50000000 + +/** + * Test ping event + */ +typedef struct { + /* Destination queue for the reply event */ + em_queue_t dest; + /* Sequence number */ + unsigned int seq; +} ping_event_t; + +/** + * EO context in the API hooks test + */ +typedef struct { + /* Init before start */ + em_eo_t this_eo; + em_eo_t other_eo; + em_queue_t my_queue; + int is_a; + /* Init in start */ + char name[16]; +} my_eo_context_t; + +/** + * Queue context data + */ +typedef struct { + em_queue_t queue; +} my_queue_context_t; + +/** + * Test shared memory + */ +typedef struct { + /* Event pool used by this application */ + em_pool_t pool; + /* Allocate EO contexts from shared memory region */ + my_eo_context_t eo_context_a; + my_eo_context_t eo_context_b; + /* Queue context */ + my_queue_context_t queue_context_a; + my_queue_context_t queue_context_b; + /* EO A's queue */ + em_queue_t queue_a; + /* EO B's queue */ + em_queue_t queue_b; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} test_shm_t; + +COMPILE_TIME_ASSERT((sizeof(test_shm_t) % ENV_CACHE_LINE_SIZE) == 0, + TEST_SHM_T__SIZE_ERROR); + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL test_shm_t *test_shm; + +static em_status_t +ping_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t +ping_stop(void *eo_ctx, em_eo_t eo); +static void +ping_receive(void *eo_ctx, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +/* Callback & hook functions */ +static void +enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx); +static void +exit_cb(em_eo_t eo); + +static void +alloc_hook(const em_event_t events[/*num_act*/], int num_act, int num_req, + uint32_t size, em_event_type_t type, em_pool_t pool); +static void +free_hook(const em_event_t events[], int num); +static void +send_hook(const em_event_t events[], int num, + em_queue_t queue, em_event_group_t event_group); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the API Hooks test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + test_shm = env_shared_reserve("TestSharedMem", + sizeof(test_shm_t)); + em_register_error_handler(test_error_handler); + } else { + test_shm = env_shared_lookup("TestSharedMem"); + } + + if (test_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Dispatcher callback init failed on EM-core: %u\n", + em_core_id()); + } else if (core == 0) { + memset(test_shm, 0, sizeof(test_shm_t)); + } +} + +/** + * Startup of the API Hooks test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo_a, eo_b; + em_status_t ret; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. 
+ */ + if (appl_conf->num_pools >= 1) + test_shm->pool = appl_conf->pools[0]; + else + test_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + test_shm->pool); + + test_fatal_if(test_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + /* Create both EOs */ + eo_a = em_eo_create("EO A", ping_start, NULL, ping_stop, NULL, + ping_receive, &test_shm->eo_context_a); + test_fatal_if(eo_a == EM_EO_UNDEF, "EO A creation failed!"); + + eo_b = em_eo_create("EO B", ping_start, NULL, ping_stop, NULL, + ping_receive, &test_shm->eo_context_b); + test_fatal_if(eo_b == EM_EO_UNDEF, "EO B creation failed!"); + + /* Init EO contexts */ + test_shm->eo_context_a.this_eo = eo_a; + test_shm->eo_context_a.other_eo = eo_b; + test_shm->eo_context_a.is_a = 1; + + test_shm->eo_context_b.this_eo = eo_b; + test_shm->eo_context_b.other_eo = eo_a; + test_shm->eo_context_b.is_a = 0; + + /* Register/unregister callback functions. + * + * Callback functions may be registered multiple times and unregister + * function removes only the first matching callback. + * + * Register each callback twice and then remove one - for testing + * purposes only. + */ + ret = em_dispatch_register_enter_cb(enter_cb); + test_fatal_if(ret != EM_OK, "enter_cb() register failed!"); + ret = em_dispatch_register_enter_cb(enter_cb); + test_fatal_if(ret != EM_OK, "enter_cb() register failed!"); + ret = em_dispatch_unregister_enter_cb(enter_cb); + test_fatal_if(ret != EM_OK, "enter_cb() unregister failed!"); + + ret = em_dispatch_register_exit_cb(exit_cb); + test_fatal_if(ret != EM_OK, "exit_cb() register failed!"); + ret = em_dispatch_register_exit_cb(exit_cb); + test_fatal_if(ret != EM_OK, "exit_cb() register failed!"); + ret = em_dispatch_unregister_exit_cb(exit_cb); + test_fatal_if(ret != EM_OK, "exit_cb() unregister failed!"); + + /* + * Register EM API hooks. + * Register each hook twice and then remove one - for testing + * purposes only. 
+ */ + ret = em_hooks_register_alloc(alloc_hook); + test_fatal_if(ret != EM_OK, "alloc_hook() register failed!"); + ret = em_hooks_register_alloc(alloc_hook); + test_fatal_if(ret != EM_OK, "alloc_hook() register failed!"); + ret = em_hooks_unregister_alloc(alloc_hook); + test_fatal_if(ret != EM_OK, "alloc_hook() unregister failed!"); + + ret = em_hooks_register_free(free_hook); + test_fatal_if(ret != EM_OK, "free_hook() register failed!"); + ret = em_hooks_register_free(free_hook); + test_fatal_if(ret != EM_OK, "free_hook() register failed!"); + ret = em_hooks_unregister_free(free_hook); + test_fatal_if(ret != EM_OK, "free_hook() unregister failed!"); + + ret = em_hooks_register_send(send_hook); + test_fatal_if(ret != EM_OK, "send_hook() register failed!"); + ret = em_hooks_register_send(send_hook); + test_fatal_if(ret != EM_OK, "send_hook() register failed!"); + ret = em_hooks_unregister_send(send_hook); + test_fatal_if(ret != EM_OK, "send_hook() unregister failed!"); + + /* Start EO A */ + ret = em_eo_start_sync(eo_a, NULL, NULL); + test_fatal_if(ret != EM_OK, "em_eo_start_sync(eo_a) failed!"); + + /* Start EO B */ + ret = em_eo_start_sync(eo_b, NULL, NULL); + test_fatal_if(ret != EM_OK, "em_eo_start_sync(eo_b) failed!"); + + /* + * Send the first event to EO A's queue. + * Store the following destination queue into the event. + */ + em_event_t event; + ping_event_t *ping; + + event = em_alloc(sizeof(ping_event_t), EM_EVENT_TYPE_SW, + test_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Event allocation failed!"); + + ping = em_event_pointer(event); + ping->dest = test_shm->queue_b; + ping->seq = 0; + + ret = em_send(event, test_shm->queue_a); + test_fatal_if(ret != EM_OK, + "em_send():%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, test_shm->queue_a); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + const em_eo_t eo_a = test_shm->eo_context_a.this_eo; + const em_eo_t eo_b = test_shm->eo_context_b.this_eo; + em_status_t stat; + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + stat = em_dispatch_unregister_enter_cb(enter_cb); + test_fatal_if(stat != EM_OK, "enter_cb() unregister failed!"); + + stat = em_dispatch_unregister_exit_cb(exit_cb); + test_fatal_if(stat != EM_OK, "exit_cb() unregister failed!"); + + stat = em_hooks_unregister_alloc(alloc_hook); + test_fatal_if(stat != EM_OK, "alloc_hook() unregister failed!"); + + stat = em_hooks_unregister_free(free_hook); + test_fatal_if(stat != EM_OK, "free_hook() unregister failed!"); + + stat = em_hooks_unregister_send(send_hook); + test_fatal_if(stat != EM_OK, "send_hook() unregister failed!"); + + stat = em_eo_stop_sync(eo_a); + if (stat != EM_OK) + APPL_EXIT_FAILURE("EO A stop failed!"); + stat = em_eo_stop_sync(eo_b); + if (stat != EM_OK) + APPL_EXIT_FAILURE("EO B stop failed!"); + + stat = em_eo_delete(eo_a); + if (stat != EM_OK) + APPL_EXIT_FAILURE("EO A delete failed!"); + stat = em_eo_delete(eo_b); + if (stat != EM_OK) + APPL_EXIT_FAILURE("EO B delete failed!"); +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(test_shm); + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function.
+ * + */ +static em_status_t +ping_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf) +{ + my_eo_context_t *const my_eo_ctx = eo_ctx; + em_queue_t queue; + em_status_t status; + my_queue_context_t *my_q_ctx; + const char *queue_name; + + (void)conf; + + /* Copy EO name */ + em_eo_get_name(eo, my_eo_ctx->name, sizeof(my_eo_ctx->name)); + + if (my_eo_ctx->is_a) { + queue_name = "queue A"; + my_q_ctx = &test_shm->queue_context_a; + } else { + queue_name = "queue B"; + my_q_ctx = &test_shm->queue_context_b; + } + + queue = em_queue_create(queue_name, EM_QUEUE_TYPE_ATOMIC, + EM_QUEUE_PRIO_NORMAL, EM_QUEUE_GROUP_DEFAULT, + NULL); + test_fatal_if(queue == EM_QUEUE_UNDEF, + "%s creation failed!", queue_name); + + my_eo_ctx->my_queue = queue; /* for ping_stop() */ + my_q_ctx->queue = queue; + + status = em_queue_set_context(queue, my_q_ctx); + test_fatal_if(status != EM_OK, + "Set queue context:%" PRI_STAT "\n" + "EO:%" PRI_EO " queue:%" PRI_QUEUE "", status, eo, queue); + + status = em_eo_add_queue_sync(eo, queue); + test_fatal_if(status != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", status, eo, queue); + + APPL_PRINT("Test start %s: EO %" PRI_EO ", queue:%" PRI_QUEUE ".\n", + my_eo_ctx->name, eo, queue); + + if (my_eo_ctx->is_a) + test_shm->queue_a = queue; + else + test_shm->queue_b = queue; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + * + */ +static em_status_t +ping_stop(void *eo_ctx, em_eo_t eo) +{ + my_eo_context_t *const my_eo_ctx = eo_ctx; + em_queue_t queue = my_eo_ctx->my_queue; + em_status_t status; + + APPL_PRINT("Dispatcher callback example stop (%s, eo id %" PRI_EO ")\n", + my_eo_ctx->name, eo); + + status = em_eo_remove_queue_sync(eo, queue); + if (status != EM_OK) + return status; + + status = em_queue_delete(queue); + if (status != EM_OK) + return status; + + return EM_OK; +} + +/** + * @private + * + * EO receive function. + * + * Print "Event received" and send back to the sender of the event. 
+ * + */ +static void +ping_receive(void *eo_ctx, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx) +{ + my_eo_context_t *const my_eo_ctx = eo_ctx; + em_queue_t dest; + em_status_t status; + ping_event_t *ping, *new_ping; + em_event_t new_event; + (void)type; + (void)q_ctx; + + ping = em_event_pointer(event); + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + dest = ping->dest; + ping->dest = queue; + + APPL_PRINT("** EO-rcv: Ping from EO:'%s'(%" PRI_EO ") on core%02d!\t" + "Queue:%" PRI_QUEUE "\t\t" + "Event:%" PRI_EVENT " Event-seq:%u\n", + my_eo_ctx->name, my_eo_ctx->this_eo, em_core_id(), + queue, event, ping->seq++); + + new_event = em_alloc(sizeof(ping_event_t), EM_EVENT_TYPE_SW, + test_shm->pool); + test_fatal_if(new_event == EM_EVENT_UNDEF, "Event allocation failed!"); + new_ping = em_event_pointer(new_event); + memcpy(new_ping, ping, sizeof(ping_event_t)); + em_free(event); + + delay_spin(SPIN_COUNT); + + status = em_send(new_event, dest); + if (unlikely(status != EM_OK)) { + em_free(new_event); + test_fatal_if(!appl_shm->exit_flag, + "em_send():%" PRI_STAT " EO:%" PRI_EO "\n" + "Rcv-Q:%" PRI_QUEUE " Dst-Q:%" PRI_QUEUE "", + status, my_eo_ctx->this_eo, queue, dest); + } +} + +/** + * Callback functions + */ + +static void +enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx) +{ + my_eo_context_t *my_eo_ctx = *eo_ctx; + my_queue_context_t *my_q_ctx = *q_ctx; + ping_event_t *ping; + em_event_t event = events[0]; + + (void)num; /* 1 event at a time here */ + (void)queue; + + ping = em_event_pointer(event); + + APPL_PRINT("\n" + "+ Dispatch enter callback EO:'%s'(%" PRI_EO ")\t" + "Queue:%" PRI_QUEUE " on core%02i\t" + "Event:%" PRI_EVENT " Event-seq:%u\n", + my_eo_ctx->name, eo, my_q_ctx->queue, em_core_id(), + event, ping->seq); +} + +static void +exit_cb(em_eo_t eo) +{ + my_eo_context_t *my_eo_ctx = em_eo_get_context(eo); + + APPL_PRINT("- Dispatch exit callback EO:'%s'(%" PRI_EO ")\n", + my_eo_ctx->name, eo); +} + +static void +alloc_hook(const em_event_t events[/*num_act*/], int num_act, int num_req, + uint32_t size, em_event_type_t type, em_pool_t pool) +{ + em_eo_t eo, eo_a, eo_b; + void *eo_ctx; + my_eo_context_t *my_eo_ctx; + + (void)num_req; + + eo = em_eo_current(); + if (unlikely(eo == EM_EO_UNDEF)) + return; + eo_ctx = em_eo_get_context(eo); + if (unlikely(eo_ctx == NULL)) + return; + + /* Only print stuff for this test's EOs */ + if (unlikely(test_shm == NULL)) + return; + eo_a = test_shm->eo_context_a.this_eo; + eo_b = test_shm->eo_context_a.other_eo; + if (eo != eo_a && eo != eo_b) + return; + + my_eo_ctx = eo_ctx; + + APPL_PRINT(" Alloc-hook EO:'%s'(%" PRI_EO ")\t" + "sz:%u type:0x%x pool:%" PRI_POOL "\t\t" + "Events[%d]:", + my_eo_ctx->name, eo, size, type, pool, + num_act); + for (int i = 0; i < num_act; i++) + APPL_PRINT(" %" PRI_EVENT "", events[i]); + APPL_PRINT("\n"); +} + +static void +free_hook(const em_event_t events[], int num) +{ + em_eo_t eo, eo_a, eo_b; + void *eo_ctx; + my_eo_context_t *my_eo_ctx; + + eo = em_eo_current(); + if (unlikely(eo == EM_EO_UNDEF)) + return; + eo_ctx = em_eo_get_context(eo); + if (unlikely(eo_ctx == NULL)) + return; + + /* Only print stuff for this test's EOs */ + if (unlikely(test_shm == NULL)) + return; + eo_a = test_shm->eo_context_a.this_eo; + eo_b = test_shm->eo_context_a.other_eo; + if (eo != eo_a && eo != eo_b) + return; + + my_eo_ctx = eo_ctx; + + APPL_PRINT(" Free-hook EO:'%s'(%" PRI_EO ")\t\t\t\t\t\t" + "Events[%d]:",
my_eo_ctx->name, eo, num); + for (int i = 0; i < num; i++) + APPL_PRINT(" %" PRI_EVENT "", events[i]); + APPL_PRINT("\n"); +} + +static void +send_hook(const em_event_t events[], int num, + em_queue_t queue, em_event_group_t event_group) +{ + em_eo_t eo, eo_a, eo_b; + void *eo_ctx; + my_eo_context_t *my_eo_ctx; + + (void)events; + (void)event_group; + + eo = em_eo_current(); + if (unlikely(eo == EM_EO_UNDEF)) + return; + eo_ctx = em_eo_get_context(eo); + if (unlikely(eo_ctx == NULL)) + return; + + /* Only print stuff for this test's EOs */ + if (unlikely(test_shm == NULL)) + return; + eo_a = test_shm->eo_context_a.this_eo; + eo_b = test_shm->eo_context_a.other_eo; + if (eo != eo_a && eo != eo_b) + return; + + my_eo_ctx = eo_ctx; + + APPL_PRINT(" Send-hook EO:'%s'(%" PRI_EO ")\t" + "%d event(s)\tQueue:%" PRI_QUEUE " ==> %" PRI_QUEUE "\t" + "Events[%d]:", + my_eo_ctx->name, eo, num, em_queue_current(), queue, num); + for (int i = 0; i < num; i++) + APPL_PRINT(" %" PRI_EVENT "", events[i]); + APPL_PRINT("\n"); +} diff --git a/programs/example/queue/ordered.c b/programs/example/queue/ordered.c index 938e6d23..ba791a55 100644 --- a/programs/example/queue/ordered.c +++ b/programs/example/queue/ordered.c @@ -1,857 +1,853 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine Parallel-Ordered queue test - */ - -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -/* - * Test configuration - */ - -/** Number of test EOs and queues. Must be an even number. */ -#define NUM_EO 32 - -/** Number of initial events per EO pair. 
*/ -#define NUM_EVENT 37 - -/** Number of events that EO-ordered will allocate for each input event */ -#define NUM_SUB_EVENT 11 - -/** Max number of cores */ -#define MAX_NBR_OF_CORES 128 - -/** The number of events to be received before printing a result */ -#define PRINT_EVENT_COUNT 0xff0000 - -/** Print results on all cores */ -#define PRINT_ON_ALL_CORES 1 /* 0=False or 1=True */ - -/** Define how many events are sent per em_send_multi() call */ -#define SEND_MULTI_MAX 32 - -/** - * Test statistics (per core) - */ -typedef union { - uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - struct { - uint64_t num_events; - uint64_t begin_cycles; - uint64_t end_cycles; - uint64_t print_count; - }; -} test_stat_t; - -COMPILE_TIME_ASSERT(sizeof(test_stat_t) == ENV_CACHE_LINE_SIZE, - TEST_STAT_T_SIZE_ERROR); - -/** - * Ordered queue context - */ -typedef struct { - /** Next destination queue */ - em_queue_t dest_queue; -} q_ordered_context_t; - -/** - * Atomic queue context - */ -typedef struct { - /** Expected sequence number */ - uint32_t seq; - /** Expected sub-sequence number */ - uint32_t sub_seq; - /** Next destination queue */ - em_queue_t dest_queue; -} q_atomic_context_t; - -/** - * Queue context padded to cache line size - */ -typedef union { - uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - q_ordered_context_t q_ordered_ctx; - q_atomic_context_t q_atomic_ctx; -} q_context_array_elem_t; - -COMPILE_TIME_ASSERT(sizeof(q_context_array_elem_t) == ENV_CACHE_LINE_SIZE, - Q_CONTEXT_SIZE_ERROR); - -/** - * EO-ordered context, i.e. EO with an oredered queue - */ -typedef struct { - /** This EO */ - em_eo_t hdl; - /** The EO's queue */ - em_queue_t ordered_queue; -} eo_ordered_context_t; - -/** - * EO-atomic context, i.e. EO with an atomic queue - */ -typedef struct { - /** This EO */ - em_eo_t hdl; - /** The EO's queue */ - em_queue_t atomic_queue; - /** The peer EO's ordered queue */ - em_queue_t peer_ordered_queue; -} eo_atomic_context_t; - -/** - * Queue context padded to cache line size - */ -typedef union { - uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - eo_ordered_context_t eo_ordered_ctx; - eo_atomic_context_t eo_atomic_ctx; -} eo_context_array_elem_t; - -COMPILE_TIME_ASSERT(sizeof(eo_context_array_elem_t) == ENV_CACHE_LINE_SIZE, - EO_CONTEXT_SIZE_ERROR); - -#define EV_ID_ORDERED_EVENT 1 -#define EV_ID_START_EVENT 2 -/** Ordered event content */ -typedef struct { - /** Event ID */ - int ev_id; - /** Sequence number */ - uint32_t seq; - /** Sub-sequence number */ - uint32_t sub_seq; - /** Indication from sender that event might be received out of order */ - int out_of_order; - /** Indication from sender that event is last in order using 'seq' */ - int last_in_order; - /** Indication from sender that event is a copy of 'original' */ - int is_copy; - /** If the event is a copy then the original event is sent along */ - em_event_t original; -} ordered_event_t; -/** Startup event content */ -typedef struct { - /** Event ID */ - int ev_id; - /** Request to allocate and send test events into the 'ordered_queue' */ - em_queue_t ordered_queue; -} start_event_t; -/** - * Test event, content identified by 'ev_id' - */ -typedef union { - int ev_id; - ordered_event_t ordered; - start_event_t start; -} test_event_t; - -/** - * Test shared memory - */ -typedef struct { - /** Event pool used by this application */ - em_pool_t pool; - /** Ordered queue context array */ - q_context_array_elem_t q_ordered_ctx[NUM_EO / 2] - ENV_CACHE_LINE_ALIGNED; - /** Atomic queue context array 
*/ - q_context_array_elem_t q_atomic_ctx[NUM_EO / 2] - ENV_CACHE_LINE_ALIGNED; - /** EO context array for EOs with ordered queue */ - eo_context_array_elem_t eo_ordered_ctx[NUM_EO / 2] - ENV_CACHE_LINE_ALIGNED; - /** EO context array for EOs with atomic queue */ - eo_context_array_elem_t eo_atomic_ctx[NUM_EO / 2] - ENV_CACHE_LINE_ALIGNED; - /** Array of core specific data accessed by using core index. */ - test_stat_t core_stat[MAX_NBR_OF_CORES] ENV_CACHE_LINE_ALIGNED; -} test_shm_t; - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL test_shm_t *test_shm; - -static em_status_t -eo_ordered_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); -static em_status_t -eo_ordered_stop(void *eo_context, em_eo_t eo); -static void -eo_ordered_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_ctx); - -static em_status_t -eo_atomic_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); -static em_status_t -eo_atomic_stop(void *eo_context, em_eo_t eo); -static void -eo_atomic_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_ctx); -static void -initialize_events(start_event_t *const start_event); -static void -print_result(test_stat_t *const test_stat); -static int -get_queue_priority(const int index); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - test_shm = env_shared_reserve("TestSharedMem", - sizeof(test_shm_t)); - em_register_error_handler(test_error_handler); - } else { - test_shm = env_shared_lookup("TestSharedMem"); - } - - if (test_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Test init failed on EM-core:%u", em_core_id()); - else if (core == 0) - memset(test_shm, 0, sizeof(test_shm_t)); -} - -/** - * Startup of the test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo_a, eo_b; - em_queue_t queue_a, queue_b; - em_status_t ret; - eo_ordered_context_t *eo_ordered_ctx; - eo_atomic_context_t *eo_atomic_ctx; - q_ordered_context_t *q_ordered_ctx; - q_atomic_context_t *q_atomic_ctx; - int i; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. 
- */ - if (appl_conf->num_pools >= 1) - test_shm->pool = appl_conf->pools[0]; - else - test_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - test_shm->pool); - - test_fatal_if(test_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - /* - * Create and start EO's & queues - */ - for (i = 0; i < NUM_EO / 2; i++) { - eo_ordered_ctx = &test_shm->eo_ordered_ctx[i].eo_ordered_ctx; - eo_atomic_ctx = &test_shm->eo_atomic_ctx[i].eo_atomic_ctx; - q_ordered_ctx = &test_shm->q_ordered_ctx[i].q_ordered_ctx; - q_atomic_ctx = &test_shm->q_atomic_ctx[i].q_atomic_ctx; - - /* Create EO with ordered queue */ - eo_a = em_eo_create("eo-ordered", eo_ordered_start, NULL, - eo_ordered_stop, NULL, eo_ordered_receive, - eo_ordered_ctx); - queue_a = em_queue_create("ordered", - EM_QUEUE_TYPE_PARALLEL_ORDERED, - get_queue_priority(i), - EM_QUEUE_GROUP_DEFAULT, NULL); - - ret = em_queue_set_context(queue_a, q_ordered_ctx); - test_fatal_if(ret != EM_OK, - "Queue set context:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo_a, queue_a); - - ret = em_eo_add_queue_sync(eo_a, queue_a); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo_a, queue_a); - - eo_ordered_ctx->hdl = eo_a; - eo_ordered_ctx->ordered_queue = queue_a; - - /* Create EO with an atomic queue */ - eo_b = em_eo_create("eo-atomic", eo_atomic_start, NULL, - eo_atomic_stop, NULL, eo_atomic_receive, - eo_atomic_ctx); - queue_b = em_queue_create("atomic", - EM_QUEUE_TYPE_ATOMIC, - get_queue_priority(i), - EM_QUEUE_GROUP_DEFAULT, NULL); - - ret = em_queue_set_context(queue_b, q_atomic_ctx); - test_fatal_if(ret != EM_OK, - "Queue set context:%" PRI_STAT "\n" - "EO:%" PRI_EO " queue:%" PRI_QUEUE "", - ret, eo_b, queue_b); - - ret = em_eo_add_queue_sync(eo_b, queue_b); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " queue:%" PRI_QUEUE "", - ret, eo_b, queue_b); - eo_atomic_ctx->hdl = eo_b; - eo_atomic_ctx->atomic_queue = queue_b; - eo_atomic_ctx->peer_ordered_queue = queue_a; - - /* Initialize queue context data */ - q_ordered_ctx->dest_queue = queue_b; - q_atomic_ctx->seq = 0; - q_atomic_ctx->sub_seq = 0; - q_atomic_ctx->dest_queue = queue_a; - - /* Start the EO's */ - ret = em_eo_start_sync(eo_a, NULL, NULL); - test_fatal_if(ret != EM_OK, - "EO start:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo_a); - ret = em_eo_start_sync(eo_b, NULL, NULL); - test_fatal_if(ret != EM_OK, - "EO start:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo_b); - } -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - int i; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - /* stop all EOs */ - for (i = 0; i < NUM_EO / 2; i++) { - eo = test_shm->eo_atomic_ctx[i].eo_atomic_ctx.hdl; - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO stop:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - eo = test_shm->eo_ordered_ctx[i].eo_ordered_ctx.hdl; - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO stop:%" PRI_STAT " EO:%" 
PRI_EO "", - ret, eo); - } -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(test_shm); - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. - */ -static em_status_t -eo_ordered_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - (void)eo_context; - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - return EM_OK; -} - -/** - * @private - * - * EO start function. - */ -static em_status_t -eo_atomic_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_atomic_context_t *const eo_ctx = eo_context; - em_status_t ret; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - /* - * Allocate and send the startup event to the atomic EO of the pair. - */ - em_event_t event = em_alloc(sizeof(start_event_t), EM_EVENT_TYPE_SW, - test_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); - start_event_t *start_event = em_event_pointer(event); - - start_event->ev_id = EV_ID_START_EVENT; - start_event->ordered_queue = eo_ctx->peer_ordered_queue; - - ret = em_send(event, eo_ctx->atomic_queue); - test_fatal_if(ret != EM_OK, "start event send:%" PRI_STAT ""); - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t -eo_ordered_stop(void *eo_context, em_eo_t eo) -{ - em_status_t ret; - - (void)eo_context; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - /* delete the EO at the end of the stop-function */ - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO delete:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t -eo_atomic_stop(void *eo_context, em_eo_t eo) -{ - em_status_t ret; - - (void)eo_context; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - /* delete the EO at the end of the stop-function */ - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO delete:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - return EM_OK; -} - -/** - * @private - * - * EO receive function for EO A. - * - * Loops back events and calculates the event rate. 
- */ -static void -eo_ordered_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_ctx) -{ - q_ordered_context_t *const q_ctx = queue_ctx; - test_event_t *const test_event = em_event_pointer(event); - ordered_event_t *ordered; - em_status_t ret; - int interleave; - int out_of_order = 0; - uint32_t sub_seq; - int i; - - (void)eo_context; - (void)type; - (void)queue; - - test_fatal_if(test_event->ev_id != EV_ID_ORDERED_EVENT, - "Unexpected ev-id:%d", test_event->ev_id); - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - ordered = &test_event->ordered; - ordered->out_of_order = 0; - ordered->last_in_order = 0; - ordered->is_copy = 0; - - /* interleave the input event in between the output events */ - interleave = ordered->seq % (NUM_SUB_EVENT + 1); - - for (i = 0, sub_seq = 0; i < NUM_SUB_EVENT; i++, sub_seq++) { - /* allocate sub-events to send in the same ordered context */ - em_event_t sub_event = em_alloc(sizeof(ordered_event_t), - EM_EVENT_TYPE_SW, - test_shm->pool); - - test_fatal_if(sub_event == EM_EVENT_UNDEF, - "Sub-event alloc failed:%i", i); - - ordered_event_t *const sub_ordered = - em_event_pointer(sub_event); - - sub_ordered->ev_id = EV_ID_ORDERED_EVENT; - sub_ordered->seq = ordered->seq; - - if (interleave == i) { - ordered->sub_seq = sub_seq; - sub_seq++; - ordered->last_in_order = 1; - - em_event_t copy_event = - em_alloc(sizeof(ordered_event_t), - EM_EVENT_TYPE_SW, test_shm->pool); - test_fatal_if(copy_event == EM_EVENT_UNDEF, - "Copy-event alloc failed:%i", i); - ordered_event_t *const copy_ordered = - em_event_pointer(copy_event); - memcpy(copy_ordered, ordered, sizeof(ordered_event_t)); - copy_ordered->is_copy = 1; - copy_ordered->original = event; /* store original */ - - ret = em_send(copy_event, q_ctx->dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(copy_event); - test_fatal_if(!appl_shm->exit_flag, - "event send:%" PRI_STAT ""); - } - out_of_order = 1; - em_ordered_processing_end(); - } - - sub_ordered->sub_seq = sub_seq; - sub_ordered->out_of_order = out_of_order; - sub_ordered->last_in_order = 0; - sub_ordered->is_copy = 0; - - ret = em_send(sub_event, q_ctx->dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(sub_event); - test_fatal_if(!appl_shm->exit_flag, - "event send:%" PRI_STAT ""); - } - } - - if (interleave == i) { - ordered->sub_seq = sub_seq; - ordered->out_of_order = 0; - ordered->last_in_order = 1; - ret = em_send(event, q_ctx->dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "event send:%" PRI_STAT ""); - } - } -} - -/** - * @private - * - * EO receive function for EO B. - * - * Loops back events. - */ -static void -eo_atomic_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_ctx) -{ - eo_atomic_context_t *const eo_ctx = eo_context; - q_atomic_context_t *const q_ctx = queue_ctx; - test_event_t *const test_event = em_event_pointer(event); - const int core = em_core_id(); - ordered_event_t *ordered; - em_status_t ret; - uint64_t num_events; - uint32_t seq, sub_seq; - int out_of_order, last_in_order; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - if (test_event->ev_id == EV_ID_ORDERED_EVENT) { - ordered = &test_event->ordered; - if (ordered->is_copy) - em_free(ordered->original); - } - em_free(event); - return; - } - - if (unlikely(test_event->ev_id == EV_ID_START_EVENT)) { - /* - * Start-up only, one time: initialize the test event sending. 
- * Called from EO-receive to avoid mixing up events & sequence - * numbers in start-up for ordered EO-pairs (sending from the - * start functions could mess up the seqno:s since all the - * cores are already in the dispatch loop). - */ - initialize_events(&test_event->start); - em_free(event); - return; - } - - test_fatal_if(test_event->ev_id != EV_ID_ORDERED_EVENT, - "Unexpected ev-id:%d", test_event->ev_id); - - ordered = &test_event->ordered; - seq = ordered->seq; - sub_seq = ordered->sub_seq; - out_of_order = ordered->out_of_order; - last_in_order = ordered->last_in_order; - - if (ordered->is_copy) - em_free(ordered->original); - - /* Check the sequence number for events that should be in order */ - uint32_t q_ctx_seq = q_ctx->seq; - uint32_t q_ctx_sub_seq = q_ctx->sub_seq; - - if (!out_of_order && - unlikely(seq != q_ctx_seq || sub_seq != q_ctx_sub_seq)) - APPL_EXIT_FAILURE("Bad seqnbr EO:%" PRI_EO " Q:%" PRI_QUEUE "\t" - "expected:%u-%u event-seq:%u-%u core:%d\n", - eo_ctx->hdl, queue, q_ctx_seq, - q_ctx_sub_seq, seq, sub_seq, core); - - if (out_of_order) { - em_free(event); - } else if (last_in_order) { - ordered->seq = q_ctx_seq + NUM_EVENT; - ordered->sub_seq = 0; - q_ctx->seq = q_ctx_seq + 1; - q_ctx->sub_seq = 0; - ret = em_send(event, q_ctx->dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "event send:%" PRI_STAT ""); - } - } else if (!out_of_order) { - q_ctx->sub_seq = q_ctx_sub_seq + 1; - em_free(event); - } - - num_events = test_shm->core_stat[core].num_events; - - /* Update the cycle count and print results when necessary */ - if (unlikely(num_events == 0)) { - test_shm->core_stat[core].begin_cycles = env_get_cycle(); - num_events = 1; - } else if (unlikely(num_events > PRINT_EVENT_COUNT)) { - test_shm->core_stat[core].end_cycles = env_get_cycle(); - test_shm->core_stat[core].print_count += 1; - - /* Print measurement result */ - if (PRINT_ON_ALL_CORES) - print_result(&test_shm->core_stat[core]); - else if (core == 0) - print_result(&test_shm->core_stat[core]); - /* Restart the measurement */ - test_shm->core_stat[core].begin_cycles = env_get_cycle(); - num_events = 0; - } else { - num_events += 1; - } - - test_shm->core_stat[core].num_events = num_events; -} - -/** - * @private - * - * Initialize test events. Allocate and send the test events to an EO-pair. - */ -static void -initialize_events(start_event_t *const start_event) -{ - em_event_t events[NUM_EVENT]; - ordered_event_t *ordered; - int num_sent = 0; - int i, j; - - /* Alloc and send test events */ - for (i = 0; i < NUM_EVENT; i++) { - events[i] = em_alloc(sizeof(ordered_event_t), EM_EVENT_TYPE_SW, - test_shm->pool); - test_fatal_if(events[i] == EM_EVENT_UNDEF, - "Event allocation failed:%i", i); - - ordered = em_event_pointer(events[i]); - ordered->ev_id = EV_ID_ORDERED_EVENT; - ordered->seq = i; - ordered->sub_seq = 0; - } - - /* Send in bursts of 'SEND_MULTI_MAX' events */ - const int send_rounds = NUM_EVENT / SEND_MULTI_MAX; - const int left_over = NUM_EVENT % SEND_MULTI_MAX; - - for (i = 0, j = 0; i < send_rounds; i++, j += SEND_MULTI_MAX) { - num_sent += em_send_multi(&events[j], SEND_MULTI_MAX, - start_event->ordered_queue); - } - if (left_over) { - num_sent += em_send_multi(&events[j], left_over, - start_event->ordered_queue); - } - test_fatal_if(num_sent != NUM_EVENT, - "Event send multi failed:%d (%d)\n" - "Q:%" PRI_QUEUE "", - num_sent, NUM_EVENT, start_event->ordered_queue); -} - -/** - * Get queue priority value based on the index number. 
- * - * @param Queue index - * - * @return Queue priority value - * - * @note Priority distribution: 40% LOW, 40% NORMAL, 20% HIGH - */ -static int -get_queue_priority(const int queue_index) -{ - int remainder = queue_index % 5; - - if (remainder <= 1) - return EM_QUEUE_PRIO_LOW; - else if (remainder <= 3) - return EM_QUEUE_PRIO_NORMAL; - else - return EM_QUEUE_PRIO_HIGH; -} - -/** - * Prints test measurement result - */ -static void -print_result(test_stat_t *const test_stat) -{ - uint64_t diff; - uint32_t hz; - double mhz; - double cycles_per_event; - uint64_t print_count; - - if (likely(test_stat->end_cycles > test_stat->begin_cycles)) - diff = test_stat->end_cycles - test_stat->begin_cycles; - else - diff = UINT64_MAX - test_stat->begin_cycles + - test_stat->end_cycles + 1; - - print_count = test_stat->print_count; - cycles_per_event = ((double)diff) / ((double)test_stat->num_events); - - hz = env_core_hz(); - mhz = ((double)hz) / 1000000.0; - - APPL_PRINT("cycles per event %.2f @%.2f MHz (core-%02i %" PRIu64 ")\n", - cycles_per_event, mhz, em_core_id(), print_count); -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine Parallel-Ordered queue test + */ + +#include +#include +#include + +#include +#include + +#include "cm_setup.h" +#include "cm_error_handler.h" + +/* + * Test configuration + */ + +/** Number of test EOs and queues. Must be an even number. */ +#define NUM_EO 32 + +/** Number of initial events per EO pair. 
+#define NUM_EVENT 37
+
+/** Number of events that EO-ordered will allocate for each input event */
+#define NUM_SUB_EVENT 11
+
+/** Max number of cores */
+#define MAX_NBR_OF_CORES 128
+
+/** The number of events to be received before printing a result */
+#define PRINT_EVENT_COUNT 0xff0000
+
+/** Print results on all cores */
+#define PRINT_ON_ALL_CORES 1 /* 0=False or 1=True */
+
+/** Define how many events are sent per em_send_multi() call */
+#define SEND_MULTI_MAX 32
+
+/**
+ * Test statistics (per core)
+ */
+typedef union {
+	uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED;
+	struct {
+		uint64_t num_events;
+		uint64_t begin_cycles;
+		uint64_t end_cycles;
+		uint64_t print_count;
+	};
+} test_stat_t;
+
+COMPILE_TIME_ASSERT(sizeof(test_stat_t) == ENV_CACHE_LINE_SIZE,
+		    TEST_STAT_T_SIZE_ERROR);
+
+/**
+ * Ordered queue context
+ */
+typedef struct {
+	/** Next destination queue */
+	em_queue_t dest_queue;
+} q_ordered_context_t;
+
+/**
+ * Atomic queue context
+ */
+typedef struct {
+	/** Expected sequence number */
+	uint32_t seq;
+	/** Expected sub-sequence number */
+	uint32_t sub_seq;
+	/** Next destination queue */
+	em_queue_t dest_queue;
+} q_atomic_context_t;
+
+/**
+ * Queue context padded to cache line size
+ */
+typedef union {
+	uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED;
+	q_ordered_context_t q_ordered_ctx;
+	q_atomic_context_t q_atomic_ctx;
+} q_context_array_elem_t;
+
+COMPILE_TIME_ASSERT(sizeof(q_context_array_elem_t) == ENV_CACHE_LINE_SIZE,
+		    Q_CONTEXT_SIZE_ERROR);
+
+/**
+ * EO-ordered context, i.e. EO with an ordered queue
+ */
+typedef struct {
+	/** This EO */
+	em_eo_t hdl;
+	/** The EO's queue */
+	em_queue_t ordered_queue;
+} eo_ordered_context_t;
+
+/**
+ * EO-atomic context, i.e. EO with an atomic queue
+ */
+typedef struct {
+	/** This EO */
+	em_eo_t hdl;
+	/** The EO's queue */
+	em_queue_t atomic_queue;
+	/** The peer EO's ordered queue */
+	em_queue_t peer_ordered_queue;
+} eo_atomic_context_t;
+
+/**
+ * EO context padded to cache line size
+ */
+typedef union {
+	uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED;
+	eo_ordered_context_t eo_ordered_ctx;
+	eo_atomic_context_t eo_atomic_ctx;
+} eo_context_array_elem_t;
+
+COMPILE_TIME_ASSERT(sizeof(eo_context_array_elem_t) == ENV_CACHE_LINE_SIZE,
+		    EO_CONTEXT_SIZE_ERROR);
+
+#define EV_ID_ORDERED_EVENT 1
+#define EV_ID_START_EVENT 2
+/** Ordered event content */
+typedef struct {
+	/** Event ID */
+	int ev_id;
+	/** Sequence number */
+	uint32_t seq;
+	/** Sub-sequence number */
+	uint32_t sub_seq;
+	/** Indication from sender that event might be received out of order */
+	int out_of_order;
+	/** Indication from sender that event is last in order using 'seq' */
+	int last_in_order;
+	/** Indication from sender that event is a copy of 'original' */
+	int is_copy;
+	/** If the event is a copy then the original event is sent along */
+	em_event_t original;
+} ordered_event_t;
+/** Startup event content */
+typedef struct {
+	/** Event ID */
+	int ev_id;
+	/** Request to allocate and send test events into the 'ordered_queue' */
+	em_queue_t ordered_queue;
+} start_event_t;
+/**
+ * Test event, content identified by 'ev_id'
+ */
+typedef union {
+	int ev_id;
+	ordered_event_t ordered;
+	start_event_t start;
+} test_event_t;
+
+/**
+ * Test shared memory
+ */
+typedef struct {
+	/** Event pool used by this application */
+	em_pool_t pool;
+	/** Ordered queue context array */
+	q_context_array_elem_t q_ordered_ctx[NUM_EO / 2]
+		ENV_CACHE_LINE_ALIGNED;
+	/** Atomic queue context array
*/ + q_context_array_elem_t q_atomic_ctx[NUM_EO / 2] + ENV_CACHE_LINE_ALIGNED; + /** EO context array for EOs with ordered queue */ + eo_context_array_elem_t eo_ordered_ctx[NUM_EO / 2] + ENV_CACHE_LINE_ALIGNED; + /** EO context array for EOs with atomic queue */ + eo_context_array_elem_t eo_atomic_ctx[NUM_EO / 2] + ENV_CACHE_LINE_ALIGNED; + /** Array of core specific data accessed by using core index. */ + test_stat_t core_stat[MAX_NBR_OF_CORES] ENV_CACHE_LINE_ALIGNED; +} test_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL test_shm_t *test_shm; + +static em_status_t +eo_ordered_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t +eo_ordered_stop(void *eo_context, em_eo_t eo); +static void +eo_ordered_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_ctx); + +static em_status_t +eo_atomic_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t +eo_atomic_stop(void *eo_context, em_eo_t eo); +static void +eo_atomic_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_ctx); +static void +initialize_events(start_event_t *const start_event); +static void +print_result(test_stat_t *const test_stat); +static int +get_queue_priority(const int index); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + test_shm = env_shared_reserve("TestSharedMem", + sizeof(test_shm_t)); + em_register_error_handler(test_error_handler); + } else { + test_shm = env_shared_lookup("TestSharedMem"); + } + + if (test_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Test init failed on EM-core:%u", em_core_id()); + else if (core == 0) + memset(test_shm, 0, sizeof(test_shm_t)); +} + +/** + * Startup of the test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo_a, eo_b; + em_queue_t queue_a, queue_b; + em_status_t ret; + eo_ordered_context_t *eo_ordered_ctx; + eo_atomic_context_t *eo_atomic_ctx; + q_ordered_context_t *q_ordered_ctx; + q_atomic_context_t *q_atomic_ctx; + int i; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. 
+ */ + if (appl_conf->num_pools >= 1) + test_shm->pool = appl_conf->pools[0]; + else + test_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + test_shm->pool); + + test_fatal_if(test_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + /* + * Create and start EO's & queues + */ + for (i = 0; i < NUM_EO / 2; i++) { + eo_ordered_ctx = &test_shm->eo_ordered_ctx[i].eo_ordered_ctx; + eo_atomic_ctx = &test_shm->eo_atomic_ctx[i].eo_atomic_ctx; + q_ordered_ctx = &test_shm->q_ordered_ctx[i].q_ordered_ctx; + q_atomic_ctx = &test_shm->q_atomic_ctx[i].q_atomic_ctx; + + /* Create EO with ordered queue */ + eo_a = em_eo_create("eo-ordered", eo_ordered_start, NULL, + eo_ordered_stop, NULL, eo_ordered_receive, + eo_ordered_ctx); + queue_a = em_queue_create("ordered", + EM_QUEUE_TYPE_PARALLEL_ORDERED, + get_queue_priority(i), + EM_QUEUE_GROUP_DEFAULT, NULL); + + ret = em_queue_set_context(queue_a, q_ordered_ctx); + test_fatal_if(ret != EM_OK, + "Queue set context:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eo_a, queue_a); + + ret = em_eo_add_queue_sync(eo_a, queue_a); + test_fatal_if(ret != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eo_a, queue_a); + + eo_ordered_ctx->hdl = eo_a; + eo_ordered_ctx->ordered_queue = queue_a; + + /* Create EO with an atomic queue */ + eo_b = em_eo_create("eo-atomic", eo_atomic_start, NULL, + eo_atomic_stop, NULL, eo_atomic_receive, + eo_atomic_ctx); + queue_b = em_queue_create("atomic", + EM_QUEUE_TYPE_ATOMIC, + get_queue_priority(i), + EM_QUEUE_GROUP_DEFAULT, NULL); + + ret = em_queue_set_context(queue_b, q_atomic_ctx); + test_fatal_if(ret != EM_OK, + "Queue set context:%" PRI_STAT "\n" + "EO:%" PRI_EO " queue:%" PRI_QUEUE "", + ret, eo_b, queue_b); + + ret = em_eo_add_queue_sync(eo_b, queue_b); + test_fatal_if(ret != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " queue:%" PRI_QUEUE "", + ret, eo_b, queue_b); + eo_atomic_ctx->hdl = eo_b; + eo_atomic_ctx->atomic_queue = queue_b; + eo_atomic_ctx->peer_ordered_queue = queue_a; + + /* Initialize queue context data */ + q_ordered_ctx->dest_queue = queue_b; + q_atomic_ctx->seq = 0; + q_atomic_ctx->sub_seq = 0; + q_atomic_ctx->dest_queue = queue_a; + + /* Start the EO's */ + ret = em_eo_start_sync(eo_a, NULL, NULL); + test_fatal_if(ret != EM_OK, + "EO start:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo_a); + ret = em_eo_start_sync(eo_b, NULL, NULL); + test_fatal_if(ret != EM_OK, + "EO start:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo_b); + } +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + int i; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + /* stop all EOs */ + for (i = 0; i < NUM_EO / 2; i++) { + eo = test_shm->eo_atomic_ctx[i].eo_atomic_ctx.hdl; + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO stop:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + eo = test_shm->eo_ordered_ctx[i].eo_ordered_ctx.hdl; + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO stop:%" PRI_STAT " EO:%" 
PRI_EO "", + ret, eo); + } +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(test_shm); + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function. + */ +static em_status_t +eo_ordered_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + (void)eo_context; + (void)conf; + + APPL_PRINT("EO %" PRI_EO " starting.\n", eo); + + return EM_OK; +} + +/** + * @private + * + * EO start function. + */ +static em_status_t +eo_atomic_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + eo_atomic_context_t *const eo_ctx = eo_context; + em_status_t ret; + + (void)conf; + + APPL_PRINT("EO %" PRI_EO " starting.\n", eo); + + /* + * Allocate and send the startup event to the atomic EO of the pair. + */ + em_event_t event = em_alloc(sizeof(start_event_t), EM_EVENT_TYPE_SW, + test_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); + start_event_t *start_event = em_event_pointer(event); + + start_event->ev_id = EV_ID_START_EVENT; + start_event->ordered_queue = eo_ctx->peer_ordered_queue; + + ret = em_send(event, eo_ctx->atomic_queue); + test_fatal_if(ret != EM_OK, "start event send:%" PRI_STAT ""); + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t +eo_ordered_stop(void *eo_context, em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + /* delete the EO at the end of the stop-function */ + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO delete:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t +eo_atomic_stop(void *eo_context, em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + /* delete the EO at the end of the stop-function */ + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO delete:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + return EM_OK; +} + +/** + * @private + * + * EO receive function for EO A. + * + * Loops back events and calculates the event rate. 
+ */ +static void +eo_ordered_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_ctx) +{ + q_ordered_context_t *const q_ctx = queue_ctx; + test_event_t *const test_event = em_event_pointer(event); + ordered_event_t *ordered; + em_status_t ret; + int interleave; + int out_of_order = 0; + uint32_t sub_seq; + int i; + + (void)eo_context; + (void)type; + (void)queue; + + test_fatal_if(test_event->ev_id != EV_ID_ORDERED_EVENT, + "Unexpected ev-id:%d", test_event->ev_id); + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + ordered = &test_event->ordered; + ordered->out_of_order = 0; + ordered->last_in_order = 0; + ordered->is_copy = 0; + + /* interleave the input event in between the output events */ + interleave = ordered->seq % (NUM_SUB_EVENT + 1); + + for (i = 0, sub_seq = 0; i < NUM_SUB_EVENT; i++, sub_seq++) { + /* allocate sub-events to send in the same ordered context */ + em_event_t sub_event = em_alloc(sizeof(ordered_event_t), + EM_EVENT_TYPE_SW, + test_shm->pool); + + test_fatal_if(sub_event == EM_EVENT_UNDEF, + "Sub-event alloc failed:%i", i); + + ordered_event_t *const sub_ordered = + em_event_pointer(sub_event); + + sub_ordered->ev_id = EV_ID_ORDERED_EVENT; + sub_ordered->seq = ordered->seq; + + if (interleave == i) { + ordered->sub_seq = sub_seq; + sub_seq++; + ordered->last_in_order = 1; + + em_event_t copy_event = + em_alloc(sizeof(ordered_event_t), + EM_EVENT_TYPE_SW, test_shm->pool); + test_fatal_if(copy_event == EM_EVENT_UNDEF, + "Copy-event alloc failed:%i", i); + ordered_event_t *const copy_ordered = + em_event_pointer(copy_event); + memcpy(copy_ordered, ordered, sizeof(ordered_event_t)); + copy_ordered->is_copy = 1; + copy_ordered->original = event; /* store original */ + + ret = em_send(copy_event, q_ctx->dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(copy_event); + test_fatal_if(!appl_shm->exit_flag, + "event send:%" PRI_STAT ""); + } + out_of_order = 1; + em_ordered_processing_end(); + } + + sub_ordered->sub_seq = sub_seq; + sub_ordered->out_of_order = out_of_order; + sub_ordered->last_in_order = 0; + sub_ordered->is_copy = 0; + + ret = em_send(sub_event, q_ctx->dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(sub_event); + test_fatal_if(!appl_shm->exit_flag, + "event send:%" PRI_STAT ""); + } + } + + if (interleave == i) { + ordered->sub_seq = sub_seq; + ordered->out_of_order = 0; + ordered->last_in_order = 1; + ret = em_send(event, q_ctx->dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "event send:%" PRI_STAT ""); + } + } +} + +/** + * @private + * + * EO receive function for EO B. + * + * Loops back events. + */ +static void +eo_atomic_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_ctx) +{ + eo_atomic_context_t *const eo_ctx = eo_context; + q_atomic_context_t *const q_ctx = queue_ctx; + test_event_t *const test_event = em_event_pointer(event); + const int core = em_core_id(); + ordered_event_t *ordered; + em_status_t ret; + uint64_t num_events; + uint32_t seq, sub_seq; + int out_of_order, last_in_order; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + if (test_event->ev_id == EV_ID_ORDERED_EVENT) { + ordered = &test_event->ordered; + if (ordered->is_copy) + em_free(ordered->original); + } + em_free(event); + return; + } + + if (unlikely(test_event->ev_id == EV_ID_START_EVENT)) { + /* + * Start-up only, one time: initialize the test event sending. 
+ * Called from EO-receive to avoid mixing up events & sequence + * numbers in start-up for ordered EO-pairs (sending from the + * start functions could mess up the seqno:s since all the + * cores are already in the dispatch loop). + */ + initialize_events(&test_event->start); + em_free(event); + return; + } + + test_fatal_if(test_event->ev_id != EV_ID_ORDERED_EVENT, + "Unexpected ev-id:%d", test_event->ev_id); + + ordered = &test_event->ordered; + seq = ordered->seq; + sub_seq = ordered->sub_seq; + out_of_order = ordered->out_of_order; + last_in_order = ordered->last_in_order; + + if (ordered->is_copy) + em_free(ordered->original); + + /* Check the sequence number for events that should be in order */ + uint32_t q_ctx_seq = q_ctx->seq; + uint32_t q_ctx_sub_seq = q_ctx->sub_seq; + + if (!out_of_order && + unlikely(seq != q_ctx_seq || sub_seq != q_ctx_sub_seq)) + APPL_EXIT_FAILURE("Bad seqnbr EO:%" PRI_EO " Q:%" PRI_QUEUE "\t" + "expected:%u-%u event-seq:%u-%u core:%d\n", + eo_ctx->hdl, queue, q_ctx_seq, + q_ctx_sub_seq, seq, sub_seq, core); + + if (out_of_order) { + em_free(event); + } else if (last_in_order) { + ordered->seq = q_ctx_seq + NUM_EVENT; + ordered->sub_seq = 0; + q_ctx->seq = q_ctx_seq + 1; + q_ctx->sub_seq = 0; + ret = em_send(event, q_ctx->dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "event send:%" PRI_STAT ""); + } + } else if (!out_of_order) { + q_ctx->sub_seq = q_ctx_sub_seq + 1; + em_free(event); + } + + num_events = test_shm->core_stat[core].num_events; + + /* Update the cycle count and print results when necessary */ + if (unlikely(num_events == 0)) { + test_shm->core_stat[core].begin_cycles = env_get_cycle(); + num_events = 1; + } else if (unlikely(num_events > PRINT_EVENT_COUNT)) { + test_shm->core_stat[core].end_cycles = env_get_cycle(); + test_shm->core_stat[core].print_count += 1; + + /* Print measurement result */ + if (PRINT_ON_ALL_CORES) + print_result(&test_shm->core_stat[core]); + else if (core == 0) + print_result(&test_shm->core_stat[core]); + /* Restart the measurement */ + test_shm->core_stat[core].begin_cycles = env_get_cycle(); + num_events = 0; + } else { + num_events += 1; + } + + test_shm->core_stat[core].num_events = num_events; +} + +/** + * @private + * + * Initialize test events. Allocate and send the test events to an EO-pair. + */ +static void +initialize_events(start_event_t *const start_event) +{ + em_event_t events[NUM_EVENT]; + ordered_event_t *ordered; + int num_sent = 0; + int i, j; + + /* Alloc and send test events */ + for (i = 0; i < NUM_EVENT; i++) { + events[i] = em_alloc(sizeof(ordered_event_t), EM_EVENT_TYPE_SW, + test_shm->pool); + test_fatal_if(events[i] == EM_EVENT_UNDEF, + "Event allocation failed:%i", i); + + ordered = em_event_pointer(events[i]); + ordered->ev_id = EV_ID_ORDERED_EVENT; + ordered->seq = i; + ordered->sub_seq = 0; + } + + /* Send in bursts of 'SEND_MULTI_MAX' events */ + const int send_rounds = NUM_EVENT / SEND_MULTI_MAX; + const int left_over = NUM_EVENT % SEND_MULTI_MAX; + + for (i = 0, j = 0; i < send_rounds; i++, j += SEND_MULTI_MAX) { + num_sent += em_send_multi(&events[j], SEND_MULTI_MAX, + start_event->ordered_queue); + } + if (left_over) { + num_sent += em_send_multi(&events[j], left_over, + start_event->ordered_queue); + } + test_fatal_if(num_sent != NUM_EVENT, + "Event send multi failed:%d (%d)\n" + "Q:%" PRI_QUEUE "", + num_sent, NUM_EVENT, start_event->ordered_queue); +} + +/** + * Get queue priority value based on the index number. 
+ *
+ * @param queue_index  Queue index
+ *
+ * @return Queue priority value
+ *
+ * @note Priority distribution: 40% LOW, 40% NORMAL, 20% HIGH
+ */
+static int
+get_queue_priority(const int queue_index)
+{
+	int remainder = queue_index % 5;
+
+	if (remainder <= 1)
+		return EM_QUEUE_PRIO_LOW;
+	else if (remainder <= 3)
+		return EM_QUEUE_PRIO_NORMAL;
+	else
+		return EM_QUEUE_PRIO_HIGH;
+}
+
+/**
+ * Prints test measurement result
+ */
+static void
+print_result(test_stat_t *const test_stat)
+{
+	uint64_t diff;
+	uint32_t hz;
+	double mhz;
+	double cycles_per_event;
+	uint64_t print_count;
+
+	diff = env_cycles_diff(test_stat->end_cycles, test_stat->begin_cycles);
+
+	print_count = test_stat->print_count;
+	cycles_per_event = ((double)diff) / ((double)test_stat->num_events);
+
+	hz = env_core_hz();
+	mhz = ((double)hz) / 1000000.0;
+
+	APPL_PRINT("cycles per event %.2f @%.2f MHz (core-%02i %" PRIu64 ")\n",
+		   cycles_per_event, mhz, em_core_id(), print_count);
+}
diff --git a/programs/example/queue/queue_types_ag.c b/programs/example/queue/queue_types_ag.c
index 426334f9..267f208f 100644
--- a/programs/example/queue/queue_types_ag.c
+++ b/programs/example/queue/queue_types_ag.c
@@ -1,1588 +1,1585 @@
-/*
- * Copyright (c) 2014, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine Queue Types test example with included atomic groups.
- *
- * The test creates several EO-pairs and sends events between the queues in
- * the pair. Each EO has an input queue (of type atomic, parallel or
- * parallel-ordered) or, in the case of atomic groups, three(3) input atomic
- * queues that belong to the same atomic group but have different priority.
- * The events sent between the queues of the EO-pair are counted and
- * statistics for each pair type is printed. If the queues in the EO-pair
- * retain order also this is verified.
- */
-
-#include <string.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <inttypes.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/* Number of queue type pairs (constant, don't change) */
-#define QUEUE_TYPE_PAIRS 10
-/*
- * Number of test EOs and queues. Must be an even number.
- * Test has NUM_EO/2 EO pairs, that send ping-pong events.
- * Depending on test dynamics (e.g. single burst in atomic
- * queue) only one EO of a pair might be active at a time.
- */
-#define NUM_EO (8 * QUEUE_TYPE_PAIRS)
-/* Max number of queues supported by the test */
-#define MAX_QUEUES (NUM_EO / QUEUE_TYPE_PAIRS * 30)
-/* Number of ping-pong events per EO pair */
-#define NUM_EVENT (3 * 32)
-/* Number of data bytes in the event */
-#define DATA_SIZE 64
-/* Max number of cores supported by the test */
-#define MAX_CORES 64
-/* Print stats when the number of received events reaches this value on a core*/
-#define PRINT_COUNT 0x1000000
-
-/** Define how many events are sent per em_send_multi() call */
-#define SEND_MULTI_MAX 32
-
-/*
- * Enable atomic access checks.
- * If enabled will crash the application if the atomic-processing context
- * is violated, i.e. checks that events from an atomic queue are being
- * processed one-by-one.
- */
-#define VERIFY_ATOMIC_ACCESS 1 /* 0=False or 1=True */
-/*
- * Verify that the receive func processing context works as expected
- */
-#define VERIFY_PROCESSING_CONTEXT 1 /* 0=False or 1=True */
-
-/* Call em_atomic_processing_end every once in a while in EO-A */
-#define CALL_ATOMIC_PROCESSING_END__A 1 /* 0=False or 1=True */
-/* Call em_atomic_processing_end every once in a while in EO-B */
-#define CALL_ATOMIC_PROCESSING_END__B 1 /* 0=False or 1=True */
-
-/* Return 'TRUE' if the queue pair retains event order */
-#define ORDERED_PAIR(q_type_a, q_type_b) ( \
-	(((q_type_a) == EM_QUEUE_TYPE_ATOMIC) || \
-	 ((q_type_a) == EM_QUEUE_TYPE_PARALLEL_ORDERED)) && \
-	(((q_type_b) == EM_QUEUE_TYPE_ATOMIC) || \
-	 ((q_type_b) == EM_QUEUE_TYPE_PARALLEL_ORDERED)))
-
-#define ABS(nbr1, nbr2) (((nbr1) > (nbr2)) ? 
((nbr1) - (nbr2)) : \ - ((nbr2) - (nbr1))) - -#define PRINT_CORE_STAT_FMT \ -"Stat Core-%02i: Count/PairType\t" \ -"A-A:%6" PRIu64 " P-P:%6" PRIu64 " PO-PO:%6" PRIu64 "\t" \ -"P-A:%6" PRIu64 " PO-A:%6" PRIu64 " PO-P:%6" PRIu64 "\t" \ -"AG-AG:%6" PRIu64 " AG-A:%6" PRIu64 " AG-P:%6" PRIu64 " AG-PO:%6" PRIu64 "\t" \ -"cycles/event:%.0f @%.0fMHz %" PRIu64 "\n" - -/** - * Combinations of Queue Type pairs - */ -#define NO_AG (0) -#define IN_AG (1) -typedef struct queue_type_pairs_ { - em_queue_type_t q_type[2]; - int in_atomic_group[2]; -} queue_type_pair_t; - -queue_type_pair_t queue_type_pairs[QUEUE_TYPE_PAIRS] = { - /* Ordered Pair */ - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {NO_AG, NO_AG} }, - { {EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_TYPE_PARALLEL}, {NO_AG, NO_AG} }, - /* Ordered Pair */ - { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_PARALLEL_ORDERED}, - {NO_AG, NO_AG} }, - { {EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_TYPE_ATOMIC}, {NO_AG, NO_AG} }, - /* Ordered Pair */ - { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_ATOMIC}, - {NO_AG, NO_AG} }, - { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_PARALLEL}, - {NO_AG, NO_AG} }, - /* With Atomic Groups for atomic queues: */ - /* Ordered Pair */ - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {IN_AG, IN_AG} }, - /* Ordered Pair */ - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {IN_AG, NO_AG} }, - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL}, {IN_AG, NO_AG} }, - /* Ordered Pair */ - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL_ORDERED}, - {IN_AG, NO_AG} }, -}; - -COMPILE_TIME_ASSERT(sizeof(queue_type_pairs) == - (QUEUE_TYPE_PAIRS * sizeof(queue_type_pair_t)), - QUEUE_TYPE_PAIRS_SIZE_ERROR); - -typedef enum { - PT_ATOMIC_ATOMIC = 0, - PT_PARALLEL_PARALLEL = 1, - PT_PARALORD_PARALORD = 2, - PT_PARALLEL_ATOMIC = 3, - PT_PARALORD_ATOMIC = 4, - PT_PARALORD_PARALLEL = 5, - /* With Atomic Groups (AG) for atomic queues: */ - PT_AG_AG = 6, - PT_AG_ATOMIC = 7, - PT_AG_PARALLEL = 8, - PT_AG_PARALORD = 9, - PT_UNDEFINED -} pair_type_t; - -/** - * Test statistics (per core) - */ -typedef union { - uint8_t u8[2 * ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - - struct { - uint64_t events; - uint64_t begin_cycles; - uint64_t end_cycles; - uint64_t print_count; - /* - * Pair-Type count, i.e. the number of events belonging to - * a certain pair-type on this core - */ - uint64_t pt_count[QUEUE_TYPE_PAIRS]; - }; -} core_stat_t; - -COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, - CORE_STAT_T__SIZE_ERROR); - -/** - * Test EO context - */ -typedef struct { - em_eo_t eo_hdl; - /* EO pair retains order? 0/1 */ - int ordered_pair; - pair_type_t pair_type; - int owns_ag_queues; - em_atomic_group_t agrp_hdl; - int peer_owns_ag_queues; - /* Atomic group is also set as queue type atomic */ - em_queue_type_t q_type; - env_spinlock_t verify_atomic_access; - - void *end[0] ENV_CACHE_LINE_ALIGNED; -} eo_context_t; - -COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, - EO_CTX_T__SIZE_ERROR); - -/** - * Test Queue context - */ -typedef struct { - em_queue_t q_hdl; - em_queue_type_t q_type; - int in_atomic_group; - unsigned int idx; - uint64_t seqno; - /* Number of events at the previous check-point */ - uint64_t prev_events; - /* - * Total number of events handled from the queue. - * Atomically incremented, either by __atomic_add_fetch() or - * protected by atomic context (set by queue type). 
- */ - uint64_t num_events ENV_CACHE_LINE_ALIGNED; - - void *end[0] ENV_CACHE_LINE_ALIGNED; -} queue_context_t; - -COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, - Q_CTX_T__SIZE_ERROR); - -/* IDs stored in the event user area ID */ -#define EV_ID_START_EVENT 1 -#define EV_ID_DATA_EVENT 2 - -typedef struct { - int in_atomic_group_a; - int src_q_cnt; - em_queue_t src_queues[3]; - - int in_atomic_group_b; - int dst_q_cnt; - em_queue_t dst_queues[3]; -} start_event_uarea_t; - -typedef struct { - /* Next destination queue */ - em_queue_t dest; - em_queue_t src; - /* Sequence number */ - uint64_t seqno; -} data_event_uarea_t; - -typedef union { - start_event_uarea_t start; - data_event_uarea_t data; -} test_event_uarea_t; - -/** Data event content */ -typedef struct { - /* Test data */ - uint8_t data[DATA_SIZE]; -} data_event_t; - -typedef struct { - uint8_t u8[0]; /* no payload */ -} start_event_t; - -/** - * Test event, content identified by 'ev_id' - */ -typedef union { - start_event_t start; - data_event_t data; -} test_event_t; - -/** - * Queue Types test shared memory - */ -typedef struct { - core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; - - eo_context_t eo_context[NUM_EO] ENV_CACHE_LINE_ALIGNED; - - queue_context_t queue_context[MAX_QUEUES] ENV_CACHE_LINE_ALIGNED; - - unsigned num_queues ENV_CACHE_LINE_ALIGNED; - - em_pool_t pool; - - int teardown_in_progress; -} qtypes_shm_t; - -COMPILE_TIME_ASSERT(sizeof(qtypes_shm_t) % ENV_CACHE_LINE_SIZE == 0, - QTYPES_SHM_T__SIZE_ERROR); - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL qtypes_shm_t *qtypes_shm; - -/** - * Local Function Prototypes - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -stop(void *eo_context, em_eo_t eo); - -static void -initialize_events(const start_event_uarea_t *start); - -static void -receive_a(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -receive_b(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); - -static pair_type_t -get_pair_type(queue_type_pair_t *queue_type_pair); - -static inline void -verify_seqno(eo_context_t *const eo_ctx, queue_context_t *const q_ctx, - uint64_t seqno); - -static void -verify_all_queues_get_events(void); - -static inline void -verify_atomic_access__begin(eo_context_t *const eo_ctx); - -static inline void -verify_atomic_access__end(eo_context_t *const eo_ctx); - -static inline void -verify_processing_context(eo_context_t *const eo_ctx, em_queue_t queue); - -static void -print_core_stats(core_stat_t *const cstat, uint64_t print_events); - -static void -print_event_msg_string(void); - -static void -print_test_info(void); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Queue Types test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - qtypes_shm = env_shared_reserve("QueueTypesSharedMem", - sizeof(qtypes_shm_t)); - em_register_error_handler(test_error_handler); - } else { - qtypes_shm = env_shared_lookup("QueueTypesSharedMem"); - } - - if (qtypes_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(__LINE__), 0xdead, - "Queue Types test init failed on EM-core: %u\n", - em_core_id()); - } else if (core == 0) { - memset(qtypes_shm, 0, sizeof(qtypes_shm_t)); - } -} - -/** - * Startup of the Queue Types test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_atomic_group_t atomic_group; - em_eo_t eo; - em_queue_t queue_a, queue_b; - em_queue_t queue_ag_a1, queue_ag_a2, queue_ag_a3; - em_queue_t queue_ag_b1, queue_ag_b2, queue_ag_b3; - em_queue_type_t q_type_a, q_type_b; - em_status_t ret, start_ret = EM_ERROR; - eo_context_t *eo_ctx; - queue_context_t *q_ctx; - pair_type_t pair_type; - unsigned int qcnt = 0; /* queue context index */ - int in_atomic_group_a, in_atomic_group_b; - int ordered_pair; - char eo_name[EM_EO_NAME_LEN]; - char q_name[EM_QUEUE_NAME_LEN]; - char ag_name[EM_ATOMIC_GROUP_NAME_LEN]; - int i; - uint8_t eo_idx = 0, q_idx = 0, agrp_idx = 0; - - queue_a = EM_QUEUE_UNDEF; - queue_b = EM_QUEUE_UNDEF; - - queue_ag_a1 = EM_QUEUE_UNDEF; - queue_ag_a2 = EM_QUEUE_UNDEF; - queue_ag_a3 = EM_QUEUE_UNDEF; - - queue_ag_b1 = EM_QUEUE_UNDEF; - queue_ag_b2 = EM_QUEUE_UNDEF; - queue_ag_b3 = EM_QUEUE_UNDEF; - - /* - * Create own pool with events containing user area. - */ - em_pool_cfg_t pool_cfg; - - em_pool_cfg_init(&pool_cfg); - pool_cfg.event_type = EM_EVENT_TYPE_SW; - pool_cfg.user_area.in_use = true; - pool_cfg.user_area.size = sizeof(test_event_uarea_t); - - pool_cfg.num_subpools = 1; - pool_cfg.subpool[0].size = sizeof(test_event_t); - pool_cfg.subpool[0].num = NUM_EVENT * NUM_EO; - /* no cache needed, everything allocated at start-up: */ - pool_cfg.subpool[0].cache_size = 0; - - em_pool_t pool = em_pool_create("pool:Qtypes-AG", - EM_POOL_UNDEF, &pool_cfg); - test_fatal_if(pool == EM_POOL_UNDEF, "pool create failed"); - - qtypes_shm->pool = pool; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - qtypes_shm->pool); - - test_fatal_if(qtypes_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - qtypes_shm->num_queues = 0; - qtypes_shm->teardown_in_progress = EM_FALSE; - - /* Create and start application pairs. Send initial test events */ - for (i = 0; i < (NUM_EO / 2); i++) { - q_type_a = queue_type_pairs[i % QUEUE_TYPE_PAIRS].q_type[0]; - in_atomic_group_a = - queue_type_pairs[i % QUEUE_TYPE_PAIRS].in_atomic_group[0]; - - q_type_b = queue_type_pairs[i % QUEUE_TYPE_PAIRS].q_type[1]; - in_atomic_group_b = - queue_type_pairs[i % QUEUE_TYPE_PAIRS].in_atomic_group[1]; - - ordered_pair = ORDERED_PAIR(q_type_a, q_type_b); - - pair_type = - get_pair_type(&queue_type_pairs[i % QUEUE_TYPE_PAIRS]); - test_fatal_if(pair_type == PT_UNDEFINED, - "Queue Pair Type UNDEFINED! 
(%u, %u)", - q_type_a, q_type_b); - - /* Create EO "A" */ - ret = EM_OK; - - eo_ctx = &qtypes_shm->eo_context[2 * i]; - eo_ctx->ordered_pair = ordered_pair; - eo_ctx->pair_type = pair_type; - eo_ctx->q_type = q_type_a; - eo_ctx->owns_ag_queues = in_atomic_group_a; - eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; - eo_ctx->peer_owns_ag_queues = in_atomic_group_b; - - snprintf(eo_name, sizeof(eo_name), "EO-A%" PRIu8 "", ++eo_idx); - eo_name[sizeof(eo_name) - 1] = '\0'; - eo = em_eo_create(eo_name, start, NULL, stop, NULL, receive_a, - eo_ctx); - - if (in_atomic_group_a && q_type_a == EM_QUEUE_TYPE_ATOMIC) { - snprintf(ag_name, sizeof(ag_name), "AG-A%" PRIu8 "", - ++agrp_idx); - ag_name[sizeof(ag_name) - 1] = '\0'; - atomic_group = - em_atomic_group_create(ag_name, - EM_QUEUE_GROUP_DEFAULT); - test_fatal_if(atomic_group == EM_ATOMIC_GROUP_UNDEF, - "Atomic group creation failed!"); - - eo_ctx->agrp_hdl = atomic_group; - - snprintf(q_name, sizeof(q_name), "AG:Q-A%" PRIu8 "", - ++q_idx); - q_name[sizeof(q_name) - 1] = '\0'; - queue_ag_a1 = em_queue_create_ag(q_name, - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - snprintf(q_name, sizeof(q_name), "AG:Q-A%" PRIu8 "", - ++q_idx); - q_name[sizeof(q_name) - 1] = '\0'; - queue_ag_a2 = em_queue_create_ag(q_name, - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - snprintf(q_name, sizeof(q_name), "AG:Q-A%" PRIu8 "", - ++q_idx); - q_name[sizeof(q_name) - 1] = '\0'; - queue_ag_a3 = em_queue_create_ag(q_name, - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - - ret = em_eo_add_queue_sync(eo, queue_ag_a1); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - ret = em_eo_add_queue_sync(eo, queue_ag_a2); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - ret = em_eo_add_queue_sync(eo, queue_ag_a3); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_a1; - q_ctx->q_type = q_type_a; - q_ctx->in_atomic_group = in_atomic_group_a; - q_ctx->idx = qcnt++; - ret = em_queue_set_context(queue_ag_a1, q_ctx); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_a2; - q_ctx->q_type = q_type_a; - q_ctx->in_atomic_group = in_atomic_group_a; - q_ctx->idx = qcnt++; - ret = em_queue_set_context(queue_ag_a2, q_ctx); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_a3; - q_ctx->q_type = q_type_a; - q_ctx->in_atomic_group = in_atomic_group_a; - q_ctx->idx = qcnt++; - ret = em_queue_set_context(queue_ag_a3, q_ctx); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - } else { - snprintf(q_name, sizeof(q_name), "Q-A%" PRIu8 "", - ++q_idx); - q_name[sizeof(q_name) - 1] = '\0'; - queue_a = em_queue_create(q_name, q_type_a, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - ret = em_eo_add_queue_sync(eo, queue_a); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_a; - q_ctx->q_type = q_type_a; - q_ctx->in_atomic_group = in_atomic_group_a; - q_ctx->idx = qcnt++; - ret = em_queue_set_context(queue_a, q_ctx); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - } - - /* update qcnt each round to avoid == 0 in recv-func */ - qtypes_shm->num_queues = qcnt; - /* Start EO-A */ - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO-A setup failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - - /* Create EO "B" */ - ret = EM_OK; - - 
eo_ctx = &qtypes_shm->eo_context[2 * i + 1]; - eo_ctx->ordered_pair = ordered_pair; - eo_ctx->pair_type = pair_type; - eo_ctx->q_type = q_type_b; - eo_ctx->owns_ag_queues = in_atomic_group_b; - eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; - eo_ctx->peer_owns_ag_queues = in_atomic_group_a; - - snprintf(eo_name, sizeof(eo_name), "EO-B%" PRIu8 "", ++eo_idx); - eo_name[sizeof(eo_name) - 1] = '\0'; - eo = em_eo_create(eo_name, start, NULL, stop, NULL, receive_b, - eo_ctx); - - if (in_atomic_group_b && q_type_b == EM_QUEUE_TYPE_ATOMIC) { - snprintf(ag_name, sizeof(ag_name), "AG-B%" PRIu8 "", - ++agrp_idx); - ag_name[sizeof(ag_name) - 1] = '\0'; - atomic_group = - em_atomic_group_create(ag_name, - EM_QUEUE_GROUP_DEFAULT); - test_fatal_if(atomic_group == EM_ATOMIC_GROUP_UNDEF, - "Atomic group creation failed!"); - - eo_ctx->agrp_hdl = atomic_group; - - snprintf(q_name, sizeof(q_name), "AG:Q-B%" PRIu8 "", - ++q_idx); - q_name[sizeof(q_name) - 1] = '\0'; - queue_ag_b1 = em_queue_create_ag(q_name, - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - snprintf(q_name, sizeof(q_name), "AG:Q-B%" PRIu8 "", - ++q_idx); - q_name[sizeof(q_name) - 1] = '\0'; - queue_ag_b2 = em_queue_create_ag(q_name, - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - snprintf(q_name, sizeof(q_name), "AG:Q-B%" PRIu8 "", - ++q_idx); - q_name[sizeof(q_name) - 1] = '\0'; - queue_ag_b3 = em_queue_create_ag(q_name, - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - - ret = em_eo_add_queue_sync(eo, queue_ag_b1); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - ret = em_eo_add_queue_sync(eo, queue_ag_b2); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - ret = em_eo_add_queue_sync(eo, queue_ag_b3); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_b1; - q_ctx->q_type = q_type_b; - q_ctx->in_atomic_group = in_atomic_group_b; - q_ctx->idx = qcnt++; - ret = em_queue_set_context(queue_ag_b1, q_ctx); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_b2; - q_ctx->q_type = q_type_b; - q_ctx->in_atomic_group = in_atomic_group_b; - q_ctx->idx = qcnt++; - ret = em_queue_set_context(queue_ag_b2, q_ctx); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_b3; - q_ctx->q_type = q_type_b; - q_ctx->in_atomic_group = in_atomic_group_b; - q_ctx->idx = qcnt++; - ret = em_queue_set_context(queue_ag_b3, q_ctx); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - } else { - snprintf(q_name, sizeof(q_name), "Q-B%" PRIu8 "", - ++q_idx); - q_name[sizeof(q_name) - 1] = '\0'; - queue_b = em_queue_create(q_name, q_type_b, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - ret = em_eo_add_queue_sync(eo, queue_b); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_b; - q_ctx->q_type = q_type_b; - q_ctx->in_atomic_group = in_atomic_group_b; - q_ctx->idx = qcnt++; - ret = em_queue_set_context(queue_b, q_ctx); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - } - - /* update qcnt each round to avoid == 0 in recv-func */ - qtypes_shm->num_queues = qcnt; - /* Start EO-B */ - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO-B setup failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - - /* - * Allocate and send the startup event to the first EO of the - * pair of this round. 
- */ - em_event_t event = em_alloc(sizeof(test_event_t), - EM_EVENT_TYPE_SW, - qtypes_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); - - size_t uarea_size; - test_event_uarea_t *test_uarea; - - test_uarea = em_event_uarea_get(event, &uarea_size); - test_fatal_if(!test_uarea && uarea_size < sizeof(test_event_uarea_t), - "Event User Area error: ptr:%p sz:%zu < %zu", - test_uarea, uarea_size, sizeof(test_event_uarea_t)); - - ret = em_event_uarea_id_set(event, EV_ID_START_EVENT); - test_fatal_if(ret != EM_OK, - "Error setting uarea id, err:%" PRI_STAT ""); - - test_uarea->start.in_atomic_group_a = in_atomic_group_a; - if (in_atomic_group_a) { - test_uarea->start.src_q_cnt = 3; - test_uarea->start.src_queues[0] = queue_ag_a1; - test_uarea->start.src_queues[1] = queue_ag_a2; - test_uarea->start.src_queues[2] = queue_ag_a3; - } else { - test_uarea->start.src_q_cnt = 1; - test_uarea->start.src_queues[0] = queue_a; - } - - test_uarea->start.in_atomic_group_b = in_atomic_group_b; - if (in_atomic_group_b) { - test_uarea->start.dst_q_cnt = 3; - test_uarea->start.dst_queues[0] = queue_ag_b1; - test_uarea->start.dst_queues[1] = queue_ag_b2; - test_uarea->start.dst_queues[2] = queue_ag_b3; - } else { - test_uarea->start.dst_q_cnt = 1; - test_uarea->start.dst_queues[0] = queue_b; - } - - ret = em_send(event, test_uarea->start.src_queues[0]); - test_fatal_if(ret != EM_OK, "Event send:%" PRI_STAT "", ret); - } - - APPL_PRINT("\n\nqctx:%i MAX:%i\n\n", qcnt, MAX_QUEUES); - - test_fatal_if(qcnt > MAX_QUEUES || qtypes_shm->num_queues != qcnt, - "Queue context number too high!"); - - print_test_info(); -} - -/** - * Test stop function - * - * @attention Run only on one EM core - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and teardown. - */ -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - eo_context_t *eo_ctx; - int i; - - (void)appl_conf; - - /* mark 'teardown in progress' to avoid errors seq.nbr check errors */ - qtypes_shm->teardown_in_progress = EM_TRUE; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - /* stop all EOs */ - for (i = 0; i < NUM_EO; i++) { - eo_ctx = &qtypes_shm->eo_context[i]; - eo = eo_ctx->eo_hdl; - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO stop:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - } -} - -/** - * Termination of the 'Queue Types AG' test application. - * - * @attention Run on one EM core only - * - * @see cm_setup() for setup and teardown. - */ -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - em_status_t ret = em_pool_delete(qtypes_shm->pool); - - test_fatal_if(ret != EM_OK, - "em_pool_delete(%" PRI_POOL "):%" PRI_STAT "", - qtypes_shm->pool, ret); - - if (core == 0) { - env_shared_free(qtypes_shm); - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - eo_ctx->eo_hdl = eo; - - if (VERIFY_ATOMIC_ACCESS) - env_spinlock_init(&eo_ctx->verify_atomic_access); - - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. 
- */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t -stop(void *eo_context, em_eo_t eo) -{ - eo_context_t *const eo_ctx = (eo_context_t *)eo_context; - em_status_t ret; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - if (eo_ctx->agrp_hdl != EM_ATOMIC_GROUP_UNDEF) { - ret = em_atomic_group_delete(eo_ctx->agrp_hdl); - test_fatal_if(ret != EM_OK, - "AGrp delete:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - } - - /* delete the EO at the end of the stop-function */ - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO delete:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - return EM_OK; -} - -static void -initialize_events(const start_event_uarea_t *start) -{ - /* - * Allocate and send test events to the EO-pair of this round - */ - const int max_q_cnt = start->src_q_cnt > start->dst_q_cnt ? - start->src_q_cnt : start->dst_q_cnt; - /* tmp storage for all events to send this round */ - em_event_t all_events[max_q_cnt][NUM_EVENT]; - /* number of events for a queue in all_events[Q][events] */ - int ev_cnt[max_q_cnt]; - uint64_t seqno = 0; - int j, x, y; - em_status_t ret; - - for (x = 0; x < max_q_cnt; x++) - ev_cnt[x] = 0; - - for (j = 0; j < NUM_EVENT;) { - for (x = 0, y = 0; x < max_q_cnt; x++, y++, j++) { - em_event_t event = em_alloc(sizeof(test_event_t), - EM_EVENT_TYPE_SW, - qtypes_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "Event alloc fails"); - - test_event_t *test_event = em_event_pointer(event); - size_t uarea_size = 0; - test_event_uarea_t *test_uarea = - em_event_uarea_get(event, &uarea_size); - - test_fatal_if(!test_event || !test_uarea || - uarea_size != sizeof(test_event_uarea_t), - "Event payload/uarea error"); - - memset(test_event, 0, sizeof(test_event_t)); - memset(test_uarea, 0, uarea_size); - - ret = em_event_uarea_id_set(event, EV_ID_DATA_EVENT); - test_fatal_if(ret != EM_OK, - "Error setting uarea id, err:%" PRI_STAT ""); - - if (start->in_atomic_group_b) - test_uarea->data.dest = start->dst_queues[y]; - else - test_uarea->data.dest = start->dst_queues[0]; - - test_uarea->data.src = start->src_queues[x]; - - if (start->in_atomic_group_a == - start->in_atomic_group_b) { - /* verify seqno (symmetric EO-pairs)*/ - test_uarea->data.seqno = seqno; - } - - all_events[x][ev_cnt[x]] = event; - ev_cnt[x] += 1; - } - seqno += 1; - } - - /* Send events to EO A */ - for (x = 0; x < max_q_cnt; x++) { - int n, m; - int num_sent = 0; - - /* Send in bursts of 'SEND_MULTI_MAX' events */ - const int send_rounds = ev_cnt[x] / SEND_MULTI_MAX; - const int left_over = ev_cnt[x] % SEND_MULTI_MAX; - - for (n = 0, m = 0; n < send_rounds; - n++, m += SEND_MULTI_MAX) { - num_sent += em_send_multi(&all_events[x][m], - SEND_MULTI_MAX, - start->src_queues[x]); - } - if (left_over) { - num_sent += em_send_multi(&all_events[x][m], left_over, - start->src_queues[x]); - } - test_fatal_if(num_sent != ev_cnt[x], - "Event send multi failed:%d (%d)\n" - "Q:%" PRI_QUEUE "", - num_sent, ev_cnt[x], start->src_queues[x]); - } -} - -/** - * @private - * - * EO receive function for EO A. 
- * - * Forwards events to the next processing stage (EO) - * and calculates the event rate. - */ -static void -receive_a(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - eo_context_t *const eo_ctx = eo_context; - queue_context_t *const q_ctx = queue_context; - em_event_uarea_info_t uarea_info; - test_event_uarea_t *test_uarea; - em_queue_t dest_queue; - uint64_t queue_events; - uint64_t seqno; - em_status_t ret; - - const int core = em_core_id(); - core_stat_t *cstat = &qtypes_shm->core_stat[core]; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - ret = em_event_uarea_info(event, &uarea_info); - test_fatal_if(ret != EM_OK, - "em_event_uarea_info() fails:%" PRI_STAT "", ret); - test_uarea = uarea_info.uarea; - - if (unlikely(uarea_info.id.value == EV_ID_START_EVENT)) { - /* - * Start-up only, one time: initialize the test event sending. - * Called from EO-receive to avoid mixing up events & sequence - * numbers in start-up for ordered EO-pairs (sending from the - * start functions could mess up the seqno:s since all the - * cores are already in the dispatch loop). - */ - initialize_events(&test_uarea->start); - em_free(event); - return; - } - - test_fatal_if(uarea_info.id.value != EV_ID_DATA_EVENT, - "Unexpected ev-id:%d", uarea_info.id.value); - - if (VERIFY_ATOMIC_ACCESS) - verify_atomic_access__begin(eo_ctx); - - if (VERIFY_PROCESSING_CONTEXT) - verify_processing_context(eo_ctx, queue); - - seqno = test_uarea->data.seqno; - - /* Increment Q specific event counter (parallel Qs req atomic inc:s)*/ - if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) - queue_events = q_ctx->num_events++; - else - queue_events = __atomic_add_fetch(&q_ctx->num_events, 1, - __ATOMIC_RELAXED); - - test_fatal_if(test_uarea->data.src != queue, - "EO-A queue mismatch:%" PRI_QUEUE "!=%" PRI_QUEUE "", - test_uarea->data.src, queue); - - if (eo_ctx->ordered_pair && eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) { - /* Verify the seq nbr to make sure event order is maintained*/ - verify_seqno(eo_ctx, q_ctx, seqno); - } - - dest_queue = test_uarea->data.dest; - test_uarea->data.src = test_uarea->data.dest; - test_uarea->data.dest = queue; - - ret = em_send(event, dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, "EO-A em_send failure"); - } - - if (VERIFY_ATOMIC_ACCESS) - verify_atomic_access__end(eo_ctx); - - if (CALL_ATOMIC_PROCESSING_END__A) { - /* Call em_atomic_processing_end() every once in a while */ - if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC && - (queue_events % qtypes_shm->num_queues == q_ctx->idx)) - em_atomic_processing_end(); - } - - /* - * Update _core_ statistics after potentially releasing the - * atomic context. 
- */ - uint64_t core_events = cstat->events; - uint64_t print_events = 0; - - if (unlikely(core_events == 0)) { - cstat->begin_cycles = env_get_cycle(); - core_events += 1; - cstat->pt_count[eo_ctx->pair_type] += 1; - } else if (unlikely(core_events > PRINT_COUNT)) { - cstat->end_cycles = env_get_cycle(); - /* indicate that statistics should be printed this round: */ - print_events = core_events; - core_events = 0; - } else { - core_events += 1; - cstat->pt_count[eo_ctx->pair_type] += 1; - } - cstat->events = core_events; - - /* Print core specific statistics */ - if (unlikely(print_events)) { - if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) - em_atomic_processing_end(); - - if (core == 0) - verify_all_queues_get_events(); - - print_core_stats(cstat, print_events); - - for (int i = 0; i < QUEUE_TYPE_PAIRS; i++) - cstat->pt_count[i] = 0; - - cstat->begin_cycles = env_get_cycle(); - } -} - -/** - * @private - * - * EO receive function for EO B. - * - * Forwards events to the next processing stage (EO). - */ -static void -receive_b(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - eo_context_t *const eo_ctx = eo_context; - queue_context_t *const q_ctx = queue_context; - em_queue_t dest_queue; - test_event_uarea_t *test_uarea; - uint64_t queue_events; - em_status_t ret; - - const int core = em_core_id(); - core_stat_t *cstat = &qtypes_shm->core_stat[core]; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (VERIFY_ATOMIC_ACCESS) - verify_atomic_access__begin(eo_ctx); - - if (VERIFY_PROCESSING_CONTEXT) - verify_processing_context(eo_ctx, queue); - - em_event_uarea_info_t uarea_info; - - ret = em_event_uarea_info(event, &uarea_info); - test_fatal_if(ret != EM_OK, - "em_event_uarea_info() fails:%" PRI_STAT "", ret); - test_fatal_if(uarea_info.id.value != EV_ID_DATA_EVENT, - "Unexpected ev-id:%d", uarea_info.id.value); - - /* Increment Q specific event counter (parallel Qs req atomic inc:s) */ - if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) - queue_events = q_ctx->num_events++; - else - queue_events = __atomic_add_fetch(&q_ctx->num_events, 1, - __ATOMIC_RELAXED); - test_uarea = uarea_info.uarea; - test_fatal_if(test_uarea->data.src != queue, - "EO-B queue mismatch:%" PRI_QUEUE "!=%" PRI_QUEUE "", - test_uarea->data.src, queue); - - if (eo_ctx->ordered_pair && eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) { - /* Verify the seq nbr to make sure event order is maintained*/ - verify_seqno(eo_ctx, q_ctx, test_uarea->data.seqno); - } - - dest_queue = test_uarea->data.dest; - test_uarea->data.src = test_uarea->data.dest; - test_uarea->data.dest = queue; - - ret = em_send(event, dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, "EO-B em_send failure"); - } - - if (VERIFY_ATOMIC_ACCESS) - verify_atomic_access__end(eo_ctx); - - if (CALL_ATOMIC_PROCESSING_END__B) { - /* Call em_atomic_processing_end() every once in a while */ - if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC && - (queue_events % qtypes_shm->num_queues == q_ctx->idx)) - em_atomic_processing_end(); - } - - /* - * Update _core_ statistics after potentially releasing the - * atomic context. 
- */ - if (unlikely(cstat->events == 0)) - cstat->begin_cycles = env_get_cycle(); - cstat->events++; - - cstat->pt_count[eo_ctx->pair_type] += 1; -} - -static pair_type_t -get_pair_type(queue_type_pair_t *queue_type_pair) -{ - em_queue_type_t qt1 = queue_type_pair->q_type[0]; - em_queue_type_t qt2 = queue_type_pair->q_type[1]; - int in_ag1 = queue_type_pair->in_atomic_group[0]; - int in_ag2 = queue_type_pair->in_atomic_group[1]; - - switch (qt1) { - case EM_QUEUE_TYPE_ATOMIC: - switch (qt2) { - case EM_QUEUE_TYPE_ATOMIC: - if (in_ag1 && in_ag2) - return PT_AG_AG; - else if (in_ag1 || in_ag2) - return PT_AG_ATOMIC; - else - return PT_ATOMIC_ATOMIC; - - case EM_QUEUE_TYPE_PARALLEL: - if (in_ag1) - return PT_AG_PARALLEL; - else - return PT_PARALLEL_ATOMIC; - - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - if (in_ag1) - return PT_AG_PARALORD; - else - return PT_PARALORD_ATOMIC; - } - break; - - case EM_QUEUE_TYPE_PARALLEL: - switch (qt2) { - case EM_QUEUE_TYPE_ATOMIC: - if (in_ag2) - return PT_AG_PARALLEL; - else - return PT_PARALLEL_ATOMIC; - - case EM_QUEUE_TYPE_PARALLEL: - return PT_PARALLEL_PARALLEL; - - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - return PT_PARALORD_PARALLEL; - } - break; - - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - switch (qt2) { - case EM_QUEUE_TYPE_ATOMIC: - if (in_ag2) - return PT_AG_PARALORD; - else - return PT_PARALORD_ATOMIC; - - case EM_QUEUE_TYPE_PARALLEL: - return PT_PARALORD_PARALLEL; - - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - return PT_PARALORD_PARALORD; - } - break; - } - - return PT_UNDEFINED; -} - -static inline void -verify_seqno(eo_context_t *const eo_ctx, queue_context_t *const q_ctx, - uint64_t seqno) -{ - if (unlikely(qtypes_shm->teardown_in_progress)) - return; - - if (eo_ctx->owns_ag_queues == eo_ctx->peer_owns_ag_queues) { - const uint64_t max_seqno = (eo_ctx->owns_ag_queues) ? - NUM_EVENT / 3 - 1 : NUM_EVENT - 1; - - if (q_ctx->seqno != seqno) { - test_error((em_status_t)__LINE__, 0xdead, - "SEQUENCE ERROR A:\t" - "queue=%" PRI_QUEUE " Q-seqno=%" PRIu64 "\t" - "Event-seqno=%" PRIu64 " PT:%i", - q_ctx->q_hdl, q_ctx->seqno, seqno, - eo_ctx->pair_type); - exit(EXIT_FAILURE); - } - - if (q_ctx->seqno < max_seqno) - q_ctx->seqno++; - else - q_ctx->seqno = 0; - } -} - -/** - * Verifies that each queue processes all its events at least once per - * statistics round. - */ -static void -verify_all_queues_get_events(void) -{ - const unsigned int num_queues = qtypes_shm->num_queues; - unsigned int i, first = 1, q_evcnt_low = 0; - uint64_t curr, prev, diff; - - for (i = 0; i < num_queues; i++) { - queue_context_t *const tmp_qctx = - &qtypes_shm->queue_context[i]; - const uint64_t min_events = (tmp_qctx->in_atomic_group) ? - NUM_EVENT / 3 : NUM_EVENT; - const char *q_type_str; - - curr = __atomic_load_n(&tmp_qctx->num_events, __ATOMIC_RELAXED); - prev = tmp_qctx->prev_events; - diff = (curr >= prev) ? - curr - prev : UINT64_MAX - prev + curr + 1; - - tmp_qctx->prev_events = curr; - - if (unlikely(diff < min_events)) { - q_evcnt_low++; - if (first) { - first = 0; - print_event_msg_string(); - } - - switch (tmp_qctx->q_type) { - case EM_QUEUE_TYPE_ATOMIC: - if (tmp_qctx->in_atomic_group) - q_type_str = "AG"; - else - q_type_str = "A "; - break; - case EM_QUEUE_TYPE_PARALLEL: - q_type_str = "P "; - break; - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - q_type_str = "PO"; - break; - - default: - q_type_str = "??"; - break; - } - - APPL_PRINT("Q=%3" PRI_QUEUE "(%s cnt:%" PRIu64 ") %c", - tmp_qctx->q_hdl, q_type_str, diff, - (q_evcnt_low % 8 == 0) ? 
'\n' : ' '); - } - } - - if (!first) - APPL_PRINT("\nQueue count with too few events:%u\n\n", - q_evcnt_low); -} - -/** - * Try to take a spinlock and if it fails we know that another core is - * processing an event from the same atomic queue or atomic group, which - * should never happen => fatal error! The lock is for verification only, - * no sync purpose whatsoever. - */ -static inline void -verify_atomic_access__begin(eo_context_t *const eo_ctx) -{ - if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC && - unlikely(!env_spinlock_trylock(&eo_ctx->verify_atomic_access))) - test_error(EM_ERROR_SET_FATAL(__LINE__), 0xdead, - "EO Atomic context lost!"); -} - -/** - * Release the verification lock - */ -static inline void -verify_atomic_access__end(eo_context_t *const eo_ctx) -{ - if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) - env_spinlock_unlock(&eo_ctx->verify_atomic_access); -} - -/** - * Verify that the receive func processing context works as expected - */ -static inline void -verify_processing_context(eo_context_t *const eo_ctx, em_queue_t queue) -{ - const em_eo_t eo = eo_ctx->eo_hdl; - em_queue_t tmp_queue; - em_queue_type_t queue_type; - em_sched_context_type_t sched_type; - - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. - */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_ctx, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != queue, "Invalid current queue"); - - queue_type = em_queue_get_type(queue); - sched_type = em_sched_context_type_current(&tmp_queue); - test_fatal_if(tmp_queue != queue, "Invalid queue"); - test_fatal_if(queue_type != eo_ctx->q_type, "Q-type mismatch"); - - if (queue_type == EM_QUEUE_TYPE_ATOMIC) { - test_fatal_if(sched_type != EM_SCHED_CONTEXT_TYPE_ATOMIC, - "Invalid sched context type"); - } else if (queue_type == EM_QUEUE_TYPE_PARALLEL_ORDERED) { - test_fatal_if(sched_type != EM_SCHED_CONTEXT_TYPE_ORDERED, - "Invalid sched context type"); - } else if (queue_type == EM_QUEUE_TYPE_PARALLEL) { - test_fatal_if(sched_type != EM_SCHED_CONTEXT_TYPE_NONE, - "Invalid sched context type"); - } -} - -/** - * Print core specific statistics - */ -static void -print_core_stats(core_stat_t *const cstat, uint64_t print_events) -{ - uint64_t diff; - uint32_t hz; - double mhz; - double cycles_per_event; - uint64_t print_count; - - if (cstat->end_cycles > cstat->begin_cycles) - diff = cstat->end_cycles - cstat->begin_cycles; - else - diff = UINT64_MAX - cstat->begin_cycles + cstat->end_cycles + 1; - - print_count = cstat->print_count++; - cycles_per_event = (double)diff / (double)print_events; - - hz = env_core_hz(); - mhz = ((double)hz) / 1000000.0; - - APPL_PRINT(PRINT_CORE_STAT_FMT, em_core_id(), - cstat->pt_count[0], cstat->pt_count[1], cstat->pt_count[2], - cstat->pt_count[3], cstat->pt_count[4], cstat->pt_count[5], - cstat->pt_count[6], cstat->pt_count[7], cstat->pt_count[8], - cstat->pt_count[9], cycles_per_event, mhz, print_count); -} - -static void -print_event_msg_string(void) -{ - APPL_PRINT("\nToo few events detected for the following queues:\n"); -} - -static void -print_test_info(void) -{ - unsigned int num; - - /* Print the EO list */ - em_eo_t eo = em_eo_get_first(&num); - - APPL_PRINT("%d EOs:\n", num); - while (eo != EM_EO_UNDEF) { - em_eo_state_t state; - const char *state_str; - char buf[EM_EO_NAME_LEN]; - em_queue_t q; - - state = em_eo_get_state(eo); - switch (state) { - case EM_EO_STATE_UNDEF: - state_str = "UNDEF"; - break; - case 
EM_EO_STATE_CREATED: - state_str = "CREATED"; - break; - case EM_EO_STATE_STARTING: - state_str = "STARTING"; - break; - case EM_EO_STATE_RUNNING: - state_str = "RUNNING"; - break; - case EM_EO_STATE_STOPPING: - state_str = "STOPPING"; - break; - case EM_EO_STATE_ERROR: - state_str = "ERROR"; - break; - default: - state_str = "UNKNOWN"; - break; - } - em_eo_get_name(eo, buf, EM_EO_NAME_LEN - 1); - APPL_PRINT(" EO:%" PRI_EO ":'%s' state:%s\n", - eo, buf, state_str); - - q = em_eo_queue_get_first(&num, eo); - while (q != EM_QUEUE_UNDEF) { - APPL_PRINT(" - Q:%" PRI_QUEUE "\n", q); - q = em_eo_queue_get_next(); - } - eo = em_eo_get_next(); - } - APPL_PRINT("\n"); - - /* Print the queue list */ - em_queue_t q = em_queue_get_first(&num); - - APPL_PRINT("%d queues:\n", num); - while (q != EM_QUEUE_UNDEF) { - em_queue_type_t type; - const char *type_str; - em_queue_t q_check; - char buf[EM_QUEUE_NAME_LEN]; - - em_queue_get_name(q, buf, EM_QUEUE_NAME_LEN - 1); - - type = em_queue_get_type(q); - switch (type) { - case EM_QUEUE_TYPE_UNDEF: - type_str = "UNDEF"; - break; - case EM_QUEUE_TYPE_ATOMIC: - type_str = "ATOMIC"; - break; - case EM_QUEUE_TYPE_PARALLEL: - type_str = "PARALLEL"; - break; - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - type_str = "ORDERED"; - break; - case EM_QUEUE_TYPE_UNSCHEDULED: - type_str = "UNSCHEDULED"; - break; - case EM_QUEUE_TYPE_LOCAL: - type_str = "LOCAL"; - break; - case EM_QUEUE_TYPE_OUTPUT: - type_str = "OUTPUT"; - break; - default: - type_str = "UNKNOWN"; - break; - } - - APPL_PRINT(" Q:%" PRI_QUEUE ":'%s'\ttype:%s\n", - q, buf, type_str); - q_check = em_queue_find(buf); - test_fatal_if(q_check != q, "Queue mismatch:\n" - "%" PRI_QUEUE " != %" PRI_QUEUE "", - q_check, q); - q = em_queue_get_next(); - } - APPL_PRINT("\n"); - - /* Print the atomic group list */ - em_atomic_group_t ag = em_atomic_group_get_first(&num); - char ag_name[EM_ATOMIC_GROUP_NAME_LEN]; - - APPL_PRINT("%d Atomic-Groups:\n", num); - - while (ag != EM_ATOMIC_GROUP_UNDEF) { - if (ag != EM_ATOMIC_GROUP_UNDEF) { - em_queue_t ag_queue; - em_atomic_group_t ag_check; - - em_atomic_group_get_name(ag, ag_name, sizeof(ag_name)); - APPL_PRINT(" AG:%" PRI_AGRP ":'%s'\n", ag, ag_name); - - ag_check = em_atomic_group_find(ag_name); - test_fatal_if(ag_check != ag, "AG mismatch:\n" - "%" PRI_AGRP " != %" PRI_AGRP "", - ag_check, ag); - - ag_queue = em_atomic_group_queue_get_first(&num, ag); - while (ag_queue != EM_QUEUE_UNDEF) { - APPL_PRINT(" - Q:%" PRI_QUEUE "\n", - ag_queue); - ag_queue = em_atomic_group_queue_get_next(); - } - } - ag = em_atomic_group_get_next(); - } - APPL_PRINT("\n"); -} +/* + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine Queue Types test example with included atomic groups.
+ *
+ * The test creates several EO-pairs and sends events between the queues in
+ * the pair. Each EO has an input queue (of type atomic, parallel or
+ * parallel-ordered) or, in the case of atomic groups, three(3) input atomic
+ * queues that belong to the same atomic group but have different priority.
+ * The events sent between the queues of the EO-pair are counted and
+ * statistics for each pair type are printed. If the queues in the EO-pair
+ * retain event order, this is also verified.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/* Number of queue type pairs (constant, don't change) */
+#define QUEUE_TYPE_PAIRS 10
+/*
+ * Number of test EOs and queues. Must be an even number.
+ * Test has NUM_EO/2 EO pairs, that send ping-pong events.
+ * Depending on test dynamics (e.g. single burst in atomic
+ * queue) only one EO of a pair might be active at a time.
+ */
+#define NUM_EO (8 * QUEUE_TYPE_PAIRS)
+/* Max number of queues supported by the test */
+#define MAX_QUEUES (NUM_EO / QUEUE_TYPE_PAIRS * 30)
+/* Number of ping-pong events per EO pair */
+#define NUM_EVENT (3 * 32)
+/* Number of data bytes in the event */
+#define DATA_SIZE 64
+/* Max number of cores supported by the test */
+#define MAX_CORES 64
+/* Print stats when the number of received events reaches this value on a core */
+#define PRINT_COUNT 0x1000000
+
+/** Define how many events are sent per em_send_multi() call */
+#define SEND_MULTI_MAX 32
+
+/*
+ * Enable atomic access checks.
+ * If enabled, the test will crash the application if the atomic-processing
+ * context is violated, i.e. it checks that events from an atomic queue are
+ * being processed one-by-one.
+ */
+#define VERIFY_ATOMIC_ACCESS 1 /* 0=False or 1=True */
+/*
+ * Verify that the receive func processing context works as expected
+ */
+#define VERIFY_PROCESSING_CONTEXT 1 /* 0=False or 1=True */
+
+/* Call em_atomic_processing_end every once in a while in EO-A */
+#define CALL_ATOMIC_PROCESSING_END__A 1 /* 0=False or 1=True */
+/* Call em_atomic_processing_end every once in a while in EO-B */
+#define CALL_ATOMIC_PROCESSING_END__B 1 /* 0=False or 1=True */
+
+/* Return 'TRUE' if the queue pair retains event order */
+#define ORDERED_PAIR(q_type_a, q_type_b) ( \
+	(((q_type_a) == EM_QUEUE_TYPE_ATOMIC) || \
+	 ((q_type_a) == EM_QUEUE_TYPE_PARALLEL_ORDERED)) && \
+	(((q_type_b) == EM_QUEUE_TYPE_ATOMIC) || \
+	 ((q_type_b) == EM_QUEUE_TYPE_PARALLEL_ORDERED)))
+
+#define ABS(nbr1, nbr2) (((nbr1) > (nbr2)) ? 
((nbr1) - (nbr2)) : \ + ((nbr2) - (nbr1))) + +#define PRINT_CORE_STAT_FMT \ +"Stat Core-%02i: Count/PairType\t" \ +"A-A:%6" PRIu64 " P-P:%6" PRIu64 " PO-PO:%6" PRIu64 "\t" \ +"P-A:%6" PRIu64 " PO-A:%6" PRIu64 " PO-P:%6" PRIu64 "\t" \ +"AG-AG:%6" PRIu64 " AG-A:%6" PRIu64 " AG-P:%6" PRIu64 " AG-PO:%6" PRIu64 "\t" \ +"cycles/event:%.0f @%.0fMHz %" PRIu64 "\n" + +/** + * Combinations of Queue Type pairs + */ +#define NO_AG (0) +#define IN_AG (1) +typedef struct queue_type_pairs_ { + em_queue_type_t q_type[2]; + int in_atomic_group[2]; +} queue_type_pair_t; + +queue_type_pair_t queue_type_pairs[QUEUE_TYPE_PAIRS] = { + /* Ordered Pair */ + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {NO_AG, NO_AG} }, + { {EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_TYPE_PARALLEL}, {NO_AG, NO_AG} }, + /* Ordered Pair */ + { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_PARALLEL_ORDERED}, + {NO_AG, NO_AG} }, + { {EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_TYPE_ATOMIC}, {NO_AG, NO_AG} }, + /* Ordered Pair */ + { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_ATOMIC}, + {NO_AG, NO_AG} }, + { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_PARALLEL}, + {NO_AG, NO_AG} }, + /* With Atomic Groups for atomic queues: */ + /* Ordered Pair */ + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {IN_AG, IN_AG} }, + /* Ordered Pair */ + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {IN_AG, NO_AG} }, + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL}, {IN_AG, NO_AG} }, + /* Ordered Pair */ + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL_ORDERED}, + {IN_AG, NO_AG} }, +}; + +COMPILE_TIME_ASSERT(sizeof(queue_type_pairs) == + (QUEUE_TYPE_PAIRS * sizeof(queue_type_pair_t)), + QUEUE_TYPE_PAIRS_SIZE_ERROR); + +typedef enum { + PT_ATOMIC_ATOMIC = 0, + PT_PARALLEL_PARALLEL = 1, + PT_PARALORD_PARALORD = 2, + PT_PARALLEL_ATOMIC = 3, + PT_PARALORD_ATOMIC = 4, + PT_PARALORD_PARALLEL = 5, + /* With Atomic Groups (AG) for atomic queues: */ + PT_AG_AG = 6, + PT_AG_ATOMIC = 7, + PT_AG_PARALLEL = 8, + PT_AG_PARALORD = 9, + PT_UNDEFINED +} pair_type_t; + +/** + * Test statistics (per core) + */ +typedef union { + uint8_t u8[2 * ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; + + struct { + uint64_t events; + uint64_t begin_cycles; + uint64_t end_cycles; + uint64_t print_count; + /* + * Pair-Type count, i.e. the number of events belonging to + * a certain pair-type on this core + */ + uint64_t pt_count[QUEUE_TYPE_PAIRS]; + }; +} core_stat_t; + +COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, + CORE_STAT_T__SIZE_ERROR); + +/** + * Test EO context + */ +typedef struct { + em_eo_t eo_hdl; + /* EO pair retains order? 0/1 */ + int ordered_pair; + pair_type_t pair_type; + int owns_ag_queues; + em_atomic_group_t agrp_hdl; + int peer_owns_ag_queues; + /* Atomic group is also set as queue type atomic */ + em_queue_type_t q_type; + env_spinlock_t verify_atomic_access; + + void *end[0] ENV_CACHE_LINE_ALIGNED; +} eo_context_t; + +COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, + EO_CTX_T__SIZE_ERROR); + +/** + * Test Queue context + */ +typedef struct { + em_queue_t q_hdl; + em_queue_type_t q_type; + int in_atomic_group; + unsigned int idx; + uint64_t seqno; + /* Number of events at the previous check-point */ + uint64_t prev_events; + /* + * Total number of events handled from the queue. + * Atomically incremented, either by __atomic_add_fetch() or + * protected by atomic context (set by queue type). 
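+	 * The counter is cache-line aligned so that these frequent updates
+	 * do not false-share with the read-mostly queue info fields above.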
+ */ + uint64_t num_events ENV_CACHE_LINE_ALIGNED; + + void *end[0] ENV_CACHE_LINE_ALIGNED; +} queue_context_t; + +COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, + Q_CTX_T__SIZE_ERROR); + +/* IDs stored in the event user area ID */ +#define EV_ID_START_EVENT 1 +#define EV_ID_DATA_EVENT 2 + +typedef struct { + int in_atomic_group_a; + int src_q_cnt; + em_queue_t src_queues[3]; + + int in_atomic_group_b; + int dst_q_cnt; + em_queue_t dst_queues[3]; +} start_event_uarea_t; + +typedef struct { + /* Next destination queue */ + em_queue_t dest; + em_queue_t src; + /* Sequence number */ + uint64_t seqno; +} data_event_uarea_t; + +typedef union { + start_event_uarea_t start; + data_event_uarea_t data; +} test_event_uarea_t; + +/** Data event content */ +typedef struct { + /* Test data */ + uint8_t data[DATA_SIZE]; +} data_event_t; + +typedef struct { + uint8_t u8[0]; /* no payload */ +} start_event_t; + +/** + * Test event, content identified by 'ev_id' + */ +typedef union { + start_event_t start; + data_event_t data; +} test_event_t; + +/** + * Queue Types test shared memory + */ +typedef struct { + core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; + + eo_context_t eo_context[NUM_EO] ENV_CACHE_LINE_ALIGNED; + + queue_context_t queue_context[MAX_QUEUES] ENV_CACHE_LINE_ALIGNED; + + unsigned num_queues ENV_CACHE_LINE_ALIGNED; + + em_pool_t pool; + + int teardown_in_progress; +} qtypes_shm_t; + +COMPILE_TIME_ASSERT(sizeof(qtypes_shm_t) % ENV_CACHE_LINE_SIZE == 0, + QTYPES_SHM_T__SIZE_ERROR); + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL qtypes_shm_t *qtypes_shm; + +/** + * Local Function Prototypes + */ +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +stop(void *eo_context, em_eo_t eo); + +static void +initialize_events(const start_event_uarea_t *start); + +static void +receive_a(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static void +receive_b(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static pair_type_t +get_pair_type(queue_type_pair_t *queue_type_pair); + +static inline void +verify_seqno(eo_context_t *const eo_ctx, queue_context_t *const q_ctx, + uint64_t seqno); + +static void +verify_all_queues_get_events(void); + +static inline void +verify_atomic_access__begin(eo_context_t *const eo_ctx); + +static inline void +verify_atomic_access__end(eo_context_t *const eo_ctx); + +static inline void +verify_processing_context(eo_context_t *const eo_ctx, em_queue_t queue); + +static void +print_core_stats(core_stat_t *const cstat, uint64_t print_events); + +static void +print_event_msg_string(void); + +static void +print_test_info(void); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Queue Types test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. 
+ */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + qtypes_shm = env_shared_reserve("QueueTypesSharedMem", + sizeof(qtypes_shm_t)); + em_register_error_handler(test_error_handler); + } else { + qtypes_shm = env_shared_lookup("QueueTypesSharedMem"); + } + + if (qtypes_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(__LINE__), 0xdead, + "Queue Types test init failed on EM-core: %u\n", + em_core_id()); + } else if (core == 0) { + memset(qtypes_shm, 0, sizeof(qtypes_shm_t)); + } +} + +/** + * Startup of the Queue Types test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_atomic_group_t atomic_group; + em_eo_t eo; + em_queue_t queue_a, queue_b; + em_queue_t queue_ag_a1, queue_ag_a2, queue_ag_a3; + em_queue_t queue_ag_b1, queue_ag_b2, queue_ag_b3; + em_queue_type_t q_type_a, q_type_b; + em_status_t ret, start_ret = EM_ERROR; + eo_context_t *eo_ctx; + queue_context_t *q_ctx; + pair_type_t pair_type; + unsigned int qcnt = 0; /* queue context index */ + int in_atomic_group_a, in_atomic_group_b; + int ordered_pair; + char eo_name[EM_EO_NAME_LEN]; + char q_name[EM_QUEUE_NAME_LEN]; + char ag_name[EM_ATOMIC_GROUP_NAME_LEN]; + int i; + uint8_t eo_idx = 0, q_idx = 0, agrp_idx = 0; + + queue_a = EM_QUEUE_UNDEF; + queue_b = EM_QUEUE_UNDEF; + + queue_ag_a1 = EM_QUEUE_UNDEF; + queue_ag_a2 = EM_QUEUE_UNDEF; + queue_ag_a3 = EM_QUEUE_UNDEF; + + queue_ag_b1 = EM_QUEUE_UNDEF; + queue_ag_b2 = EM_QUEUE_UNDEF; + queue_ag_b3 = EM_QUEUE_UNDEF; + + /* + * Create own pool with events containing user area. + */ + em_pool_cfg_t pool_cfg; + + em_pool_cfg_init(&pool_cfg); + pool_cfg.event_type = EM_EVENT_TYPE_SW; + pool_cfg.user_area.in_use = true; + pool_cfg.user_area.size = sizeof(test_event_uarea_t); + + pool_cfg.num_subpools = 1; + pool_cfg.subpool[0].size = sizeof(test_event_t); + pool_cfg.subpool[0].num = NUM_EVENT * NUM_EO; + /* no cache needed, everything allocated at start-up: */ + pool_cfg.subpool[0].cache_size = 0; + + em_pool_t pool = em_pool_create("pool:Qtypes-AG", + EM_POOL_UNDEF, &pool_cfg); + test_fatal_if(pool == EM_POOL_UNDEF, "pool create failed"); + + qtypes_shm->pool = pool; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + qtypes_shm->pool); + + test_fatal_if(qtypes_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + qtypes_shm->num_queues = 0; + qtypes_shm->teardown_in_progress = EM_FALSE; + + /* Create and start application pairs. Send initial test events */ + for (i = 0; i < (NUM_EO / 2); i++) { + q_type_a = queue_type_pairs[i % QUEUE_TYPE_PAIRS].q_type[0]; + in_atomic_group_a = + queue_type_pairs[i % QUEUE_TYPE_PAIRS].in_atomic_group[0]; + + q_type_b = queue_type_pairs[i % QUEUE_TYPE_PAIRS].q_type[1]; + in_atomic_group_b = + queue_type_pairs[i % QUEUE_TYPE_PAIRS].in_atomic_group[1]; + + ordered_pair = ORDERED_PAIR(q_type_a, q_type_b); + + pair_type = + get_pair_type(&queue_type_pairs[i % QUEUE_TYPE_PAIRS]); + test_fatal_if(pair_type == PT_UNDEFINED, + "Queue Pair Type UNDEFINED! 
(%u, %u)", + q_type_a, q_type_b); + + /* Create EO "A" */ + ret = EM_OK; + + eo_ctx = &qtypes_shm->eo_context[2 * i]; + eo_ctx->ordered_pair = ordered_pair; + eo_ctx->pair_type = pair_type; + eo_ctx->q_type = q_type_a; + eo_ctx->owns_ag_queues = in_atomic_group_a; + eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; + eo_ctx->peer_owns_ag_queues = in_atomic_group_b; + + snprintf(eo_name, sizeof(eo_name), "EO-A%" PRIu8 "", ++eo_idx); + eo_name[sizeof(eo_name) - 1] = '\0'; + eo = em_eo_create(eo_name, start, NULL, stop, NULL, receive_a, + eo_ctx); + + if (in_atomic_group_a && q_type_a == EM_QUEUE_TYPE_ATOMIC) { + snprintf(ag_name, sizeof(ag_name), "AG-A%" PRIu8 "", + ++agrp_idx); + ag_name[sizeof(ag_name) - 1] = '\0'; + atomic_group = + em_atomic_group_create(ag_name, + EM_QUEUE_GROUP_DEFAULT); + test_fatal_if(atomic_group == EM_ATOMIC_GROUP_UNDEF, + "Atomic group creation failed!"); + + eo_ctx->agrp_hdl = atomic_group; + + snprintf(q_name, sizeof(q_name), "AG:Q-A%" PRIu8 "", + ++q_idx); + q_name[sizeof(q_name) - 1] = '\0'; + queue_ag_a1 = em_queue_create_ag(q_name, + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + snprintf(q_name, sizeof(q_name), "AG:Q-A%" PRIu8 "", + ++q_idx); + q_name[sizeof(q_name) - 1] = '\0'; + queue_ag_a2 = em_queue_create_ag(q_name, + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + snprintf(q_name, sizeof(q_name), "AG:Q-A%" PRIu8 "", + ++q_idx); + q_name[sizeof(q_name) - 1] = '\0'; + queue_ag_a3 = em_queue_create_ag(q_name, + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + + ret = em_eo_add_queue_sync(eo, queue_ag_a1); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + ret = em_eo_add_queue_sync(eo, queue_ag_a2); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + ret = em_eo_add_queue_sync(eo, queue_ag_a3); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_a1; + q_ctx->q_type = q_type_a; + q_ctx->in_atomic_group = in_atomic_group_a; + q_ctx->idx = qcnt++; + ret = em_queue_set_context(queue_ag_a1, q_ctx); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_a2; + q_ctx->q_type = q_type_a; + q_ctx->in_atomic_group = in_atomic_group_a; + q_ctx->idx = qcnt++; + ret = em_queue_set_context(queue_ag_a2, q_ctx); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_a3; + q_ctx->q_type = q_type_a; + q_ctx->in_atomic_group = in_atomic_group_a; + q_ctx->idx = qcnt++; + ret = em_queue_set_context(queue_ag_a3, q_ctx); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + } else { + snprintf(q_name, sizeof(q_name), "Q-A%" PRIu8 "", + ++q_idx); + q_name[sizeof(q_name) - 1] = '\0'; + queue_a = em_queue_create(q_name, q_type_a, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + ret = em_eo_add_queue_sync(eo, queue_a); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_a; + q_ctx->q_type = q_type_a; + q_ctx->in_atomic_group = in_atomic_group_a; + q_ctx->idx = qcnt++; + ret = em_queue_set_context(queue_a, q_ctx); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + } + + /* update qcnt each round to avoid == 0 in recv-func */ + qtypes_shm->num_queues = qcnt; + /* Start EO-A */ + ret = em_eo_start_sync(eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO-A setup failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + + /* Create EO "B" */ + ret = EM_OK; + + 
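Each atomic-group queue above is set up with the same four-call sequence: create the queue into the group, add it to the EO, fill in the per-queue test context and attach that context to the queue. The following condensed sketch captures that pattern (an editorial illustration only, not part of this patch; the helper name setup_ag_queue is hypothetical):

static em_queue_t
setup_ag_queue(const char *name, em_atomic_group_t ag, em_eo_t eo,
	       em_queue_type_t q_type, int in_ag, unsigned int *qcnt)
{
	/* Create a queue belonging to the atomic group 'ag' */
	em_queue_t q = em_queue_create_ag(name, EM_QUEUE_PRIO_NORMAL, ag, NULL);
	/* Per-queue test state stored in shared memory */
	queue_context_t *q_ctx = &qtypes_shm->queue_context[*qcnt];

	test_fatal_if(q == EM_QUEUE_UNDEF, "AG queue create failed!");

	/* Attach the queue to the EO and set its context */
	test_fatal_if(em_eo_add_queue_sync(eo, q) != EM_OK, "add queue failed!");
	q_ctx->q_hdl = q;
	q_ctx->q_type = q_type;
	q_ctx->in_atomic_group = in_ag;
	q_ctx->idx = (*qcnt)++;
	test_fatal_if(em_queue_set_context(q, q_ctx) != EM_OK, "set ctx failed!");

	return q;
}

With such a helper, each of the three repeated AG-queue blocks above would reduce to a single call.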
eo_ctx = &qtypes_shm->eo_context[2 * i + 1]; + eo_ctx->ordered_pair = ordered_pair; + eo_ctx->pair_type = pair_type; + eo_ctx->q_type = q_type_b; + eo_ctx->owns_ag_queues = in_atomic_group_b; + eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; + eo_ctx->peer_owns_ag_queues = in_atomic_group_a; + + snprintf(eo_name, sizeof(eo_name), "EO-B%" PRIu8 "", ++eo_idx); + eo_name[sizeof(eo_name) - 1] = '\0'; + eo = em_eo_create(eo_name, start, NULL, stop, NULL, receive_b, + eo_ctx); + + if (in_atomic_group_b && q_type_b == EM_QUEUE_TYPE_ATOMIC) { + snprintf(ag_name, sizeof(ag_name), "AG-B%" PRIu8 "", + ++agrp_idx); + ag_name[sizeof(ag_name) - 1] = '\0'; + atomic_group = + em_atomic_group_create(ag_name, + EM_QUEUE_GROUP_DEFAULT); + test_fatal_if(atomic_group == EM_ATOMIC_GROUP_UNDEF, + "Atomic group creation failed!"); + + eo_ctx->agrp_hdl = atomic_group; + + snprintf(q_name, sizeof(q_name), "AG:Q-B%" PRIu8 "", + ++q_idx); + q_name[sizeof(q_name) - 1] = '\0'; + queue_ag_b1 = em_queue_create_ag(q_name, + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + snprintf(q_name, sizeof(q_name), "AG:Q-B%" PRIu8 "", + ++q_idx); + q_name[sizeof(q_name) - 1] = '\0'; + queue_ag_b2 = em_queue_create_ag(q_name, + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + snprintf(q_name, sizeof(q_name), "AG:Q-B%" PRIu8 "", + ++q_idx); + q_name[sizeof(q_name) - 1] = '\0'; + queue_ag_b3 = em_queue_create_ag(q_name, + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + + ret = em_eo_add_queue_sync(eo, queue_ag_b1); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + ret = em_eo_add_queue_sync(eo, queue_ag_b2); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + ret = em_eo_add_queue_sync(eo, queue_ag_b3); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_b1; + q_ctx->q_type = q_type_b; + q_ctx->in_atomic_group = in_atomic_group_b; + q_ctx->idx = qcnt++; + ret = em_queue_set_context(queue_ag_b1, q_ctx); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_b2; + q_ctx->q_type = q_type_b; + q_ctx->in_atomic_group = in_atomic_group_b; + q_ctx->idx = qcnt++; + ret = em_queue_set_context(queue_ag_b2, q_ctx); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_b3; + q_ctx->q_type = q_type_b; + q_ctx->in_atomic_group = in_atomic_group_b; + q_ctx->idx = qcnt++; + ret = em_queue_set_context(queue_ag_b3, q_ctx); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + } else { + snprintf(q_name, sizeof(q_name), "Q-B%" PRIu8 "", + ++q_idx); + q_name[sizeof(q_name) - 1] = '\0'; + queue_b = em_queue_create(q_name, q_type_b, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + ret = em_eo_add_queue_sync(eo, queue_b); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_b; + q_ctx->q_type = q_type_b; + q_ctx->in_atomic_group = in_atomic_group_b; + q_ctx->idx = qcnt++; + ret = em_queue_set_context(queue_b, q_ctx); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + } + + /* update qcnt each round to avoid == 0 in recv-func */ + qtypes_shm->num_queues = qcnt; + /* Start EO-B */ + ret = em_eo_start_sync(eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO-B setup failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + + /* + * Allocate and send the startup event to the first EO of the + * pair of this round. 
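+		 * The event initialization itself is deferred to the
+		 * receive function on purpose: sending the test events
+		 * already from here could mix up the sequence numbers of
+		 * ordered pairs, since all cores are running the dispatch
+		 * loop.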
+		 */
+		em_event_t event = em_alloc(sizeof(test_event_t),
+					    EM_EVENT_TYPE_SW,
+					    qtypes_shm->pool);
+		test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails");
+
+		size_t uarea_size;
+		test_event_uarea_t *test_uarea;
+
+		test_uarea = em_event_uarea_get(event, &uarea_size);
+		test_fatal_if(!test_uarea || uarea_size < sizeof(test_event_uarea_t),
+			      "Event User Area error: ptr:%p sz:%zu < %zu",
+			      test_uarea, uarea_size, sizeof(test_event_uarea_t));
+
+		ret = em_event_uarea_id_set(event, EV_ID_START_EVENT);
+		test_fatal_if(ret != EM_OK,
+			      "Error setting uarea id, err:%" PRI_STAT "", ret);
+
+		test_uarea->start.in_atomic_group_a = in_atomic_group_a;
+		if (in_atomic_group_a) {
+			test_uarea->start.src_q_cnt = 3;
+			test_uarea->start.src_queues[0] = queue_ag_a1;
+			test_uarea->start.src_queues[1] = queue_ag_a2;
+			test_uarea->start.src_queues[2] = queue_ag_a3;
+		} else {
+			test_uarea->start.src_q_cnt = 1;
+			test_uarea->start.src_queues[0] = queue_a;
+		}
+
+		test_uarea->start.in_atomic_group_b = in_atomic_group_b;
+		if (in_atomic_group_b) {
+			test_uarea->start.dst_q_cnt = 3;
+			test_uarea->start.dst_queues[0] = queue_ag_b1;
+			test_uarea->start.dst_queues[1] = queue_ag_b2;
+			test_uarea->start.dst_queues[2] = queue_ag_b3;
+		} else {
+			test_uarea->start.dst_q_cnt = 1;
+			test_uarea->start.dst_queues[0] = queue_b;
+		}
+
+		ret = em_send(event, test_uarea->start.src_queues[0]);
+		test_fatal_if(ret != EM_OK, "Event send:%" PRI_STAT "", ret);
+	}
+
+	APPL_PRINT("\n\nqctx:%u MAX:%i\n\n", qcnt, MAX_QUEUES);
+
+	test_fatal_if(qcnt > MAX_QUEUES || qtypes_shm->num_queues != qcnt,
+		      "Queue context number too high!");
+
+	print_test_info();
+}
+
+/**
+ * Test stop function
+ *
+ * @attention Run only on one EM core
+ *
+ * @param appl_conf Application configuration
+ *
+ * @see cm_setup() for setup and teardown.
+ */
+void
+test_stop(appl_conf_t *const appl_conf)
+{
+	const int core = em_core_id();
+	em_eo_t eo;
+	em_status_t ret;
+	eo_context_t *eo_ctx;
+	int i;
+
+	(void)appl_conf;
+
+	/* mark 'teardown in progress' to avoid seq.nbr check errors */
+	qtypes_shm->teardown_in_progress = EM_TRUE;
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	/* stop all EOs */
+	for (i = 0; i < NUM_EO; i++) {
+		eo_ctx = &qtypes_shm->eo_context[i];
+		eo = eo_ctx->eo_hdl;
+		ret = em_eo_stop_sync(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO stop:%" PRI_STAT " EO:%" PRI_EO "",
+			      ret, eo);
+	}
+}
+
+/**
+ * Termination of the 'Queue Types AG' test application.
+ *
+ * @attention Run on one EM core only
+ *
+ * @see cm_setup() for setup and teardown.
+ */
+void
+test_term(void)
+{
+	int core = em_core_id();
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	em_status_t ret = em_pool_delete(qtypes_shm->pool);
+
+	test_fatal_if(ret != EM_OK,
+		      "em_pool_delete(%" PRI_POOL "):%" PRI_STAT "",
+		      qtypes_shm->pool, ret);
+
+	if (core == 0) {
+		env_shared_free(qtypes_shm);
+		em_unregister_error_handler();
+	}
+}
+
+/**
+ * @private
+ *
+ * EO start function.
+ */
+static em_status_t
+start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	eo_context_t *eo_ctx = eo_context;
+
+	(void)conf;
+
+	APPL_PRINT("EO %" PRI_EO " starting.\n", eo);
+
+	eo_ctx->eo_hdl = eo;
+
+	if (VERIFY_ATOMIC_ACCESS)
+		env_spinlock_init(&eo_ctx->verify_atomic_access);
+
+	/*
+	 * Test: Verify that EO & queue _current() and
+	 * _get_context() APIs work as expected.
+	 */
+	test_fatal_if(em_eo_current() != eo, "Invalid current EO");
+	test_fatal_if(em_eo_get_context(eo) != eo_context,
+		      "Invalid current EO context");
+	test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF,
+		      "Invalid current queue");
+
+	return EM_OK;
+}
+
+/**
+ * @private
+ *
+ * EO stop function.
+ */
+static em_status_t
+stop(void *eo_context, em_eo_t eo)
+{
+	eo_context_t *const eo_ctx = (eo_context_t *)eo_context;
+	em_status_t ret;
+
+	APPL_PRINT("EO %" PRI_EO " stopping.\n", eo);
+
+	/* remove and delete all of the EO's queues */
+	ret = em_eo_remove_queue_all_sync(eo, EM_TRUE);
+	test_fatal_if(ret != EM_OK,
+		      "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "",
+		      ret, eo);
+
+	if (eo_ctx->agrp_hdl != EM_ATOMIC_GROUP_UNDEF) {
+		ret = em_atomic_group_delete(eo_ctx->agrp_hdl);
+		test_fatal_if(ret != EM_OK,
+			      "AGrp delete:%" PRI_STAT " EO:%" PRI_EO "",
+			      ret, eo);
+	}
+
+	/* delete the EO at the end of the stop-function */
+	ret = em_eo_delete(eo);
+	test_fatal_if(ret != EM_OK,
+		      "EO delete:%" PRI_STAT " EO:%" PRI_EO "",
+		      ret, eo);
+
+	return EM_OK;
+}
+
+static void
+initialize_events(const start_event_uarea_t *start)
+{
+	/*
+	 * Allocate and send test events to the EO-pair of this round
+	 */
+	const int max_q_cnt = start->src_q_cnt > start->dst_q_cnt ?
+			      start->src_q_cnt : start->dst_q_cnt;
+	/* tmp storage for all events to send this round */
+	em_event_t all_events[max_q_cnt][NUM_EVENT];
+	/* number of events for a queue in all_events[Q][events] */
+	int ev_cnt[max_q_cnt];
+	uint64_t seqno = 0;
+	int j, x, y;
+	em_status_t ret;
+
+	for (x = 0; x < max_q_cnt; x++)
+		ev_cnt[x] = 0;
+
+	for (j = 0; j < NUM_EVENT;) {
+		for (x = 0, y = 0; x < max_q_cnt; x++, y++, j++) {
+			em_event_t event = em_alloc(sizeof(test_event_t),
+						    EM_EVENT_TYPE_SW,
+						    qtypes_shm->pool);
+			test_fatal_if(event == EM_EVENT_UNDEF,
+				      "Event alloc fails");
+
+			test_event_t *test_event = em_event_pointer(event);
+			size_t uarea_size = 0;
+			test_event_uarea_t *test_uarea =
+				em_event_uarea_get(event, &uarea_size);
+
+			test_fatal_if(!test_event || !test_uarea ||
+				      uarea_size != sizeof(test_event_uarea_t),
+				      "Event payload/uarea error");
+
+			memset(test_event, 0, sizeof(test_event_t));
+			memset(test_uarea, 0, uarea_size);
+
+			ret = em_event_uarea_id_set(event, EV_ID_DATA_EVENT);
+			test_fatal_if(ret != EM_OK,
+				      "Error setting uarea id, err:%" PRI_STAT "",
+				      ret);
+
+			if (start->in_atomic_group_b)
+				test_uarea->data.dest = start->dst_queues[y];
+			else
+				test_uarea->data.dest = start->dst_queues[0];
+
+			test_uarea->data.src = start->src_queues[x];
+
+			if (start->in_atomic_group_a ==
+			    start->in_atomic_group_b) {
+				/* verify seqno (symmetric EO-pairs) */
+				test_uarea->data.seqno = seqno;
+			}
+
+			all_events[x][ev_cnt[x]] = event;
+			ev_cnt[x] += 1;
+		}
+		seqno += 1;
+	}
+
+	/* Send events to EO A */
+	for (x = 0; x < max_q_cnt; x++) {
+		int n, m;
+		int num_sent = 0;
+
+		/* Send in bursts of 'SEND_MULTI_MAX' events */
+		const int send_rounds = ev_cnt[x] / SEND_MULTI_MAX;
+		const int left_over = ev_cnt[x] % SEND_MULTI_MAX;
+
+		for (n = 0, m = 0; n < send_rounds;
+		     n++, m += SEND_MULTI_MAX) {
+			num_sent += em_send_multi(&all_events[x][m],
+						  SEND_MULTI_MAX,
+						  start->src_queues[x]);
+		}
+		if (left_over) {
+			num_sent += em_send_multi(&all_events[x][m], left_over,
+						  start->src_queues[x]);
+		}
+		test_fatal_if(num_sent != ev_cnt[x],
+			      "Event send multi failed:%d (%d)\n"
+			      "Q:%" PRI_QUEUE "",
+			      num_sent, ev_cnt[x], start->src_queues[x]);
+	}
+}
+
+/**
+ * @private
+ *
+ * EO receive function for EO A.
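+ *
+ * Handles two kinds of events, identified by the event user area id:
+ * a one-time start event (EV_ID_START_EVENT) that triggers the initial
+ * test event setup in initialize_events(), and data events
+ * (EV_ID_DATA_EVENT) that are verified and bounced back to the peer EO.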
+ * + * Forwards events to the next processing stage (EO) + * and calculates the event rate. + */ +static void +receive_a(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + eo_context_t *const eo_ctx = eo_context; + queue_context_t *const q_ctx = queue_context; + em_event_uarea_info_t uarea_info; + test_event_uarea_t *test_uarea; + em_queue_t dest_queue; + uint64_t queue_events; + uint64_t seqno; + em_status_t ret; + + const int core = em_core_id(); + core_stat_t *cstat = &qtypes_shm->core_stat[core]; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + ret = em_event_uarea_info(event, &uarea_info); + test_fatal_if(ret != EM_OK, + "em_event_uarea_info() fails:%" PRI_STAT "", ret); + test_uarea = uarea_info.uarea; + + if (unlikely(uarea_info.id.value == EV_ID_START_EVENT)) { + /* + * Start-up only, one time: initialize the test event sending. + * Called from EO-receive to avoid mixing up events & sequence + * numbers in start-up for ordered EO-pairs (sending from the + * start functions could mess up the seqno:s since all the + * cores are already in the dispatch loop). + */ + initialize_events(&test_uarea->start); + em_free(event); + return; + } + + test_fatal_if(uarea_info.id.value != EV_ID_DATA_EVENT, + "Unexpected ev-id:%d", uarea_info.id.value); + + if (VERIFY_ATOMIC_ACCESS) + verify_atomic_access__begin(eo_ctx); + + if (VERIFY_PROCESSING_CONTEXT) + verify_processing_context(eo_ctx, queue); + + seqno = test_uarea->data.seqno; + + /* Increment Q specific event counter (parallel Qs req atomic inc:s)*/ + if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) + queue_events = q_ctx->num_events++; + else + queue_events = __atomic_add_fetch(&q_ctx->num_events, 1, + __ATOMIC_RELAXED); + + test_fatal_if(test_uarea->data.src != queue, + "EO-A queue mismatch:%" PRI_QUEUE "!=%" PRI_QUEUE "", + test_uarea->data.src, queue); + + if (eo_ctx->ordered_pair && eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) { + /* Verify the seq nbr to make sure event order is maintained*/ + verify_seqno(eo_ctx, q_ctx, seqno); + } + + dest_queue = test_uarea->data.dest; + test_uarea->data.src = test_uarea->data.dest; + test_uarea->data.dest = queue; + + ret = em_send(event, dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, "EO-A em_send failure"); + } + + if (VERIFY_ATOMIC_ACCESS) + verify_atomic_access__end(eo_ctx); + + if (CALL_ATOMIC_PROCESSING_END__A) { + /* Call em_atomic_processing_end() every once in a while */ + if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC && + (queue_events % qtypes_shm->num_queues == q_ctx->idx)) + em_atomic_processing_end(); + } + + /* + * Update _core_ statistics after potentially releasing the + * atomic context. 
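+	 * This is safe outside the atomic context because the statistics
+	 * are per-core (indexed by em_core_id()), so no other core updates
+	 * the same counters.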
+ */ + uint64_t core_events = cstat->events; + uint64_t print_events = 0; + + if (unlikely(core_events == 0)) { + cstat->begin_cycles = env_get_cycle(); + core_events += 1; + cstat->pt_count[eo_ctx->pair_type] += 1; + } else if (unlikely(core_events > PRINT_COUNT)) { + cstat->end_cycles = env_get_cycle(); + /* indicate that statistics should be printed this round: */ + print_events = core_events; + core_events = 0; + } else { + core_events += 1; + cstat->pt_count[eo_ctx->pair_type] += 1; + } + cstat->events = core_events; + + /* Print core specific statistics */ + if (unlikely(print_events)) { + if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) + em_atomic_processing_end(); + + if (core == 0) + verify_all_queues_get_events(); + + print_core_stats(cstat, print_events); + + for (int i = 0; i < QUEUE_TYPE_PAIRS; i++) + cstat->pt_count[i] = 0; + + cstat->begin_cycles = env_get_cycle(); + } +} + +/** + * @private + * + * EO receive function for EO B. + * + * Forwards events to the next processing stage (EO). + */ +static void +receive_b(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + eo_context_t *const eo_ctx = eo_context; + queue_context_t *const q_ctx = queue_context; + em_queue_t dest_queue; + test_event_uarea_t *test_uarea; + uint64_t queue_events; + em_status_t ret; + + const int core = em_core_id(); + core_stat_t *cstat = &qtypes_shm->core_stat[core]; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (VERIFY_ATOMIC_ACCESS) + verify_atomic_access__begin(eo_ctx); + + if (VERIFY_PROCESSING_CONTEXT) + verify_processing_context(eo_ctx, queue); + + em_event_uarea_info_t uarea_info; + + ret = em_event_uarea_info(event, &uarea_info); + test_fatal_if(ret != EM_OK, + "em_event_uarea_info() fails:%" PRI_STAT "", ret); + test_fatal_if(uarea_info.id.value != EV_ID_DATA_EVENT, + "Unexpected ev-id:%d", uarea_info.id.value); + + /* Increment Q specific event counter (parallel Qs req atomic inc:s) */ + if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) + queue_events = q_ctx->num_events++; + else + queue_events = __atomic_add_fetch(&q_ctx->num_events, 1, + __ATOMIC_RELAXED); + test_uarea = uarea_info.uarea; + test_fatal_if(test_uarea->data.src != queue, + "EO-B queue mismatch:%" PRI_QUEUE "!=%" PRI_QUEUE "", + test_uarea->data.src, queue); + + if (eo_ctx->ordered_pair && eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) { + /* Verify the seq nbr to make sure event order is maintained*/ + verify_seqno(eo_ctx, q_ctx, test_uarea->data.seqno); + } + + dest_queue = test_uarea->data.dest; + test_uarea->data.src = test_uarea->data.dest; + test_uarea->data.dest = queue; + + ret = em_send(event, dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, "EO-B em_send failure"); + } + + if (VERIFY_ATOMIC_ACCESS) + verify_atomic_access__end(eo_ctx); + + if (CALL_ATOMIC_PROCESSING_END__B) { + /* Call em_atomic_processing_end() every once in a while */ + if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC && + (queue_events % qtypes_shm->num_queues == q_ctx->idx)) + em_atomic_processing_end(); + } + + /* + * Update _core_ statistics after potentially releasing the + * atomic context. 
+ */ + if (unlikely(cstat->events == 0)) + cstat->begin_cycles = env_get_cycle(); + cstat->events++; + + cstat->pt_count[eo_ctx->pair_type] += 1; +} + +static pair_type_t +get_pair_type(queue_type_pair_t *queue_type_pair) +{ + em_queue_type_t qt1 = queue_type_pair->q_type[0]; + em_queue_type_t qt2 = queue_type_pair->q_type[1]; + int in_ag1 = queue_type_pair->in_atomic_group[0]; + int in_ag2 = queue_type_pair->in_atomic_group[1]; + + switch (qt1) { + case EM_QUEUE_TYPE_ATOMIC: + switch (qt2) { + case EM_QUEUE_TYPE_ATOMIC: + if (in_ag1 && in_ag2) + return PT_AG_AG; + else if (in_ag1 || in_ag2) + return PT_AG_ATOMIC; + else + return PT_ATOMIC_ATOMIC; + + case EM_QUEUE_TYPE_PARALLEL: + if (in_ag1) + return PT_AG_PARALLEL; + else + return PT_PARALLEL_ATOMIC; + + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + if (in_ag1) + return PT_AG_PARALORD; + else + return PT_PARALORD_ATOMIC; + } + break; + + case EM_QUEUE_TYPE_PARALLEL: + switch (qt2) { + case EM_QUEUE_TYPE_ATOMIC: + if (in_ag2) + return PT_AG_PARALLEL; + else + return PT_PARALLEL_ATOMIC; + + case EM_QUEUE_TYPE_PARALLEL: + return PT_PARALLEL_PARALLEL; + + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + return PT_PARALORD_PARALLEL; + } + break; + + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + switch (qt2) { + case EM_QUEUE_TYPE_ATOMIC: + if (in_ag2) + return PT_AG_PARALORD; + else + return PT_PARALORD_ATOMIC; + + case EM_QUEUE_TYPE_PARALLEL: + return PT_PARALORD_PARALLEL; + + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + return PT_PARALORD_PARALORD; + } + break; + } + + return PT_UNDEFINED; +} + +static inline void +verify_seqno(eo_context_t *const eo_ctx, queue_context_t *const q_ctx, + uint64_t seqno) +{ + if (unlikely(qtypes_shm->teardown_in_progress)) + return; + + if (eo_ctx->owns_ag_queues == eo_ctx->peer_owns_ag_queues) { + const uint64_t max_seqno = (eo_ctx->owns_ag_queues) ? + NUM_EVENT / 3 - 1 : NUM_EVENT - 1; + + if (q_ctx->seqno != seqno) { + test_error((em_status_t)__LINE__, 0xdead, + "SEQUENCE ERROR A:\t" + "queue=%" PRI_QUEUE " Q-seqno=%" PRIu64 "\t" + "Event-seqno=%" PRIu64 " PT:%i", + q_ctx->q_hdl, q_ctx->seqno, seqno, + eo_ctx->pair_type); + exit(EXIT_FAILURE); + } + + if (q_ctx->seqno < max_seqno) + q_ctx->seqno++; + else + q_ctx->seqno = 0; + } +} + +/** + * Verifies that each queue processes all its events at least once per + * statistics round. + */ +static void +verify_all_queues_get_events(void) +{ + const unsigned int num_queues = qtypes_shm->num_queues; + unsigned int i, first = 1, q_evcnt_low = 0; + uint64_t curr, prev, diff; + + for (i = 0; i < num_queues; i++) { + queue_context_t *const tmp_qctx = + &qtypes_shm->queue_context[i]; + const uint64_t min_events = (tmp_qctx->in_atomic_group) ? + NUM_EVENT / 3 : NUM_EVENT; + const char *q_type_str; + + curr = __atomic_load_n(&tmp_qctx->num_events, __ATOMIC_RELAXED); + prev = tmp_qctx->prev_events; + diff = (curr >= prev) ? + curr - prev : UINT64_MAX - prev + curr + 1; + + tmp_qctx->prev_events = curr; + + if (unlikely(diff < min_events)) { + q_evcnt_low++; + if (first) { + first = 0; + print_event_msg_string(); + } + + switch (tmp_qctx->q_type) { + case EM_QUEUE_TYPE_ATOMIC: + if (tmp_qctx->in_atomic_group) + q_type_str = "AG"; + else + q_type_str = "A "; + break; + case EM_QUEUE_TYPE_PARALLEL: + q_type_str = "P "; + break; + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + q_type_str = "PO"; + break; + + default: + q_type_str = "??"; + break; + } + + APPL_PRINT("Q=%3" PRI_QUEUE "(%s cnt:%" PRIu64 ") %c", + tmp_qctx->q_hdl, q_type_str, diff, + (q_evcnt_low % 8 == 0) ? 
'\n' : ' '); + } + } + + if (!first) + APPL_PRINT("\nQueue count with too few events:%u\n\n", + q_evcnt_low); +} + +/** + * Try to take a spinlock and if it fails we know that another core is + * processing an event from the same atomic queue or atomic group, which + * should never happen => fatal error! The lock is for verification only, + * no sync purpose whatsoever. + */ +static inline void +verify_atomic_access__begin(eo_context_t *const eo_ctx) +{ + if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC && + unlikely(!env_spinlock_trylock(&eo_ctx->verify_atomic_access))) + test_error(EM_ERROR_SET_FATAL(__LINE__), 0xdead, + "EO Atomic context lost!"); +} + +/** + * Release the verification lock + */ +static inline void +verify_atomic_access__end(eo_context_t *const eo_ctx) +{ + if (eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) + env_spinlock_unlock(&eo_ctx->verify_atomic_access); +} + +/** + * Verify that the receive func processing context works as expected + */ +static inline void +verify_processing_context(eo_context_t *const eo_ctx, em_queue_t queue) +{ + const em_eo_t eo = eo_ctx->eo_hdl; + em_queue_t tmp_queue; + em_queue_type_t queue_type; + em_sched_context_type_t sched_type; + + /* + * Test: Verify that EO & queue _current() and + * _get_context() APIs work as expected. + */ + test_fatal_if(em_eo_current() != eo, "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo) != eo_ctx, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != queue, "Invalid current queue"); + + queue_type = em_queue_get_type(queue); + sched_type = em_sched_context_type_current(&tmp_queue); + test_fatal_if(tmp_queue != queue, "Invalid queue"); + test_fatal_if(queue_type != eo_ctx->q_type, "Q-type mismatch"); + + if (queue_type == EM_QUEUE_TYPE_ATOMIC) { + test_fatal_if(sched_type != EM_SCHED_CONTEXT_TYPE_ATOMIC, + "Invalid sched context type"); + } else if (queue_type == EM_QUEUE_TYPE_PARALLEL_ORDERED) { + test_fatal_if(sched_type != EM_SCHED_CONTEXT_TYPE_ORDERED, + "Invalid sched context type"); + } else if (queue_type == EM_QUEUE_TYPE_PARALLEL) { + test_fatal_if(sched_type != EM_SCHED_CONTEXT_TYPE_NONE, + "Invalid sched context type"); + } +} + +/** + * Print core specific statistics + */ +static void +print_core_stats(core_stat_t *const cstat, uint64_t print_events) +{ + uint64_t diff; + uint32_t hz; + double mhz; + double cycles_per_event; + uint64_t print_count; + + diff = env_cycles_diff(cstat->end_cycles, cstat->begin_cycles); + + print_count = cstat->print_count++; + cycles_per_event = (double)diff / (double)print_events; + + hz = env_core_hz(); + mhz = ((double)hz) / 1000000.0; + + APPL_PRINT(PRINT_CORE_STAT_FMT, em_core_id(), + cstat->pt_count[0], cstat->pt_count[1], cstat->pt_count[2], + cstat->pt_count[3], cstat->pt_count[4], cstat->pt_count[5], + cstat->pt_count[6], cstat->pt_count[7], cstat->pt_count[8], + cstat->pt_count[9], cycles_per_event, mhz, print_count); +} + +static void +print_event_msg_string(void) +{ + APPL_PRINT("\nToo few events detected for the following queues:\n"); +} + +static void +print_test_info(void) +{ + unsigned int num; + + /* Print the EO list */ + em_eo_t eo = em_eo_get_first(&num); + + APPL_PRINT("%d EOs:\n", num); + while (eo != EM_EO_UNDEF) { + em_eo_state_t state; + const char *state_str; + char buf[EM_EO_NAME_LEN]; + em_queue_t q; + + state = em_eo_get_state(eo); + switch (state) { + case EM_EO_STATE_UNDEF: + state_str = "UNDEF"; + break; + case EM_EO_STATE_CREATED: + state_str = "CREATED"; + break; + case EM_EO_STATE_STARTING: + state_str = "STARTING"; 
+ break; + case EM_EO_STATE_RUNNING: + state_str = "RUNNING"; + break; + case EM_EO_STATE_STOPPING: + state_str = "STOPPING"; + break; + case EM_EO_STATE_ERROR: + state_str = "ERROR"; + break; + default: + state_str = "UNKNOWN"; + break; + } + em_eo_get_name(eo, buf, EM_EO_NAME_LEN - 1); + APPL_PRINT(" EO:%" PRI_EO ":'%s' state:%s\n", + eo, buf, state_str); + + q = em_eo_queue_get_first(&num, eo); + while (q != EM_QUEUE_UNDEF) { + APPL_PRINT(" - Q:%" PRI_QUEUE "\n", q); + q = em_eo_queue_get_next(); + } + eo = em_eo_get_next(); + } + APPL_PRINT("\n"); + + /* Print the queue list */ + em_queue_t q = em_queue_get_first(&num); + + APPL_PRINT("%d queues:\n", num); + while (q != EM_QUEUE_UNDEF) { + em_queue_type_t type; + const char *type_str; + em_queue_t q_check; + char buf[EM_QUEUE_NAME_LEN]; + + em_queue_get_name(q, buf, EM_QUEUE_NAME_LEN - 1); + + type = em_queue_get_type(q); + switch (type) { + case EM_QUEUE_TYPE_UNDEF: + type_str = "UNDEF"; + break; + case EM_QUEUE_TYPE_ATOMIC: + type_str = "ATOMIC"; + break; + case EM_QUEUE_TYPE_PARALLEL: + type_str = "PARALLEL"; + break; + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + type_str = "ORDERED"; + break; + case EM_QUEUE_TYPE_UNSCHEDULED: + type_str = "UNSCHEDULED"; + break; + case EM_QUEUE_TYPE_LOCAL: + type_str = "LOCAL"; + break; + case EM_QUEUE_TYPE_OUTPUT: + type_str = "OUTPUT"; + break; + default: + type_str = "UNKNOWN"; + break; + } + + APPL_PRINT(" Q:%" PRI_QUEUE ":'%s'\ttype:%s\n", + q, buf, type_str); + q_check = em_queue_find(buf); + test_fatal_if(q_check != q, "Queue mismatch:\n" + "%" PRI_QUEUE " != %" PRI_QUEUE "", + q_check, q); + q = em_queue_get_next(); + } + APPL_PRINT("\n"); + + /* Print the atomic group list */ + em_atomic_group_t ag = em_atomic_group_get_first(&num); + char ag_name[EM_ATOMIC_GROUP_NAME_LEN]; + + APPL_PRINT("%d Atomic-Groups:\n", num); + + while (ag != EM_ATOMIC_GROUP_UNDEF) { + if (ag != EM_ATOMIC_GROUP_UNDEF) { + em_queue_t ag_queue; + em_atomic_group_t ag_check; + + em_atomic_group_get_name(ag, ag_name, sizeof(ag_name)); + APPL_PRINT(" AG:%" PRI_AGRP ":'%s'\n", ag, ag_name); + + ag_check = em_atomic_group_find(ag_name); + test_fatal_if(ag_check != ag, "AG mismatch:\n" + "%" PRI_AGRP " != %" PRI_AGRP "", + ag_check, ag); + + ag_queue = em_atomic_group_queue_get_first(&num, ag); + while (ag_queue != EM_QUEUE_UNDEF) { + APPL_PRINT(" - Q:%" PRI_QUEUE "\n", + ag_queue); + ag_queue = em_atomic_group_queue_get_next(); + } + } + ag = em_atomic_group_get_next(); + } + APPL_PRINT("\n"); +} diff --git a/programs/example/queue/queue_types_local.c b/programs/example/queue/queue_types_local.c index da29b4b4..0437974f 100644 --- a/programs/example/queue/queue_types_local.c +++ b/programs/example/queue/queue_types_local.c @@ -1,1595 +1,1592 @@ -/* - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * * Neither the name of the copyright holder nor the names of its
- *   contributors may be used to endorse or promote products derived
- *   from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine Queue Types test/example with included local queues.
- *
- * Similar to the queue_types_ag.c example but additionally adds local queues
- * between the scheduled queues.
- * See programs/example/queue/queue_types_ag.c
- */
-
-#include <inttypes.h>
-#include <string.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/* Number of queue type pairs (constant, don't change) */
-#define QUEUE_TYPE_PAIRS 10
-/*
- * Number of test EOs and queues. Must be an even number.
- * Test has NUM_EO/2 EO pairs, that send ping-pong events.
- * Depending on test dynamics (e.g. single burst in atomic
- * queue) only one EO of a pair might be active at a time.
- */
-#define NUM_EO (2 * 8 * QUEUE_TYPE_PAIRS)
-/* Max number of queues supported by the test */
-#define MAX_QUEUES (NUM_EO / QUEUE_TYPE_PAIRS * 30)
-/* Number of ping-pong events per EO pair */
-#define NUM_EVENT (3 * 32)
-/* Number of data bytes in the event */
-#define DATA_SIZE 64
-/* Max number of cores supported by the test */
-#define MAX_CORES 64
-/* Print stats when the number of received events reaches this value on a core*/
-#define PRINT_COUNT 0x1000000
-
-/** Define how many events are sent per em_send_multi() call */
-#define SEND_MULTI_MAX 32
-
-/*
- * Enable atomic access checks.
- * If enabled will crash the application if the atomic-processing context
- * is violated, i.e. checks that events from an atomic queue are being
- * processed one-by-one.
- */
-#define VERIFY_ATOMIC_ACCESS 1 /* 0=False or 1=True */
-/* Call em_atomic_processing_end every once in a while */
-#define CALL_ATOMIC_PROCESSING_END 1 /* 0=False or 1=True */
-
-/* Return 'TRUE' if the queue pair retains event order */
-#define ORDERED_PAIR(q_type_a, q_type_b) ( \
-	(((q_type_a) == EM_QUEUE_TYPE_ATOMIC) || \
-	 ((q_type_a) == EM_QUEUE_TYPE_PARALLEL_ORDERED)) && \
-	(((q_type_b) == EM_QUEUE_TYPE_ATOMIC) || \
-	 ((q_type_b) == EM_QUEUE_TYPE_PARALLEL_ORDERED)))
-
-#define ABS(nbr1, nbr2) (((nbr1) > (nbr2)) ? 
((nbr1) - (nbr2)) : \ - ((nbr2) - (nbr1))) - -#define PRINT_CORE_STAT_FMT \ -"Core-%02i:\t" \ -"A-L-A-L:%6" PRIu64 " P-L-P-L:%6" PRIu64 " PO-L-PO-L:%6" PRIu64 "\t" \ -"P-L-A-L:%6" PRIu64 " PO-L-A-L:%6" PRIu64 " PO-L-P-L:%6" PRIu64 "\t" \ -"AG-L-AG-L:%6" PRIu64 " AG-L-A-L:%6" PRIu64 "\t" \ -"AG-L-P-L:%6" PRIu64 " AG-L-PO-L:%6" PRIu64 "\t" \ -"cycles/event:%.0f @%.0fMHz %" PRIu64 "\n" - -/** - * Combinations of Queue Type pairs - */ -#define NO_AG (0) -#define IN_AG (1) -typedef struct queue_type_pairs_ { - em_queue_type_t q_type[2]; - int in_atomic_group[2]; -} queue_type_pair_t; - -queue_type_pair_t queue_type_pairs[QUEUE_TYPE_PAIRS] = { - /* Ordered Pair */ - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {NO_AG, NO_AG} }, - { {EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_TYPE_PARALLEL}, {NO_AG, NO_AG} }, - /* Ordered Pair */ - { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_PARALLEL_ORDERED}, - {NO_AG, NO_AG} }, - { {EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_TYPE_ATOMIC}, {NO_AG, NO_AG} }, - /* Ordered Pair */ - { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_ATOMIC}, - {NO_AG, NO_AG} }, - { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_PARALLEL}, - {NO_AG, NO_AG} }, - /* With Atomic Groups for atomic queues: */ - /* Ordered Pair */ - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {IN_AG, IN_AG} }, - /* Ordered Pair */ - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {IN_AG, NO_AG} }, - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL}, {IN_AG, NO_AG} }, - /* Ordered Pair */ - { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL_ORDERED}, - {IN_AG, NO_AG} }, -}; - -COMPILE_TIME_ASSERT(sizeof(queue_type_pairs) == - (QUEUE_TYPE_PAIRS * sizeof(queue_type_pair_t)), - QUEUE_TYPE_PAIRS_SIZE_ERROR); - -typedef enum { - PT_ATOMIC_ATOMIC = 0, - PT_PARALLEL_PARALLEL = 1, - PT_PARALORD_PARALORD = 2, - PT_PARALLEL_ATOMIC = 3, - PT_PARALORD_ATOMIC = 4, - PT_PARALORD_PARALLEL = 5, - /* With Atomic Groups (AG) for atomic queues: */ - PT_AG_AG = 6, - PT_AG_ATOMIC = 7, - PT_AG_PARALLEL = 8, - PT_AG_PARALORD = 9, - PT_UNDEFINED -} pair_type_t; - -/** - * Test statistics (per core) - */ -typedef union { - uint8_t u8[2 * ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - - struct { - uint64_t events; - uint64_t begin_cycles; - uint64_t end_cycles; - uint64_t print_count; - /* - * Pair-Type count, i.e. the number of events belonging to - * a certain pair-type on this core - */ - uint64_t pt_count[QUEUE_TYPE_PAIRS]; - }; -} core_stat_t; - -COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, - CORE_STAT_T__SIZE_ERROR); - -/** - * Test EO context - */ -typedef struct { - em_eo_t eo_hdl; - /* EO pair retains order? 
0/1 */ - int ordered_pair; - pair_type_t pair_type; - int owns_ag_queues; - em_atomic_group_t agrp_hdl; - int peer_owns_ag_queues; - /* Atomic group is also set as queue type atomic */ - em_queue_type_t q_type; - env_spinlock_t verify_atomic_access; - - void *end[0] ENV_CACHE_LINE_ALIGNED; -} eo_context_t; - -COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, - EO_CTX_T__SIZE_ERROR); - -/** - * Test Queue context - */ -typedef struct { - em_queue_t q_hdl; - em_queue_type_t q_type; - unsigned int idx; - - union { - struct { - /* Associated local queue */ - em_queue_t local_q_hdl; - int in_atomic_group; - uint64_t seqno; - /* Total number of events handled from the queue */ - env_atomic64_t num_events; - /* Number of events at the previous check-point */ - uint64_t prev_events; - } sched; - - struct { - /* Total number of events handled from the queue */ - uint64_t num_events; - /* Number of events at the previous check-point */ - uint64_t prev_events; - } local; - }; - - void *end[0] ENV_CACHE_LINE_ALIGNED; -} queue_context_t; - -COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, - Q_CTX_T__SIZE_ERROR); - -#define EV_ID_DATA_EVENT 1 -#define EV_ID_START_EVENT 2 -/** Data event content */ -typedef struct { - int ev_id; - /* Next destination queue */ - em_queue_t dest; - em_queue_t src; - /* Sequence number */ - uint64_t seqno; - /* Test data */ - uint8_t data[DATA_SIZE]; -} data_event_t; -/** Startup event content */ -typedef struct { - int ev_id; - - int in_atomic_group_a; - int src_q_cnt; - em_queue_t src_queues[3]; - - int in_atomic_group_b; - int dst_q_cnt; - em_queue_t dst_queues[3]; -} start_event_t; -/** - * Test event, content identified by 'ev_id' - */ -typedef union { - int ev_id; - data_event_t data; - start_event_t start; -} test_event_t; - -/** - * Queue Types test shared memory - */ -typedef struct { - core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; - - eo_context_t eo_context[NUM_EO] ENV_CACHE_LINE_ALIGNED; - - queue_context_t queue_context[MAX_QUEUES] ENV_CACHE_LINE_ALIGNED; - - unsigned num_queues ENV_CACHE_LINE_ALIGNED; - - em_pool_t pool; - - int teardown_in_progress; -} qtypes_shm_t; - -COMPILE_TIME_ASSERT(sizeof(qtypes_shm_t) % ENV_CACHE_LINE_SIZE == 0, - QTYPES_SHM_T__SIZE_ERROR); - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL qtypes_shm_t *qtypes_shm; - -/** - * Local Function Prototypes - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); -static em_status_t -start_locq(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); -static em_status_t -start_local(void *eo_ctx, em_eo_t eo); -static em_status_t -start_local_locq(void *eo_ctx, em_eo_t eo); - -static em_status_t -stop(void *eo_context, em_eo_t eo); -static em_status_t -stop_locq(void *eo_context, em_eo_t eo); -static em_status_t -stop_local(void *eo_ctx, em_eo_t eo); -static em_status_t -stop_local_locq(void *eo_ctx, em_eo_t eo); - -static void -receive_a(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -receive_b(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -receive_locq(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); - -static pair_type_t -get_pair_type(queue_type_pair_t *queue_type_pair); - -static inline void -verify_seqno(eo_context_t *const eo_ctx, queue_context_t *const q_ctx, - uint64_t seqno); - -static void 
-verify_all_queues_get_events(void); - -static inline void -verify_atomic_access__begin(eo_context_t *const eo_ctx); - -static inline void -verify_atomic_access__end(eo_context_t *const eo_ctx); - -static void -print_core_stats(core_stat_t *const cstat, uint64_t print_events); - -static void -print_event_msg_string(void); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Queue Types test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - qtypes_shm = env_shared_reserve("QueueTypesSharedMem", - sizeof(qtypes_shm_t)); - em_register_error_handler(test_error_handler); - } else { - qtypes_shm = env_shared_lookup("QueueTypesSharedMem"); - } - - if (qtypes_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(__LINE__), 0xdead, - "Queue Types test init failed on EM-core: %u\n", - em_core_id()); - } else if (core == 0) { - memset(qtypes_shm, 0, sizeof(qtypes_shm_t)); - } -} - -/** - * Startup of the Queue Types test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_atomic_group_t atomic_group; - em_eo_t eo, eo_locq; - em_queue_t queue_a, queue_b; - em_queue_t queue_ag_a1, queue_ag_a2, queue_ag_a3; - em_queue_t queue_ag_b1, queue_ag_b2, queue_ag_b3; - em_queue_t queue_local_a, queue_local_b; - em_queue_type_t q_type_a, q_type_b; - em_status_t ret, start_ret = EM_ERROR; - eo_context_t *eo_ctx; - queue_context_t *q_ctx; - pair_type_t pair_type; - unsigned int qcnt = 0; /* queue context index */ - unsigned int eocnt = 0; /* eo context index */ - int in_atomic_group_a, in_atomic_group_b; - int ordered_pair; - char ag_name[EM_ATOMIC_GROUP_NAME_LEN]; - int i; - - queue_a = EM_QUEUE_UNDEF; - queue_b = EM_QUEUE_UNDEF; - - queue_ag_a1 = EM_QUEUE_UNDEF; - queue_ag_a2 = EM_QUEUE_UNDEF; - queue_ag_a3 = EM_QUEUE_UNDEF; - - queue_ag_b1 = EM_QUEUE_UNDEF; - queue_ag_b2 = EM_QUEUE_UNDEF; - queue_ag_b3 = EM_QUEUE_UNDEF; - - queue_local_a = EM_QUEUE_UNDEF; - queue_local_b = EM_QUEUE_UNDEF; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. 
- */ - if (appl_conf->num_pools >= 1) - qtypes_shm->pool = appl_conf->pools[0]; - else - qtypes_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - qtypes_shm->pool); - - test_fatal_if(qtypes_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - test_fatal_if(em_eo_current() != EM_EO_UNDEF, - "Invalid current EO"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - - qtypes_shm->num_queues = 0; - qtypes_shm->teardown_in_progress = EM_FALSE; - - /* Create and start application pipelines. Send initial test events */ - for (i = 0; i < (NUM_EO / 4); i++) { - q_type_a = queue_type_pairs[i % QUEUE_TYPE_PAIRS].q_type[0]; - in_atomic_group_a = - queue_type_pairs[i % QUEUE_TYPE_PAIRS].in_atomic_group[0]; - - q_type_b = queue_type_pairs[i % QUEUE_TYPE_PAIRS].q_type[1]; - in_atomic_group_b = - queue_type_pairs[i % QUEUE_TYPE_PAIRS].in_atomic_group[1]; - - ordered_pair = ORDERED_PAIR(q_type_a, q_type_b); - - pair_type = - get_pair_type(&queue_type_pairs[i % QUEUE_TYPE_PAIRS]); - test_fatal_if(pair_type == PT_UNDEFINED, - "Queue Pair Type UNDEFINED! (%u, %u)", - q_type_a, q_type_b); - - /* Create EO "local-A" */ - eo_ctx = &qtypes_shm->eo_context[eocnt++]; - eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; - - eo_locq = em_eo_create("EO-local-A", - start_locq, start_local_locq, - stop_locq, stop_local_locq, - receive_locq, eo_ctx); - queue_local_a = em_queue_create("queue-local-A", - EM_QUEUE_TYPE_LOCAL, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_UNDEF, NULL); - ret = em_eo_add_queue_sync(eo_locq, queue_local_a); - test_fatal_if(ret != EM_OK, "EO-local-A setup failed!"); - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_local_a; - q_ctx->q_type = EM_QUEUE_TYPE_LOCAL; - q_ctx->idx = qcnt++; - - ret = em_queue_set_context(queue_local_a, q_ctx); - test_fatal_if(ret != EM_OK, "EO-local-A setup failed!"); - - ret = em_eo_start_sync(eo_locq, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO-local-A setup:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - - /* Create EO "A" */ - eo_ctx = &qtypes_shm->eo_context[eocnt++]; - eo_ctx->ordered_pair = ordered_pair; - eo_ctx->pair_type = pair_type; - eo_ctx->q_type = q_type_a; - eo_ctx->owns_ag_queues = in_atomic_group_a; - eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; - eo_ctx->peer_owns_ag_queues = in_atomic_group_b; - - eo = em_eo_create("EO-A", start, NULL, stop, NULL, receive_a, - eo_ctx); - - if (in_atomic_group_a && q_type_a == EM_QUEUE_TYPE_ATOMIC) { - atomic_group = - em_atomic_group_create("group_a", - EM_QUEUE_GROUP_DEFAULT); - test_fatal_if(atomic_group == EM_ATOMIC_GROUP_UNDEF, - "Atomic group creation failed!"); - - if (em_atomic_group_get_name(atomic_group, ag_name, - EM_ATOMIC_GROUP_NAME_LEN)) - APPL_PRINT("New atomic group:%s for EO:\t" - "%" PRI_EO "\n", ag_name, eo); - - eo_ctx->agrp_hdl = atomic_group; - - queue_ag_a1 = em_queue_create_ag("AG:Q A1", - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - queue_ag_a2 = em_queue_create_ag("AG:Q A2", - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - queue_ag_a3 = em_queue_create_ag("AG:Q A3", - 
EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - - ret = em_eo_add_queue_sync(eo, queue_ag_a1); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - ret = em_eo_add_queue_sync(eo, queue_ag_a2); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - ret = em_eo_add_queue_sync(eo, queue_ag_a3); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_a1; - q_ctx->q_type = q_type_a; - q_ctx->idx = qcnt++; - q_ctx->sched.local_q_hdl = queue_local_a; - q_ctx->sched.in_atomic_group = in_atomic_group_a; - - ret = em_queue_set_context(queue_ag_a1, q_ctx); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_a2; - q_ctx->q_type = q_type_a; - q_ctx->idx = qcnt++; - q_ctx->sched.local_q_hdl = queue_local_a; - q_ctx->sched.in_atomic_group = in_atomic_group_a; - - ret = em_queue_set_context(queue_ag_a2, q_ctx); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_a3; - q_ctx->q_type = q_type_a; - q_ctx->idx = qcnt++; - q_ctx->sched.local_q_hdl = queue_local_a; - q_ctx->sched.in_atomic_group = in_atomic_group_a; - - ret = em_queue_set_context(queue_ag_a3, q_ctx); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - } else { - queue_a = em_queue_create("queue A", q_type_a, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - ret = em_eo_add_queue_sync(eo, queue_a); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_a; - q_ctx->q_type = q_type_a; - q_ctx->idx = qcnt++; - q_ctx->sched.local_q_hdl = queue_local_a; - q_ctx->sched.in_atomic_group = in_atomic_group_a; - - ret = em_queue_set_context(queue_a, q_ctx); - test_fatal_if(ret != EM_OK, "EO-A setup failed!"); - } - - /* Start EO-A */ - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO-A setup failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - - /* Create EO "local-B" */ - eo_ctx = &qtypes_shm->eo_context[eocnt++]; - eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; - - eo_locq = em_eo_create("EO-local-B", start_locq, NULL, - stop_locq, NULL, receive_locq, eo_ctx); - queue_local_b = em_queue_create("queue-local-B", - EM_QUEUE_TYPE_LOCAL, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_UNDEF, NULL); - ret = em_eo_add_queue_sync(eo_locq, queue_local_b); - test_fatal_if(ret != EM_OK, "EO-local-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_local_b; - q_ctx->q_type = EM_QUEUE_TYPE_LOCAL; - q_ctx->idx = qcnt++; - - ret = em_queue_set_context(queue_local_b, q_ctx); - test_fatal_if(ret != EM_OK, "EO-local-B setup failed!"); - - ret = em_eo_start_sync(eo_locq, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO-local-B setup:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - - test_fatal_if(ret != EM_OK, "EO-local-B setup failed!"); - - /* Create EO "B" */ - eo_ctx = &qtypes_shm->eo_context[eocnt++]; - eo_ctx->ordered_pair = ordered_pair; - eo_ctx->pair_type = pair_type; - eo_ctx->q_type = q_type_b; - eo_ctx->owns_ag_queues = in_atomic_group_b; - eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; - eo_ctx->peer_owns_ag_queues = in_atomic_group_a; - - eo = em_eo_create("EO-B", start, start_local, stop, stop_local, - receive_b, eo_ctx); - - if (in_atomic_group_b && q_type_b == EM_QUEUE_TYPE_ATOMIC) { - atomic_group = - em_atomic_group_create("group_b", - 
EM_QUEUE_GROUP_DEFAULT); - test_fatal_if(atomic_group == EM_ATOMIC_GROUP_UNDEF, - "Atomic group creation failed!"); - - if (em_atomic_group_get_name(atomic_group, ag_name, - EM_ATOMIC_GROUP_NAME_LEN)) - APPL_PRINT("New atomic group:%s for EO:\t" - "%" PRI_EO "\n", ag_name, eo); - - eo_ctx->agrp_hdl = atomic_group; - - queue_ag_b1 = em_queue_create_ag("AG:Q B1", - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - queue_ag_b2 = em_queue_create_ag("AG:Q B2", - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - queue_ag_b3 = em_queue_create_ag("AG:Q B3", - EM_QUEUE_PRIO_NORMAL, - atomic_group, NULL); - - ret = em_eo_add_queue_sync(eo, queue_ag_b1); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - ret = em_eo_add_queue_sync(eo, queue_ag_b2); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - ret = em_eo_add_queue_sync(eo, queue_ag_b3); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_b1; - q_ctx->q_type = q_type_b; - q_ctx->idx = qcnt++; - q_ctx->sched.local_q_hdl = queue_local_b; - q_ctx->sched.in_atomic_group = in_atomic_group_b; - ret = em_queue_set_context(queue_ag_b1, q_ctx); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_b2; - q_ctx->q_type = q_type_b; - q_ctx->idx = qcnt++; - q_ctx->sched.local_q_hdl = queue_local_b; - q_ctx->sched.in_atomic_group = in_atomic_group_b; - ret = em_queue_set_context(queue_ag_b2, q_ctx); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_ag_b3; - q_ctx->q_type = q_type_b; - q_ctx->idx = qcnt++; - q_ctx->sched.local_q_hdl = queue_local_b; - q_ctx->sched.in_atomic_group = in_atomic_group_b; - ret = em_queue_set_context(queue_ag_b3, q_ctx); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - } else { - queue_b = em_queue_create("queue B", q_type_b, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - ret = em_eo_add_queue_sync(eo, queue_b); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - - q_ctx = &qtypes_shm->queue_context[qcnt]; - q_ctx->q_hdl = queue_b; - q_ctx->q_type = q_type_b; - q_ctx->idx = qcnt++; - q_ctx->sched.local_q_hdl = queue_local_b; - q_ctx->sched.in_atomic_group = in_atomic_group_b; - ret = em_queue_set_context(queue_b, q_ctx); - test_fatal_if(ret != EM_OK, "EO-B setup failed!"); - } - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO-B setup failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - - /* update qcnt each round to avoid == 0 in recv-func */ - qtypes_shm->num_queues = qcnt; - - APPL_PRINT("\n"); - /* - * Allocate and send the startup event to the first EO of the - * pair of this round. 
- */ - em_event_t event = em_alloc(sizeof(start_event_t), - EM_EVENT_TYPE_SW, - qtypes_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); - start_event_t *start_event = em_event_pointer(event); - - start_event->ev_id = EV_ID_START_EVENT; - - start_event->in_atomic_group_a = in_atomic_group_a; - if (in_atomic_group_a) { - start_event->src_q_cnt = 3; - start_event->src_queues[0] = queue_ag_a1; - start_event->src_queues[1] = queue_ag_a2; - start_event->src_queues[2] = queue_ag_a3; - } else { - start_event->src_q_cnt = 1; - start_event->src_queues[0] = queue_a; - } - - start_event->in_atomic_group_b = in_atomic_group_b; - if (in_atomic_group_b) { - start_event->dst_q_cnt = 3; - start_event->dst_queues[0] = queue_ag_b1; - start_event->dst_queues[1] = queue_ag_b2; - start_event->dst_queues[2] = queue_ag_b3; - } else { - start_event->dst_q_cnt = 1; - start_event->dst_queues[0] = queue_b; - } - - ret = em_send(event, start_event->src_queues[0]); - test_fatal_if(ret != EM_OK, "Event send:%" PRI_STAT "", ret); - } - - APPL_PRINT("\n\nQs:%i MAX:%i\n", qcnt, MAX_QUEUES); - APPL_PRINT("EOs:%i MAX:%i\n\n", eocnt, NUM_EO); - qtypes_shm->num_queues = qcnt; - - test_fatal_if(qcnt > MAX_QUEUES, "Queue context number too high!"); -} - -/** - * Test stop function - * - * @attention Run only on one EM core - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and teardown. - */ -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - eo_context_t *eo_ctx; - int i; - - (void)appl_conf; - - /* mark 'teardown in progress' to avoid errors seq.nbr check errors */ - qtypes_shm->teardown_in_progress = EM_TRUE; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - /* stop all EOs */ - for (i = 0; i < NUM_EO; i++) { - eo_ctx = &qtypes_shm->eo_context[i]; - eo = eo_ctx->eo_hdl; - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO stop:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - } -} - -/** - * Termination of the 'Queue Types Local' test application. - * - * @attention Run on one EM core only - * - * @see cm_setup() for setup and teardown. - */ -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(qtypes_shm); - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting ", eo); - - eo_ctx->eo_hdl = eo; - - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. - */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - - if (VERIFY_ATOMIC_ACCESS) - env_spinlock_init(&eo_ctx->verify_atomic_access); - - return EM_OK; -} - -/** - * @private - * - * EO local-start function. - */ -static em_status_t -start_local(void *eo_context, em_eo_t eo) -{ - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. 
- */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - return EM_OK; -} - -/** - * @private - * - * EO start function for EOs handling the local queues. - */ -static em_status_t -start_locq(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO-locq %" PRI_EO " starting ", eo); - - eo_ctx->eo_hdl = eo; - - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. - */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - - return EM_OK; -} - -/** - * @private - * - * EO local-start function for EOs handling the local queues. - */ -static em_status_t -start_local_locq(void *eo_context, em_eo_t eo) -{ - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. - */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t -stop(void *eo_context, em_eo_t eo) -{ - eo_context_t *const eo_ctx = (eo_context_t *)eo_context; - em_status_t ret; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. - */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - if (eo_ctx->agrp_hdl != EM_ATOMIC_GROUP_UNDEF) { - ret = em_atomic_group_delete(eo_ctx->agrp_hdl); - test_fatal_if(ret != EM_OK, - "AGrp delete:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - } - - /* delete the EO at the end of the stop-function */ - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO delete:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - return EM_OK; -} - -/** - * @private - * - * EO local-stop function. - */ -static em_status_t -stop_local(void *eo_context, em_eo_t eo) -{ - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. - */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - return EM_OK; -} - -/** - * @private - * - * EO stop function for EOs handling the local queues. - */ -static em_status_t -stop_locq(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - - APPL_PRINT("EO-locq %" PRI_EO " stopping.\n", eo); - - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. 
- */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - - return EM_OK; -} - -/** - * @private - * - * EO local-stop function for EOs handling the local queues. - */ -static em_status_t -stop_local_locq(void *eo_context, em_eo_t eo) -{ - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. - */ - test_fatal_if(em_eo_current() != eo, "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, - "Invalid current queue"); - return EM_OK; -} - -static void -initialize_events(start_event_t *const start) -{ - /* - * Allocate and send test events to the EO-pair of this round - */ - const int max_q_cnt = start->src_q_cnt > start->dst_q_cnt ? - start->src_q_cnt : start->dst_q_cnt; - /* tmp storage for all events to send this round */ - em_event_t all_events[max_q_cnt][NUM_EVENT]; - /* number of events for a queue in all_events[Q][events] */ - int ev_cnt[max_q_cnt]; - uint64_t seqno = 0; - int j, x, y; - - for (x = 0; x < max_q_cnt; x++) - ev_cnt[x] = 0; - - for (j = 0; j < NUM_EVENT;) { - for (x = 0, y = 0; x < max_q_cnt; x++, y++, j++) { - em_event_t event = em_alloc(sizeof(test_event_t), - EM_EVENT_TYPE_SW, - qtypes_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "Event alloc fails"); - - test_event_t *const test_event = - em_event_pointer(event); - - memset(test_event, 0, sizeof(test_event_t)); - test_event->ev_id = EV_ID_DATA_EVENT; - - if (start->in_atomic_group_b) - test_event->data.dest = start->dst_queues[y]; - else - test_event->data.dest = start->dst_queues[0]; - - test_event->data.src = start->src_queues[x]; - - if (start->in_atomic_group_a == - start->in_atomic_group_b) { - /* verify seqno (symmetric EO-pairs)*/ - test_event->data.seqno = seqno; - } - - all_events[x][ev_cnt[x]] = event; - ev_cnt[x] += 1; - } - seqno += 1; - } - - /* Send events to EO A */ - for (x = 0; x < max_q_cnt; x++) { - int n, m; - int num_sent = 0; - - /* Send in bursts of 'SEND_MULTI_MAX' events */ - const int send_rounds = ev_cnt[x] / SEND_MULTI_MAX; - const int left_over = ev_cnt[x] % SEND_MULTI_MAX; - - for (n = 0, m = 0; n < send_rounds; - n++, m += SEND_MULTI_MAX) { - num_sent += em_send_multi(&all_events[x][m], - SEND_MULTI_MAX, - start->src_queues[x]); - } - if (left_over) { - num_sent += em_send_multi(&all_events[x][m], left_over, - start->src_queues[x]); - } - test_fatal_if(num_sent != ev_cnt[x], - "Event send multi failed:%d (%d)\n" - "Q:%" PRI_QUEUE "", - num_sent, ev_cnt[x], start->src_queues[x]); - } -} - -/** - * @private - * - * EO receive function for EO A. - * - * Forwards events to the next processing stage (EO) - * and calculates the event rate. 
- */ -static void -receive_a(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - eo_context_t *const eo_ctx = eo_context; - queue_context_t *const q_ctx = queue_context; - test_event_t *const test_event = em_event_pointer(event); - data_event_t *data_event; - core_stat_t *cstat; - em_queue_t dest_queue; - int core; - uint64_t core_events, print_events = 0; - uint64_t seqno; - em_status_t ret; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (unlikely(test_event->ev_id == EV_ID_START_EVENT)) { - /* - * Start-up only, one time: initialize the test event sending. - * Called from EO-receive to avoid mixing up events & sequence - * numbers in start-up for ordered EO-pairs (sending from the - * start functions could mess up the seqno:s since all the - * cores are already in the dispatch loop). - */ - initialize_events(&test_event->start); - em_free(event); - return; - } - - if (VERIFY_ATOMIC_ACCESS) - verify_atomic_access__begin(eo_ctx); - - test_fatal_if(test_event->ev_id != EV_ID_DATA_EVENT, - "Unexpected ev-id:%d", test_event->ev_id); - data_event = &test_event->data; - - core = em_core_id(); - cstat = &qtypes_shm->core_stat[core]; - - core_events = cstat->events; - seqno = data_event->seqno; - - /* Increment Q specific event counter (parallel Qs req atomic inc:s)*/ - env_atomic64_inc(&q_ctx->sched.num_events); - - test_fatal_if(data_event->src != queue, - "EO-A queue mismatch:%" PRI_QUEUE "!=%" PRI_QUEUE "", - data_event->src, queue); - - if (unlikely(core_events == 0)) { - cstat->begin_cycles = env_get_cycle(); - core_events += 1; - cstat->pt_count[eo_ctx->pair_type] += 1; - } else if (unlikely(core_events > PRINT_COUNT)) { - cstat->end_cycles = env_get_cycle(); - /* indicate that statistics should be printed this round: */ - print_events = core_events; - core_events = 0; - } else { - core_events += 1; - cstat->pt_count[eo_ctx->pair_type] += 1; - } - - if (eo_ctx->ordered_pair && eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) { - /* Verify the seq nbr to make sure event order is maintained*/ - verify_seqno(eo_ctx, q_ctx, seqno); - } - - dest_queue = q_ctx->sched.local_q_hdl; - data_event->src = queue; - - cstat->events = core_events; - - ret = em_send(event, dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, "EO-A em_send failure"); - } - - if (VERIFY_ATOMIC_ACCESS) - verify_atomic_access__end(eo_ctx); - - /* Print core specific statistics */ - if (unlikely(print_events)) { - int i; - - if (core == 0) - verify_all_queues_get_events(); - - print_core_stats(cstat, print_events); - - for (i = 0; i < QUEUE_TYPE_PAIRS; i++) - cstat->pt_count[i] = 0; - - /* - * Test: Verify that EO & queue _current() and - * _get_context() APIs work as expected. - */ - test_fatal_if(em_eo_current() != eo_ctx->eo_hdl, - "Invalid current EO"); - test_fatal_if(em_eo_get_context(eo_ctx->eo_hdl) != eo_context, - "Invalid current EO context"); - test_fatal_if(em_queue_current() != queue, - "Invalid current queue"); - test_fatal_if(em_queue_get_context(queue) != queue_context, - "Invalid current EO context"); - - cstat->begin_cycles = env_get_cycle(); - } -} - -/** - * @private - * - * EO receive function for EO B. - * - * Forwards events to the next processing stage (EO). 
- */ -static void -receive_b(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - eo_context_t *const eo_ctx = eo_context; - queue_context_t *const q_ctx = queue_context; - core_stat_t *cstat; - em_queue_t dest_queue; - test_event_t *test_event; - data_event_t *data_event; - int core; - uint64_t core_events; - em_status_t ret; - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (VERIFY_ATOMIC_ACCESS) - verify_atomic_access__begin(eo_ctx); - - test_event = em_event_pointer(event); - test_fatal_if(test_event->ev_id != EV_ID_DATA_EVENT, - "Unexpected ev-id:%d", test_event->ev_id); - data_event = &test_event->data; - - core = em_core_id(); - cstat = &qtypes_shm->core_stat[core]; - core_events = cstat->events; - - /* Increment Q specific event counter (parallel Qs req atomic inc:s)*/ - env_atomic64_inc(&q_ctx->sched.num_events); - - test_fatal_if(data_event->src != queue, - "EO-B queue mismatch:%" PRI_QUEUE "!=%" PRI_QUEUE "", - data_event->src, queue); - - if (eo_ctx->ordered_pair && eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) { - /* Verify the seq nbr to make sure event order is maintained*/ - verify_seqno(eo_ctx, q_ctx, data_event->seqno); - } - - dest_queue = q_ctx->sched.local_q_hdl; - data_event->src = queue; - - if (unlikely(core_events == 0)) - cstat->begin_cycles = env_get_cycle(); - core_events++; - - cstat->events = core_events; - cstat->pt_count[eo_ctx->pair_type] += 1; - - ret = em_send(event, dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, "EO-B em_send failure"); - } - - if (VERIFY_ATOMIC_ACCESS) - verify_atomic_access__end(eo_ctx); -} - -/** - * @private - * - * EO receive function for EOs handling the local queues. - * - * Forwards events to the next processing stage (EO). 
- */ -static void -receive_locq(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - eo_context_t *const eo_ctx = eo_context; - queue_context_t *const q_ctx = queue_context; - test_event_t *const test_event = em_event_pointer(event); - data_event_t *data_event; - em_queue_t dest_queue; - em_status_t ret; - uint64_t queue_events; - - (void)type; - (void)queue; - (void)eo_ctx; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - test_fatal_if(test_event->ev_id != EV_ID_DATA_EVENT, - "Unexpected ev-id:%d", test_event->ev_id); - data_event = &test_event->data; - - queue_events = q_ctx->local.num_events++; - - dest_queue = data_event->dest; - - data_event->dest = data_event->src; - data_event->src = dest_queue; - - ret = em_send(event, dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, "EO-local em_send failure"); - } - - if (CALL_ATOMIC_PROCESSING_END) { - /* Call em_atomic_processing_end() every once in a while */ - if (queue_events % qtypes_shm->num_queues == q_ctx->idx) - em_atomic_processing_end(); - } -} - -static pair_type_t -get_pair_type(queue_type_pair_t *queue_type_pair) -{ - em_queue_type_t qt1 = queue_type_pair->q_type[0]; - em_queue_type_t qt2 = queue_type_pair->q_type[1]; - int in_ag1 = queue_type_pair->in_atomic_group[0]; - int in_ag2 = queue_type_pair->in_atomic_group[1]; - - switch (qt1) { - case EM_QUEUE_TYPE_ATOMIC: - switch (qt2) { - case EM_QUEUE_TYPE_ATOMIC: - if (in_ag1 && in_ag2) - return PT_AG_AG; - else if (in_ag1 || in_ag2) - return PT_AG_ATOMIC; - else - return PT_ATOMIC_ATOMIC; - - case EM_QUEUE_TYPE_PARALLEL: - if (in_ag1) - return PT_AG_PARALLEL; - else - return PT_PARALLEL_ATOMIC; - - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - if (in_ag1) - return PT_AG_PARALORD; - else - return PT_PARALORD_ATOMIC; - } - break; - - case EM_QUEUE_TYPE_PARALLEL: - switch (qt2) { - case EM_QUEUE_TYPE_ATOMIC: - if (in_ag2) - return PT_AG_PARALLEL; - else - return PT_PARALLEL_ATOMIC; - - case EM_QUEUE_TYPE_PARALLEL: - return PT_PARALLEL_PARALLEL; - - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - return PT_PARALORD_PARALLEL; - } - break; - - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - switch (qt2) { - case EM_QUEUE_TYPE_ATOMIC: - if (in_ag2) - return PT_AG_PARALORD; - else - return PT_PARALORD_ATOMIC; - - case EM_QUEUE_TYPE_PARALLEL: - return PT_PARALORD_PARALLEL; - - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - return PT_PARALORD_PARALORD; - } - break; - } - - return PT_UNDEFINED; -} - -static inline void -verify_seqno(eo_context_t *const eo_ctx, queue_context_t *const q_ctx, - uint64_t seqno) -{ - if (unlikely(qtypes_shm->teardown_in_progress)) - return; - - if (eo_ctx->owns_ag_queues == eo_ctx->peer_owns_ag_queues) { - const uint64_t max_seqno = (eo_ctx->owns_ag_queues) ? - NUM_EVENT / 3 - 1 : NUM_EVENT - 1; - - if (q_ctx->sched.seqno != seqno) { - test_error((em_status_t)__LINE__, 0xdead, - "SEQUENCE ERROR A:\t" - "queue=%" PRI_QUEUE " Q-seqno=%" PRIu64 "\t" - "Event-seqno=%" PRIu64 " PT:%i", - q_ctx->q_hdl, q_ctx->sched.seqno, seqno, - eo_ctx->pair_type); - exit(EXIT_FAILURE); - } - - if (q_ctx->sched.seqno < max_seqno) - q_ctx->sched.seqno++; - else - q_ctx->sched.seqno = 0; - } -} - -/** - * Verifies that each queue processes all its events at least once per - * statistics round. 
- */ -static void -verify_all_queues_get_events(void) -{ - const unsigned int num_queues = qtypes_shm->num_queues; - unsigned int i, first = 1, q_evcnt_low = 0; - uint64_t curr, prev, diff; - - for (i = 0; i < num_queues; i++) { - queue_context_t *const tmp_qctx = - &qtypes_shm->queue_context[i]; - const uint64_t min_events = (tmp_qctx->sched.in_atomic_group) ? - NUM_EVENT / 3 : NUM_EVENT; - - if (tmp_qctx->q_type == EM_QUEUE_TYPE_LOCAL) { - curr = tmp_qctx->local.num_events; - prev = tmp_qctx->local.prev_events; - tmp_qctx->local.prev_events = curr; - } else { - curr = env_atomic64_get(&tmp_qctx->sched.num_events); - prev = tmp_qctx->sched.prev_events; - tmp_qctx->sched.prev_events = curr; - } - - diff = (curr >= prev) ? - curr - prev : UINT64_MAX - prev + curr + 1; - - if (unlikely(diff < min_events)) { - const char *q_type_str; - - q_evcnt_low++; - if (first) { - first = 0; - print_event_msg_string(); - } - - switch (tmp_qctx->q_type) { - case EM_QUEUE_TYPE_ATOMIC: - if (tmp_qctx->sched.in_atomic_group) - q_type_str = "AG"; - else - q_type_str = "A "; - break; - case EM_QUEUE_TYPE_PARALLEL: - q_type_str = "P "; - break; - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - q_type_str = "PO"; - break; - case EM_QUEUE_TYPE_LOCAL: - q_type_str = "L "; - break; - default: - q_type_str = "??"; - break; - } - - APPL_PRINT("Q=%3" PRI_QUEUE "(%s cnt:%" PRIu64 ") %c", - tmp_qctx->q_hdl, q_type_str, diff, - (q_evcnt_low % 8 == 0) ? '\n' : ' '); - } - } - - if (!first) - APPL_PRINT("\nQueue count with too few events:%u\n\n", - q_evcnt_low); -} - -/** - * Try to take a spinlock and if it fails we know that another core is - * processing an event from the same atomic queue or atomic group, which - * should never happen => fatal error! The lock is for verification only, - * no sync purpose whatsoever. - */ -static inline void -verify_atomic_access__begin(eo_context_t *const eo_ctx) -{ - if (unlikely(eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC && - !env_spinlock_trylock(&eo_ctx->verify_atomic_access))) - test_error(EM_ERROR_SET_FATAL(__LINE__), 0xdead, - "EO Atomic context lost!"); -} - -/** - * Release the verification lock - */ -static inline void -verify_atomic_access__end(eo_context_t *const eo_ctx) -{ - if (unlikely(eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC)) - env_spinlock_unlock(&eo_ctx->verify_atomic_access); -} - -/** - * Print core specific statistics - */ -static void -print_core_stats(core_stat_t *const cstat, uint64_t print_events) -{ - uint64_t diff; - uint32_t hz; - double mhz; - double cycles_per_event; - uint64_t print_count; - - if (cstat->end_cycles > cstat->begin_cycles) - diff = cstat->end_cycles - cstat->begin_cycles; - else - diff = UINT64_MAX - cstat->begin_cycles + cstat->end_cycles + 1; - - print_count = cstat->print_count++; - cycles_per_event = (double)diff / (double)print_events; - - hz = env_core_hz(); - mhz = ((double)hz) / 1000000.0; - - APPL_PRINT(PRINT_CORE_STAT_FMT, em_core_id(), - cstat->pt_count[0], cstat->pt_count[1], cstat->pt_count[2], - cstat->pt_count[3], cstat->pt_count[4], cstat->pt_count[5], - cstat->pt_count[6], cstat->pt_count[7], cstat->pt_count[8], - cstat->pt_count[9], cycles_per_event, mhz, print_count); -} - -static void -print_event_msg_string(void) -{ - APPL_PRINT("\nToo few events detected for the following queues:\n"); -} +/* + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine Queue Types test/example with included local queues.
+ *
+ * Similar to the queue_types_ag.c example but additionally adds local queues
+ * between the scheduled queues.
+ * See programs/example/queue/queue_types_ag.c
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/* Number of queue type pairs (constant, don't change) */
+#define QUEUE_TYPE_PAIRS  10
+/*
+ * Number of test EOs and queues. Must be an even number.
+ * Test has NUM_EO/2 EO pairs, that send ping-pong events.
+ * Depending on test dynamics (e.g. single burst in atomic
+ * queue) only one EO of a pair might be active at a time.
+ */
+#define NUM_EO  (2 * 8 * QUEUE_TYPE_PAIRS)
+/* Max number of queues supported by the test */
+#define MAX_QUEUES  (NUM_EO / QUEUE_TYPE_PAIRS * 30)
+/* Number of ping-pong events per EO pair */
+#define NUM_EVENT  (3 * 32)
+/* Number of data bytes in the event */
+#define DATA_SIZE  64
+/* Max number of cores supported by the test */
+#define MAX_CORES  64
+/* Print stats when the number of received events reaches this value on a core */
+#define PRINT_COUNT  0x1000000
+
+/** Define how many events are sent per em_send_multi() call */
+#define SEND_MULTI_MAX  32
+
+/*
+ * Enable atomic access checks.
+ * If enabled will crash the application if the atomic-processing context
+ * is violated, i.e. checks that events from an atomic queue are being
+ * processed one-by-one.
+ */
+#define VERIFY_ATOMIC_ACCESS  1 /* 0=False or 1=True */
+/* Call em_atomic_processing_end every once in a while */
+#define CALL_ATOMIC_PROCESSING_END  1 /* 0=False or 1=True */
+
+/* Return 'TRUE' if the queue pair retains event order */
+#define ORDERED_PAIR(q_type_a, q_type_b)  ( \
+		(((q_type_a) == EM_QUEUE_TYPE_ATOMIC) || \
+		 ((q_type_a) == EM_QUEUE_TYPE_PARALLEL_ORDERED)) && \
+		(((q_type_b) == EM_QUEUE_TYPE_ATOMIC) || \
+		 ((q_type_b) == EM_QUEUE_TYPE_PARALLEL_ORDERED)))
+
+#define ABS(nbr1, nbr2)  (((nbr1) > (nbr2)) ? \
((nbr1) - (nbr2)) : \ + ((nbr2) - (nbr1))) + +#define PRINT_CORE_STAT_FMT \ +"Core-%02i:\t" \ +"A-L-A-L:%6" PRIu64 " P-L-P-L:%6" PRIu64 " PO-L-PO-L:%6" PRIu64 "\t" \ +"P-L-A-L:%6" PRIu64 " PO-L-A-L:%6" PRIu64 " PO-L-P-L:%6" PRIu64 "\t" \ +"AG-L-AG-L:%6" PRIu64 " AG-L-A-L:%6" PRIu64 "\t" \ +"AG-L-P-L:%6" PRIu64 " AG-L-PO-L:%6" PRIu64 "\t" \ +"cycles/event:%.0f @%.0fMHz %" PRIu64 "\n" + +/** + * Combinations of Queue Type pairs + */ +#define NO_AG (0) +#define IN_AG (1) +typedef struct queue_type_pairs_ { + em_queue_type_t q_type[2]; + int in_atomic_group[2]; +} queue_type_pair_t; + +queue_type_pair_t queue_type_pairs[QUEUE_TYPE_PAIRS] = { + /* Ordered Pair */ + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {NO_AG, NO_AG} }, + { {EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_TYPE_PARALLEL}, {NO_AG, NO_AG} }, + /* Ordered Pair */ + { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_PARALLEL_ORDERED}, + {NO_AG, NO_AG} }, + { {EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_TYPE_ATOMIC}, {NO_AG, NO_AG} }, + /* Ordered Pair */ + { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_ATOMIC}, + {NO_AG, NO_AG} }, + { {EM_QUEUE_TYPE_PARALLEL_ORDERED, EM_QUEUE_TYPE_PARALLEL}, + {NO_AG, NO_AG} }, + /* With Atomic Groups for atomic queues: */ + /* Ordered Pair */ + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {IN_AG, IN_AG} }, + /* Ordered Pair */ + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_ATOMIC}, {IN_AG, NO_AG} }, + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL}, {IN_AG, NO_AG} }, + /* Ordered Pair */ + { {EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL_ORDERED}, + {IN_AG, NO_AG} }, +}; + +COMPILE_TIME_ASSERT(sizeof(queue_type_pairs) == + (QUEUE_TYPE_PAIRS * sizeof(queue_type_pair_t)), + QUEUE_TYPE_PAIRS_SIZE_ERROR); + +typedef enum { + PT_ATOMIC_ATOMIC = 0, + PT_PARALLEL_PARALLEL = 1, + PT_PARALORD_PARALORD = 2, + PT_PARALLEL_ATOMIC = 3, + PT_PARALORD_ATOMIC = 4, + PT_PARALORD_PARALLEL = 5, + /* With Atomic Groups (AG) for atomic queues: */ + PT_AG_AG = 6, + PT_AG_ATOMIC = 7, + PT_AG_PARALLEL = 8, + PT_AG_PARALORD = 9, + PT_UNDEFINED +} pair_type_t; + +/** + * Test statistics (per core) + */ +typedef union { + uint8_t u8[2 * ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; + + struct { + uint64_t events; + uint64_t begin_cycles; + uint64_t end_cycles; + uint64_t print_count; + /* + * Pair-Type count, i.e. the number of events belonging to + * a certain pair-type on this core + */ + uint64_t pt_count[QUEUE_TYPE_PAIRS]; + }; +} core_stat_t; + +COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, + CORE_STAT_T__SIZE_ERROR); + +/** + * Test EO context + */ +typedef struct { + em_eo_t eo_hdl; + /* EO pair retains order? 
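The core_stat_t union above is a false-sharing guard: overlaying the counters with a byte array sized to whole cache lines keeps each core's statistics on its own lines, so a counter update on one core never invalidates another core's cached copy. A minimal sketch of the same idiom, assuming GCC/Clang attribute syntax and a 64-byte line; CACHE_LINE_SIZE and padded_stat_t are illustrative stand-ins for the ENV_ names used in this file:

#include <stdint.h>

#define CACHE_LINE_SIZE 64 /* stand-in for ENV_CACHE_LINE_SIZE */

typedef union {
	/* the union member forces the size: two full cache lines */
	uint8_t u8[2 * CACHE_LINE_SIZE] __attribute__((aligned(CACHE_LINE_SIZE)));
	struct { /* the actual payload, padded out by 'u8' */
		uint64_t events;
		uint64_t begin_cycles;
		uint64_t end_cycles;
	};
} padded_stat_t;

/* one private entry per core: no cache line is shared between cores */
static padded_stat_t stats[64];

_Static_assert(sizeof(padded_stat_t) % CACHE_LINE_SIZE == 0,
	       "size must be a multiple of the cache line size");

The COMPILE_TIME_ASSERT() checks in this file serve the same purpose as the _Static_assert() above: catching a payload that outgrows its padding at compile time.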
0/1 */ + int ordered_pair; + pair_type_t pair_type; + int owns_ag_queues; + em_atomic_group_t agrp_hdl; + int peer_owns_ag_queues; + /* Atomic group is also set as queue type atomic */ + em_queue_type_t q_type; + env_spinlock_t verify_atomic_access; + + void *end[0] ENV_CACHE_LINE_ALIGNED; +} eo_context_t; + +COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, + EO_CTX_T__SIZE_ERROR); + +/** + * Test Queue context + */ +typedef struct { + em_queue_t q_hdl; + em_queue_type_t q_type; + unsigned int idx; + + union { + struct { + /* Associated local queue */ + em_queue_t local_q_hdl; + int in_atomic_group; + uint64_t seqno; + /* Total number of events handled from the queue */ + env_atomic64_t num_events; + /* Number of events at the previous check-point */ + uint64_t prev_events; + } sched; + + struct { + /* Total number of events handled from the queue */ + uint64_t num_events; + /* Number of events at the previous check-point */ + uint64_t prev_events; + } local; + }; + + void *end[0] ENV_CACHE_LINE_ALIGNED; +} queue_context_t; + +COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, + Q_CTX_T__SIZE_ERROR); + +#define EV_ID_DATA_EVENT 1 +#define EV_ID_START_EVENT 2 +/** Data event content */ +typedef struct { + int ev_id; + /* Next destination queue */ + em_queue_t dest; + em_queue_t src; + /* Sequence number */ + uint64_t seqno; + /* Test data */ + uint8_t data[DATA_SIZE]; +} data_event_t; +/** Startup event content */ +typedef struct { + int ev_id; + + int in_atomic_group_a; + int src_q_cnt; + em_queue_t src_queues[3]; + + int in_atomic_group_b; + int dst_q_cnt; + em_queue_t dst_queues[3]; +} start_event_t; +/** + * Test event, content identified by 'ev_id' + */ +typedef union { + int ev_id; + data_event_t data; + start_event_t start; +} test_event_t; + +/** + * Queue Types test shared memory + */ +typedef struct { + core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; + + eo_context_t eo_context[NUM_EO] ENV_CACHE_LINE_ALIGNED; + + queue_context_t queue_context[MAX_QUEUES] ENV_CACHE_LINE_ALIGNED; + + unsigned num_queues ENV_CACHE_LINE_ALIGNED; + + em_pool_t pool; + + int teardown_in_progress; +} qtypes_shm_t; + +COMPILE_TIME_ASSERT(sizeof(qtypes_shm_t) % ENV_CACHE_LINE_SIZE == 0, + QTYPES_SHM_T__SIZE_ERROR); + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL qtypes_shm_t *qtypes_shm; + +/** + * Local Function Prototypes + */ +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t +start_locq(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t +start_local(void *eo_ctx, em_eo_t eo); +static em_status_t +start_local_locq(void *eo_ctx, em_eo_t eo); + +static em_status_t +stop(void *eo_context, em_eo_t eo); +static em_status_t +stop_locq(void *eo_context, em_eo_t eo); +static em_status_t +stop_local(void *eo_ctx, em_eo_t eo); +static em_status_t +stop_local_locq(void *eo_ctx, em_eo_t eo); + +static void +receive_a(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static void +receive_b(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static void +receive_locq(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static pair_type_t +get_pair_type(queue_type_pair_t *queue_type_pair); + +static inline void +verify_seqno(eo_context_t *const eo_ctx, queue_context_t *const q_ctx, + uint64_t seqno); + +static void 
+verify_all_queues_get_events(void); + +static inline void +verify_atomic_access__begin(eo_context_t *const eo_ctx); + +static inline void +verify_atomic_access__end(eo_context_t *const eo_ctx); + +static void +print_core_stats(core_stat_t *const cstat, uint64_t print_events); + +static void +print_event_msg_string(void); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Queue Types test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + qtypes_shm = env_shared_reserve("QueueTypesSharedMem", + sizeof(qtypes_shm_t)); + em_register_error_handler(test_error_handler); + } else { + qtypes_shm = env_shared_lookup("QueueTypesSharedMem"); + } + + if (qtypes_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(__LINE__), 0xdead, + "Queue Types test init failed on EM-core: %u\n", + em_core_id()); + } else if (core == 0) { + memset(qtypes_shm, 0, sizeof(qtypes_shm_t)); + } +} + +/** + * Startup of the Queue Types test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_atomic_group_t atomic_group; + em_eo_t eo, eo_locq; + em_queue_t queue_a, queue_b; + em_queue_t queue_ag_a1, queue_ag_a2, queue_ag_a3; + em_queue_t queue_ag_b1, queue_ag_b2, queue_ag_b3; + em_queue_t queue_local_a, queue_local_b; + em_queue_type_t q_type_a, q_type_b; + em_status_t ret, start_ret = EM_ERROR; + eo_context_t *eo_ctx; + queue_context_t *q_ctx; + pair_type_t pair_type; + unsigned int qcnt = 0; /* queue context index */ + unsigned int eocnt = 0; /* eo context index */ + int in_atomic_group_a, in_atomic_group_b; + int ordered_pair; + char ag_name[EM_ATOMIC_GROUP_NAME_LEN]; + int i; + + queue_a = EM_QUEUE_UNDEF; + queue_b = EM_QUEUE_UNDEF; + + queue_ag_a1 = EM_QUEUE_UNDEF; + queue_ag_a2 = EM_QUEUE_UNDEF; + queue_ag_a3 = EM_QUEUE_UNDEF; + + queue_ag_b1 = EM_QUEUE_UNDEF; + queue_ag_b2 = EM_QUEUE_UNDEF; + queue_ag_b3 = EM_QUEUE_UNDEF; + + queue_local_a = EM_QUEUE_UNDEF; + queue_local_b = EM_QUEUE_UNDEF; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. 
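test_init() above runs on every core, but only core 0 creates the shared block; the other cores attach to it by name. The pattern reduced to its core, reusing the env API already present in this file (a sketch; it assumes, as cm_setup() guarantees here, that core 0 completes the reserve before the other cores run their lookup):

typedef struct {
	uint64_t counter;
} my_shm_t;

/* core-local pointer; every core's copy ends up at the same block */
static ENV_LOCAL my_shm_t *my_shm;

void my_init(void)
{
	if (em_core_id() == 0) {
		/* the first core reserves the named shared memory block... */
		my_shm = env_shared_reserve("MySharedMem", sizeof(my_shm_t));
		if (my_shm != NULL)
			memset(my_shm, 0, sizeof(my_shm_t));
	} else {
		/* ...all other cores look the same block up by name */
		my_shm = env_shared_lookup("MySharedMem");
	}
}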
+ */ + if (appl_conf->num_pools >= 1) + qtypes_shm->pool = appl_conf->pools[0]; + else + qtypes_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + qtypes_shm->pool); + + test_fatal_if(qtypes_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + test_fatal_if(em_eo_current() != EM_EO_UNDEF, + "Invalid current EO"); + test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, + "Invalid current queue"); + + qtypes_shm->num_queues = 0; + qtypes_shm->teardown_in_progress = EM_FALSE; + + /* Create and start application pipelines. Send initial test events */ + for (i = 0; i < (NUM_EO / 4); i++) { + q_type_a = queue_type_pairs[i % QUEUE_TYPE_PAIRS].q_type[0]; + in_atomic_group_a = + queue_type_pairs[i % QUEUE_TYPE_PAIRS].in_atomic_group[0]; + + q_type_b = queue_type_pairs[i % QUEUE_TYPE_PAIRS].q_type[1]; + in_atomic_group_b = + queue_type_pairs[i % QUEUE_TYPE_PAIRS].in_atomic_group[1]; + + ordered_pair = ORDERED_PAIR(q_type_a, q_type_b); + + pair_type = + get_pair_type(&queue_type_pairs[i % QUEUE_TYPE_PAIRS]); + test_fatal_if(pair_type == PT_UNDEFINED, + "Queue Pair Type UNDEFINED! (%u, %u)", + q_type_a, q_type_b); + + /* Create EO "local-A" */ + eo_ctx = &qtypes_shm->eo_context[eocnt++]; + eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; + + eo_locq = em_eo_create("EO-local-A", + start_locq, start_local_locq, + stop_locq, stop_local_locq, + receive_locq, eo_ctx); + queue_local_a = em_queue_create("queue-local-A", + EM_QUEUE_TYPE_LOCAL, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_UNDEF, NULL); + ret = em_eo_add_queue_sync(eo_locq, queue_local_a); + test_fatal_if(ret != EM_OK, "EO-local-A setup failed!"); + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_local_a; + q_ctx->q_type = EM_QUEUE_TYPE_LOCAL; + q_ctx->idx = qcnt++; + + ret = em_queue_set_context(queue_local_a, q_ctx); + test_fatal_if(ret != EM_OK, "EO-local-A setup failed!"); + + ret = em_eo_start_sync(eo_locq, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO-local-A setup:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + + /* Create EO "A" */ + eo_ctx = &qtypes_shm->eo_context[eocnt++]; + eo_ctx->ordered_pair = ordered_pair; + eo_ctx->pair_type = pair_type; + eo_ctx->q_type = q_type_a; + eo_ctx->owns_ag_queues = in_atomic_group_a; + eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; + eo_ctx->peer_owns_ag_queues = in_atomic_group_b; + + eo = em_eo_create("EO-A", start, NULL, stop, NULL, receive_a, + eo_ctx); + + if (in_atomic_group_a && q_type_a == EM_QUEUE_TYPE_ATOMIC) { + atomic_group = + em_atomic_group_create("group_a", + EM_QUEUE_GROUP_DEFAULT); + test_fatal_if(atomic_group == EM_ATOMIC_GROUP_UNDEF, + "Atomic group creation failed!"); + + if (em_atomic_group_get_name(atomic_group, ag_name, + EM_ATOMIC_GROUP_NAME_LEN)) + APPL_PRINT("New atomic group:%s for EO:\t" + "%" PRI_EO "\n", ag_name, eo); + + eo_ctx->agrp_hdl = atomic_group; + + queue_ag_a1 = em_queue_create_ag("AG:Q A1", + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + queue_ag_a2 = em_queue_create_ag("AG:Q A2", + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + queue_ag_a3 = em_queue_create_ag("AG:Q A3", + 
EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + + ret = em_eo_add_queue_sync(eo, queue_ag_a1); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + ret = em_eo_add_queue_sync(eo, queue_ag_a2); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + ret = em_eo_add_queue_sync(eo, queue_ag_a3); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_a1; + q_ctx->q_type = q_type_a; + q_ctx->idx = qcnt++; + q_ctx->sched.local_q_hdl = queue_local_a; + q_ctx->sched.in_atomic_group = in_atomic_group_a; + + ret = em_queue_set_context(queue_ag_a1, q_ctx); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_a2; + q_ctx->q_type = q_type_a; + q_ctx->idx = qcnt++; + q_ctx->sched.local_q_hdl = queue_local_a; + q_ctx->sched.in_atomic_group = in_atomic_group_a; + + ret = em_queue_set_context(queue_ag_a2, q_ctx); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_a3; + q_ctx->q_type = q_type_a; + q_ctx->idx = qcnt++; + q_ctx->sched.local_q_hdl = queue_local_a; + q_ctx->sched.in_atomic_group = in_atomic_group_a; + + ret = em_queue_set_context(queue_ag_a3, q_ctx); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + } else { + queue_a = em_queue_create("queue A", q_type_a, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + ret = em_eo_add_queue_sync(eo, queue_a); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_a; + q_ctx->q_type = q_type_a; + q_ctx->idx = qcnt++; + q_ctx->sched.local_q_hdl = queue_local_a; + q_ctx->sched.in_atomic_group = in_atomic_group_a; + + ret = em_queue_set_context(queue_a, q_ctx); + test_fatal_if(ret != EM_OK, "EO-A setup failed!"); + } + + /* Start EO-A */ + ret = em_eo_start_sync(eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO-A setup failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + + /* Create EO "local-B" */ + eo_ctx = &qtypes_shm->eo_context[eocnt++]; + eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; + + eo_locq = em_eo_create("EO-local-B", start_locq, NULL, + stop_locq, NULL, receive_locq, eo_ctx); + queue_local_b = em_queue_create("queue-local-B", + EM_QUEUE_TYPE_LOCAL, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_UNDEF, NULL); + ret = em_eo_add_queue_sync(eo_locq, queue_local_b); + test_fatal_if(ret != EM_OK, "EO-local-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_local_b; + q_ctx->q_type = EM_QUEUE_TYPE_LOCAL; + q_ctx->idx = qcnt++; + + ret = em_queue_set_context(queue_local_b, q_ctx); + test_fatal_if(ret != EM_OK, "EO-local-B setup failed!"); + + ret = em_eo_start_sync(eo_locq, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO-local-B setup:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + + test_fatal_if(ret != EM_OK, "EO-local-B setup failed!"); + + /* Create EO "B" */ + eo_ctx = &qtypes_shm->eo_context[eocnt++]; + eo_ctx->ordered_pair = ordered_pair; + eo_ctx->pair_type = pair_type; + eo_ctx->q_type = q_type_b; + eo_ctx->owns_ag_queues = in_atomic_group_b; + eo_ctx->agrp_hdl = EM_ATOMIC_GROUP_UNDEF; + eo_ctx->peer_owns_ag_queues = in_atomic_group_a; + + eo = em_eo_create("EO-B", start, start_local, stop, stop_local, + receive_b, eo_ctx); + + if (in_atomic_group_b && q_type_b == EM_QUEUE_TYPE_ATOMIC) { + atomic_group = + em_atomic_group_create("group_b", + 
EM_QUEUE_GROUP_DEFAULT); + test_fatal_if(atomic_group == EM_ATOMIC_GROUP_UNDEF, + "Atomic group creation failed!"); + + if (em_atomic_group_get_name(atomic_group, ag_name, + EM_ATOMIC_GROUP_NAME_LEN)) + APPL_PRINT("New atomic group:%s for EO:\t" + "%" PRI_EO "\n", ag_name, eo); + + eo_ctx->agrp_hdl = atomic_group; + + queue_ag_b1 = em_queue_create_ag("AG:Q B1", + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + queue_ag_b2 = em_queue_create_ag("AG:Q B2", + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + queue_ag_b3 = em_queue_create_ag("AG:Q B3", + EM_QUEUE_PRIO_NORMAL, + atomic_group, NULL); + + ret = em_eo_add_queue_sync(eo, queue_ag_b1); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + ret = em_eo_add_queue_sync(eo, queue_ag_b2); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + ret = em_eo_add_queue_sync(eo, queue_ag_b3); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_b1; + q_ctx->q_type = q_type_b; + q_ctx->idx = qcnt++; + q_ctx->sched.local_q_hdl = queue_local_b; + q_ctx->sched.in_atomic_group = in_atomic_group_b; + ret = em_queue_set_context(queue_ag_b1, q_ctx); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_b2; + q_ctx->q_type = q_type_b; + q_ctx->idx = qcnt++; + q_ctx->sched.local_q_hdl = queue_local_b; + q_ctx->sched.in_atomic_group = in_atomic_group_b; + ret = em_queue_set_context(queue_ag_b2, q_ctx); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_ag_b3; + q_ctx->q_type = q_type_b; + q_ctx->idx = qcnt++; + q_ctx->sched.local_q_hdl = queue_local_b; + q_ctx->sched.in_atomic_group = in_atomic_group_b; + ret = em_queue_set_context(queue_ag_b3, q_ctx); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + } else { + queue_b = em_queue_create("queue B", q_type_b, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + ret = em_eo_add_queue_sync(eo, queue_b); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + + q_ctx = &qtypes_shm->queue_context[qcnt]; + q_ctx->q_hdl = queue_b; + q_ctx->q_type = q_type_b; + q_ctx->idx = qcnt++; + q_ctx->sched.local_q_hdl = queue_local_b; + q_ctx->sched.in_atomic_group = in_atomic_group_b; + ret = em_queue_set_context(queue_b, q_ctx); + test_fatal_if(ret != EM_OK, "EO-B setup failed!"); + } + + ret = em_eo_start_sync(eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO-B setup failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + + /* update qcnt each round to avoid == 0 in recv-func */ + qtypes_shm->num_queues = qcnt; + + APPL_PRINT("\n"); + /* + * Allocate and send the startup event to the first EO of the + * pair of this round. 
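The three AG:Q B1..B3 blocks above (and their A-side counterparts) repeat the same steps per queue: create the queue into the atomic group, add it to the EO, and attach a queue context. The same setup can be folded into a loop; a sketch reusing the variables and APIs of test_start(), with per-index queue naming omitted for brevity:

static void setup_ag_queues(em_atomic_group_t ag, em_eo_t eo,
			    em_queue_t local_q, em_queue_type_t q_type,
			    int in_ag, em_queue_t out_q[3],
			    unsigned int *qcnt /* in/out: context index */)
{
	for (int i = 0; i < 3; i++) {
		out_q[i] = em_queue_create_ag("AG:Q", EM_QUEUE_PRIO_NORMAL,
					      ag, NULL);
		test_fatal_if(em_eo_add_queue_sync(eo, out_q[i]) != EM_OK,
			      "AG queue setup failed!");

		queue_context_t *q_ctx = &qtypes_shm->queue_context[*qcnt];

		q_ctx->q_hdl = out_q[i];
		q_ctx->q_type = q_type;
		q_ctx->idx = (*qcnt)++;
		q_ctx->sched.local_q_hdl = local_q;
		q_ctx->sched.in_atomic_group = in_ag;
		test_fatal_if(em_queue_set_context(out_q[i], q_ctx) != EM_OK,
			      "AG queue setup failed!");
	}
}

The unrolled form used in the patch trades brevity for grep-able, per-queue error sites; either shape behaves the same, since this is one-time setup code.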
+	 */
+		em_event_t event = em_alloc(sizeof(start_event_t),
+					    EM_EVENT_TYPE_SW,
+					    qtypes_shm->pool);
+		test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails");
+		start_event_t *start_event = em_event_pointer(event);
+
+		start_event->ev_id = EV_ID_START_EVENT;
+
+		start_event->in_atomic_group_a = in_atomic_group_a;
+		if (in_atomic_group_a) {
+			start_event->src_q_cnt = 3;
+			start_event->src_queues[0] = queue_ag_a1;
+			start_event->src_queues[1] = queue_ag_a2;
+			start_event->src_queues[2] = queue_ag_a3;
+		} else {
+			start_event->src_q_cnt = 1;
+			start_event->src_queues[0] = queue_a;
+		}
+
+		start_event->in_atomic_group_b = in_atomic_group_b;
+		if (in_atomic_group_b) {
+			start_event->dst_q_cnt = 3;
+			start_event->dst_queues[0] = queue_ag_b1;
+			start_event->dst_queues[1] = queue_ag_b2;
+			start_event->dst_queues[2] = queue_ag_b3;
+		} else {
+			start_event->dst_q_cnt = 1;
+			start_event->dst_queues[0] = queue_b;
+		}
+
+		ret = em_send(event, start_event->src_queues[0]);
+		test_fatal_if(ret != EM_OK, "Event send:%" PRI_STAT "", ret);
+	}
+
+	APPL_PRINT("\n\nQs:%u MAX:%i\n", qcnt, MAX_QUEUES);
+	APPL_PRINT("EOs:%u MAX:%i\n\n", eocnt, NUM_EO);
+	qtypes_shm->num_queues = qcnt;
+
+	test_fatal_if(qcnt > MAX_QUEUES, "Queue context number too high!");
+}
+
+/**
+ * Test stop function
+ *
+ * @attention Run only on one EM core
+ *
+ * @param appl_conf  Application configuration
+ *
+ * @see cm_setup() for setup and teardown.
+ */
+void
+test_stop(appl_conf_t *const appl_conf)
+{
+	const int core = em_core_id();
+	em_eo_t eo;
+	em_status_t ret;
+	eo_context_t *eo_ctx;
+	int i;
+
+	(void)appl_conf;
+
+	/* mark 'teardown in progress' to avoid seq.nbr check errors */
+	qtypes_shm->teardown_in_progress = EM_TRUE;
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	/* stop all EOs */
+	for (i = 0; i < NUM_EO; i++) {
+		eo_ctx = &qtypes_shm->eo_context[i];
+		eo = eo_ctx->eo_hdl;
+		ret = em_eo_stop_sync(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO stop:%" PRI_STAT " EO:%" PRI_EO "",
+			      ret, eo);
+	}
+}
+
+/**
+ * Termination of the 'Queue Types Local' test application.
+ *
+ * @attention Run on one EM core only
+ *
+ * @see cm_setup() for setup and teardown.
+ */
+void
+test_term(void)
+{
+	int core = em_core_id();
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	if (core == 0) {
+		env_shared_free(qtypes_shm);
+		em_unregister_error_handler();
+	}
+}
+
+/**
+ * @private
+ *
+ * EO start function.
+ */
+static em_status_t
+start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	eo_context_t *eo_ctx = eo_context;
+
+	(void)conf;
+
+	APPL_PRINT("EO %" PRI_EO " starting ", eo);
+
+	eo_ctx->eo_hdl = eo;
+
+	/*
+	 * Test: Verify that EO & queue _current() and
+	 * _get_context() APIs work as expected.
+	 */
+	test_fatal_if(em_eo_current() != eo, "Invalid current EO");
+	test_fatal_if(em_eo_get_context(eo) != eo_context,
+		      "Invalid current EO context");
+	test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF,
+		      "Invalid current queue");
+
+	if (VERIFY_ATOMIC_ACCESS)
+		env_spinlock_init(&eo_ctx->verify_atomic_access);
+
+	return EM_OK;
+}
+
+/**
+ * @private
+ *
+ * EO local-start function.
+ */
+static em_status_t
+start_local(void *eo_context, em_eo_t eo)
+{
+	/*
+	 * Test: Verify that EO & queue _current() and
+	 * _get_context() APIs work as expected.
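The em_eo_create() calls in test_start() above wire up to five callbacks per EO. For reference, the argument order used throughout this file, with the role of each slot (the global start/stop run once per EO, the local variants run on each EM-core; EO-A passes NULL for the local ones since it needs no per-core work):

em_eo_t create_example_eo(eo_context_t *eo_ctx)
{
	return em_eo_create("EO-example",
			    start,	 /* global start: once per EO     */
			    start_local, /* local start: on every EM-core */
			    stop,	 /* global stop: once per EO      */
			    stop_local,	 /* local stop: on every EM-core  */
			    receive_b,	 /* receive: called per event     */
			    eo_ctx);	 /* ctx, see em_eo_get_context()  */
}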
+ */ + test_fatal_if(em_eo_current() != eo, "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo) != eo_context, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, + "Invalid current queue"); + return EM_OK; +} + +/** + * @private + * + * EO start function for EOs handling the local queues. + */ +static em_status_t +start_locq(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + eo_context_t *eo_ctx = eo_context; + + (void)conf; + + APPL_PRINT("EO-locq %" PRI_EO " starting ", eo); + + eo_ctx->eo_hdl = eo; + + /* + * Test: Verify that EO & queue _current() and + * _get_context() APIs work as expected. + */ + test_fatal_if(em_eo_current() != eo, "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo) != eo_context, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, + "Invalid current queue"); + + return EM_OK; +} + +/** + * @private + * + * EO local-start function for EOs handling the local queues. + */ +static em_status_t +start_local_locq(void *eo_context, em_eo_t eo) +{ + /* + * Test: Verify that EO & queue _current() and + * _get_context() APIs work as expected. + */ + test_fatal_if(em_eo_current() != eo, "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo) != eo_context, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, + "Invalid current queue"); + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t +stop(void *eo_context, em_eo_t eo) +{ + eo_context_t *const eo_ctx = (eo_context_t *)eo_context; + em_status_t ret; + + APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); + + /* + * Test: Verify that EO & queue _current() and + * _get_context() APIs work as expected. + */ + test_fatal_if(em_eo_current() != eo, "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo) != eo_context, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, + "Invalid current queue"); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + if (eo_ctx->agrp_hdl != EM_ATOMIC_GROUP_UNDEF) { + ret = em_atomic_group_delete(eo_ctx->agrp_hdl); + test_fatal_if(ret != EM_OK, + "AGrp delete:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + } + + /* delete the EO at the end of the stop-function */ + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO delete:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + return EM_OK; +} + +/** + * @private + * + * EO local-stop function. + */ +static em_status_t +stop_local(void *eo_context, em_eo_t eo) +{ + /* + * Test: Verify that EO & queue _current() and + * _get_context() APIs work as expected. + */ + test_fatal_if(em_eo_current() != eo, "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo) != eo_context, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, + "Invalid current queue"); + return EM_OK; +} + +/** + * @private + * + * EO stop function for EOs handling the local queues. + */ +static em_status_t +stop_locq(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + + APPL_PRINT("EO-locq %" PRI_EO " stopping.\n", eo); + + /* + * Test: Verify that EO & queue _current() and + * _get_context() APIs work as expected. 
+ */ + test_fatal_if(em_eo_current() != eo, "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo) != eo_context, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, + "Invalid current queue"); + + return EM_OK; +} + +/** + * @private + * + * EO local-stop function for EOs handling the local queues. + */ +static em_status_t +stop_local_locq(void *eo_context, em_eo_t eo) +{ + /* + * Test: Verify that EO & queue _current() and + * _get_context() APIs work as expected. + */ + test_fatal_if(em_eo_current() != eo, "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo) != eo_context, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != EM_QUEUE_UNDEF, + "Invalid current queue"); + return EM_OK; +} + +static void +initialize_events(start_event_t *const start) +{ + /* + * Allocate and send test events to the EO-pair of this round + */ + const int max_q_cnt = start->src_q_cnt > start->dst_q_cnt ? + start->src_q_cnt : start->dst_q_cnt; + /* tmp storage for all events to send this round */ + em_event_t all_events[max_q_cnt][NUM_EVENT]; + /* number of events for a queue in all_events[Q][events] */ + int ev_cnt[max_q_cnt]; + uint64_t seqno = 0; + int j, x, y; + + for (x = 0; x < max_q_cnt; x++) + ev_cnt[x] = 0; + + for (j = 0; j < NUM_EVENT;) { + for (x = 0, y = 0; x < max_q_cnt; x++, y++, j++) { + em_event_t event = em_alloc(sizeof(test_event_t), + EM_EVENT_TYPE_SW, + qtypes_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, + "Event alloc fails"); + + test_event_t *const test_event = + em_event_pointer(event); + + memset(test_event, 0, sizeof(test_event_t)); + test_event->ev_id = EV_ID_DATA_EVENT; + + if (start->in_atomic_group_b) + test_event->data.dest = start->dst_queues[y]; + else + test_event->data.dest = start->dst_queues[0]; + + test_event->data.src = start->src_queues[x]; + + if (start->in_atomic_group_a == + start->in_atomic_group_b) { + /* verify seqno (symmetric EO-pairs)*/ + test_event->data.seqno = seqno; + } + + all_events[x][ev_cnt[x]] = event; + ev_cnt[x] += 1; + } + seqno += 1; + } + + /* Send events to EO A */ + for (x = 0; x < max_q_cnt; x++) { + int n, m; + int num_sent = 0; + + /* Send in bursts of 'SEND_MULTI_MAX' events */ + const int send_rounds = ev_cnt[x] / SEND_MULTI_MAX; + const int left_over = ev_cnt[x] % SEND_MULTI_MAX; + + for (n = 0, m = 0; n < send_rounds; + n++, m += SEND_MULTI_MAX) { + num_sent += em_send_multi(&all_events[x][m], + SEND_MULTI_MAX, + start->src_queues[x]); + } + if (left_over) { + num_sent += em_send_multi(&all_events[x][m], left_over, + start->src_queues[x]); + } + test_fatal_if(num_sent != ev_cnt[x], + "Event send multi failed:%d (%d)\n" + "Q:%" PRI_QUEUE "", + num_sent, ev_cnt[x], start->src_queues[x]); + } +} + +/** + * @private + * + * EO receive function for EO A. + * + * Forwards events to the next processing stage (EO) + * and calculates the event rate. 
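+ * + * The rate statistic is based on the per-core cycle counts sampled in + * this function: print_core_stats() reports + * cycles_per_event = (end_cycles - begin_cycles) / print_events. + * Illustration only: at a 2000 MHz core clock, 1000 cycles/event + * would correspond to roughly two million events per second per core.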
+ */ +static void +receive_a(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + eo_context_t *const eo_ctx = eo_context; + queue_context_t *const q_ctx = queue_context; + test_event_t *const test_event = em_event_pointer(event); + data_event_t *data_event; + core_stat_t *cstat; + em_queue_t dest_queue; + int core; + uint64_t core_events, print_events = 0; + uint64_t seqno; + em_status_t ret; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(test_event->ev_id == EV_ID_START_EVENT)) { + /* + * Start-up only, one time: initialize the test event sending. + * Called from EO-receive to avoid mixing up events & sequence + * numbers in start-up for ordered EO-pairs (sending from the + * start functions could mess up the seqno:s since all the + * cores are already in the dispatch loop). + */ + initialize_events(&test_event->start); + em_free(event); + return; + } + + if (VERIFY_ATOMIC_ACCESS) + verify_atomic_access__begin(eo_ctx); + + test_fatal_if(test_event->ev_id != EV_ID_DATA_EVENT, + "Unexpected ev-id:%d", test_event->ev_id); + data_event = &test_event->data; + + core = em_core_id(); + cstat = &qtypes_shm->core_stat[core]; + + core_events = cstat->events; + seqno = data_event->seqno; + + /* Increment Q specific event counter (parallel Qs req atomic inc:s)*/ + env_atomic64_inc(&q_ctx->sched.num_events); + + test_fatal_if(data_event->src != queue, + "EO-A queue mismatch:%" PRI_QUEUE "!=%" PRI_QUEUE "", + data_event->src, queue); + + if (unlikely(core_events == 0)) { + cstat->begin_cycles = env_get_cycle(); + core_events += 1; + cstat->pt_count[eo_ctx->pair_type] += 1; + } else if (unlikely(core_events > PRINT_COUNT)) { + cstat->end_cycles = env_get_cycle(); + /* indicate that statistics should be printed this round: */ + print_events = core_events; + core_events = 0; + } else { + core_events += 1; + cstat->pt_count[eo_ctx->pair_type] += 1; + } + + if (eo_ctx->ordered_pair && eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) { + /* Verify the seq nbr to make sure event order is maintained */ + verify_seqno(eo_ctx, q_ctx, seqno); + } + + dest_queue = q_ctx->sched.local_q_hdl; + data_event->src = queue; + + cstat->events = core_events; + + ret = em_send(event, dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, "EO-A em_send failure"); + } + + if (VERIFY_ATOMIC_ACCESS) + verify_atomic_access__end(eo_ctx); + + /* Print core specific statistics */ + if (unlikely(print_events)) { + int i; + + if (core == 0) + verify_all_queues_get_events(); + + print_core_stats(cstat, print_events); + + for (i = 0; i < QUEUE_TYPE_PAIRS; i++) + cstat->pt_count[i] = 0; + + /* + * Test: Verify that EO & queue _current() and + * _get_context() APIs work as expected. + */ + test_fatal_if(em_eo_current() != eo_ctx->eo_hdl, + "Invalid current EO"); + test_fatal_if(em_eo_get_context(eo_ctx->eo_hdl) != eo_context, + "Invalid current EO context"); + test_fatal_if(em_queue_current() != queue, + "Invalid current queue"); + test_fatal_if(em_queue_get_context(queue) != queue_context, + "Invalid current queue context"); + + cstat->begin_cycles = env_get_cycle(); + } +} + +/** + * @private + * + * EO receive function for EO B. + * + * Forwards events to the next processing stage (EO).
+ */ +static void +receive_b(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + eo_context_t *const eo_ctx = eo_context; + queue_context_t *const q_ctx = queue_context; + core_stat_t *cstat; + em_queue_t dest_queue; + test_event_t *test_event; + data_event_t *data_event; + int core; + uint64_t core_events; + em_status_t ret; + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (VERIFY_ATOMIC_ACCESS) + verify_atomic_access__begin(eo_ctx); + + test_event = em_event_pointer(event); + test_fatal_if(test_event->ev_id != EV_ID_DATA_EVENT, + "Unexpected ev-id:%d", test_event->ev_id); + data_event = &test_event->data; + + core = em_core_id(); + cstat = &qtypes_shm->core_stat[core]; + core_events = cstat->events; + + /* Increment Q specific event counter (parallel Qs req atomic inc:s)*/ + env_atomic64_inc(&q_ctx->sched.num_events); + + test_fatal_if(data_event->src != queue, + "EO-B queue mismatch:%" PRI_QUEUE "!=%" PRI_QUEUE "", + data_event->src, queue); + + if (eo_ctx->ordered_pair && eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC) { + /* Verify the seq nbr to make sure event order is maintained*/ + verify_seqno(eo_ctx, q_ctx, data_event->seqno); + } + + dest_queue = q_ctx->sched.local_q_hdl; + data_event->src = queue; + + if (unlikely(core_events == 0)) + cstat->begin_cycles = env_get_cycle(); + core_events++; + + cstat->events = core_events; + cstat->pt_count[eo_ctx->pair_type] += 1; + + ret = em_send(event, dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, "EO-B em_send failure"); + } + + if (VERIFY_ATOMIC_ACCESS) + verify_atomic_access__end(eo_ctx); +} + +/** + * @private + * + * EO receive function for EOs handling the local queues. + * + * Forwards events to the next processing stage (EO). 
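+ * + * Note: receive_locq() may call em_atomic_processing_end(), which + * signals that atomic processing of the current event has ended so + * that the scheduler may release the atomic context before the + * receive function returns; calling it outside of an atomic context + * is harmless.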
+ */ +static void +receive_locq(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + eo_context_t *const eo_ctx = eo_context; + queue_context_t *const q_ctx = queue_context; + test_event_t *const test_event = em_event_pointer(event); + data_event_t *data_event; + em_queue_t dest_queue; + em_status_t ret; + uint64_t queue_events; + + (void)type; + (void)queue; + (void)eo_ctx; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + test_fatal_if(test_event->ev_id != EV_ID_DATA_EVENT, + "Unexpected ev-id:%d", test_event->ev_id); + data_event = &test_event->data; + + queue_events = q_ctx->local.num_events++; + + dest_queue = data_event->dest; + + data_event->dest = data_event->src; + data_event->src = dest_queue; + + ret = em_send(event, dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, "EO-local em_send failure"); + } + + if (CALL_ATOMIC_PROCESSING_END) { + /* Call em_atomic_processing_end() every once in a while */ + if (queue_events % qtypes_shm->num_queues == q_ctx->idx) + em_atomic_processing_end(); + } +} + +static pair_type_t +get_pair_type(queue_type_pair_t *queue_type_pair) +{ + em_queue_type_t qt1 = queue_type_pair->q_type[0]; + em_queue_type_t qt2 = queue_type_pair->q_type[1]; + int in_ag1 = queue_type_pair->in_atomic_group[0]; + int in_ag2 = queue_type_pair->in_atomic_group[1]; + + switch (qt1) { + case EM_QUEUE_TYPE_ATOMIC: + switch (qt2) { + case EM_QUEUE_TYPE_ATOMIC: + if (in_ag1 && in_ag2) + return PT_AG_AG; + else if (in_ag1 || in_ag2) + return PT_AG_ATOMIC; + else + return PT_ATOMIC_ATOMIC; + + case EM_QUEUE_TYPE_PARALLEL: + if (in_ag1) + return PT_AG_PARALLEL; + else + return PT_PARALLEL_ATOMIC; + + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + if (in_ag1) + return PT_AG_PARALORD; + else + return PT_PARALORD_ATOMIC; + } + break; + + case EM_QUEUE_TYPE_PARALLEL: + switch (qt2) { + case EM_QUEUE_TYPE_ATOMIC: + if (in_ag2) + return PT_AG_PARALLEL; + else + return PT_PARALLEL_ATOMIC; + + case EM_QUEUE_TYPE_PARALLEL: + return PT_PARALLEL_PARALLEL; + + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + return PT_PARALORD_PARALLEL; + } + break; + + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + switch (qt2) { + case EM_QUEUE_TYPE_ATOMIC: + if (in_ag2) + return PT_AG_PARALORD; + else + return PT_PARALORD_ATOMIC; + + case EM_QUEUE_TYPE_PARALLEL: + return PT_PARALORD_PARALLEL; + + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + return PT_PARALORD_PARALORD; + } + break; + } + + return PT_UNDEFINED; +} + +static inline void +verify_seqno(eo_context_t *const eo_ctx, queue_context_t *const q_ctx, + uint64_t seqno) +{ + if (unlikely(qtypes_shm->teardown_in_progress)) + return; + + if (eo_ctx->owns_ag_queues == eo_ctx->peer_owns_ag_queues) { + const uint64_t max_seqno = (eo_ctx->owns_ag_queues) ? + NUM_EVENT / 3 - 1 : NUM_EVENT - 1; + + if (q_ctx->sched.seqno != seqno) { + test_error((em_status_t)__LINE__, 0xdead, + "SEQUENCE ERROR A:\t" + "queue=%" PRI_QUEUE " Q-seqno=%" PRIu64 "\t" + "Event-seqno=%" PRIu64 " PT:%i", + q_ctx->q_hdl, q_ctx->sched.seqno, seqno, + eo_ctx->pair_type); + exit(EXIT_FAILURE); + } + + if (q_ctx->sched.seqno < max_seqno) + q_ctx->sched.seqno++; + else + q_ctx->sched.seqno = 0; + } +} + +/** + * Verifies that each queue processes all its events at least once per + * statistics round. 
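+ * + * The per-queue event delta is computed wrap-around safely: + * diff = (curr >= prev) ? curr - prev : UINT64_MAX - prev + curr + 1; + * e.g. prev = UINT64_MAX - 1 and curr = 2 gives diff = 4 (illustration + * only), so a counter wrap between rounds does not cause a false + * 'too few events' report.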
+ */ +static void +verify_all_queues_get_events(void) +{ + const unsigned int num_queues = qtypes_shm->num_queues; + unsigned int i, first = 1, q_evcnt_low = 0; + uint64_t curr, prev, diff; + + for (i = 0; i < num_queues; i++) { + queue_context_t *const tmp_qctx = + &qtypes_shm->queue_context[i]; + const uint64_t min_events = (tmp_qctx->sched.in_atomic_group) ? + NUM_EVENT / 3 : NUM_EVENT; + + if (tmp_qctx->q_type == EM_QUEUE_TYPE_LOCAL) { + curr = tmp_qctx->local.num_events; + prev = tmp_qctx->local.prev_events; + tmp_qctx->local.prev_events = curr; + } else { + curr = env_atomic64_get(&tmp_qctx->sched.num_events); + prev = tmp_qctx->sched.prev_events; + tmp_qctx->sched.prev_events = curr; + } + + diff = (curr >= prev) ? + curr - prev : UINT64_MAX - prev + curr + 1; + + if (unlikely(diff < min_events)) { + const char *q_type_str; + + q_evcnt_low++; + if (first) { + first = 0; + print_event_msg_string(); + } + + switch (tmp_qctx->q_type) { + case EM_QUEUE_TYPE_ATOMIC: + if (tmp_qctx->sched.in_atomic_group) + q_type_str = "AG"; + else + q_type_str = "A "; + break; + case EM_QUEUE_TYPE_PARALLEL: + q_type_str = "P "; + break; + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + q_type_str = "PO"; + break; + case EM_QUEUE_TYPE_LOCAL: + q_type_str = "L "; + break; + default: + q_type_str = "??"; + break; + } + + APPL_PRINT("Q=%3" PRI_QUEUE "(%s cnt:%" PRIu64 ") %c", + tmp_qctx->q_hdl, q_type_str, diff, + (q_evcnt_low % 8 == 0) ? '\n' : ' '); + } + } + + if (!first) + APPL_PRINT("\nQueue count with too few events:%u\n\n", + q_evcnt_low); +} + +/** + * Try to take a spinlock and if it fails we know that another core is + * processing an event from the same atomic queue or atomic group, which + * should never happen => fatal error! The lock is for verification only, + * no sync purpose whatsoever. 
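+ * + * Usage sketch, as done in the receive functions above: + * if (VERIFY_ATOMIC_ACCESS) + * verify_atomic_access__begin(eo_ctx); + * ... process the event, assuming exclusive atomic access ... + * if (VERIFY_ATOMIC_ACCESS) + * verify_atomic_access__end(eo_ctx);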
+ */ +static inline void +verify_atomic_access__begin(eo_context_t *const eo_ctx) +{ + if (unlikely(eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC && + !env_spinlock_trylock(&eo_ctx->verify_atomic_access))) + test_error(EM_ERROR_SET_FATAL(__LINE__), 0xdead, + "EO Atomic context lost!"); +} + +/** + * Release the verification lock + */ +static inline void +verify_atomic_access__end(eo_context_t *const eo_ctx) +{ + if (unlikely(eo_ctx->q_type == EM_QUEUE_TYPE_ATOMIC)) + env_spinlock_unlock(&eo_ctx->verify_atomic_access); +} + +/** + * Print core specific statistics + */ +static void +print_core_stats(core_stat_t *const cstat, uint64_t print_events) +{ + uint64_t diff; + uint32_t hz; + double mhz; + double cycles_per_event; + uint64_t print_count; + + diff = env_cycles_diff(cstat->end_cycles, cstat->begin_cycles); + + print_count = cstat->print_count++; + cycles_per_event = (double)diff / (double)print_events; + + hz = env_core_hz(); + mhz = ((double)hz) / 1000000.0; + + APPL_PRINT(PRINT_CORE_STAT_FMT, em_core_id(), + cstat->pt_count[0], cstat->pt_count[1], cstat->pt_count[2], + cstat->pt_count[3], cstat->pt_count[4], cstat->pt_count[5], + cstat->pt_count[6], cstat->pt_count[7], cstat->pt_count[8], + cstat->pt_count[9], cycles_per_event, mhz, print_count); +} + +static void +print_event_msg_string(void) +{ + APPL_PRINT("\nToo few events detected for the following queues:\n"); +} diff --git a/programs/example/queue_group/queue_group.c b/programs/example/queue_group/queue_group.c index 1cd0c217..0b7bc11b 100644 --- a/programs/example/queue_group/queue_group.c +++ b/programs/example/queue_group/queue_group.c @@ -1,1094 +1,1094 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine queue group feature test. - * - * Creates an EO with two queues: a notification queue and a data event queue. 
- * The notif queue belongs to the default queue group and can be processed on - * any core while the data queue belongs to a newly created queue group called - * "test_qgrp". The EO-receive function receives a number of data events and - * then modifies the test queue group (i.e. changes the cores allowed to - * process events from the data event queue). The test is restarted when the - * queue group has been modified enough times to include each core at least - * once. - */ - -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -/* - * Defines & macros - */ -#define TEST_PRINT_COUNT 5 -#define TEST_QGRP_NAME_LEN EM_QUEUE_GROUP_NAME_LEN -#define TEST_QGRP_NAME_BASE "QGrp" /* Usage: QGrp001, QGrp002 */ - -/** The maximum number of cores this test supports */ -#define MAX_CORES 64 - -/** - * The number of data events to allocate, these are sent many rounds through - * the data test_queue for each core mask in the tested queue group - */ -#define EVENT_DATA_ALLOC_NBR (MAX_CORES * 16) - -/** Round 'val' to the next multiple of 'N' */ -#define ROUND_UP(val, N) ((((val) + ((N) - 1)) / (N)) * (N)) - -/** - * EO context used by the application - * - * Cache line alignment and padding taken care of in 'qgrp_shm_t' - */ -typedef struct app_eo_ctx_t { - em_eo_t eo; - - em_queue_t notif_queue; - em_queue_group_t notif_qgrp; - - em_queue_t test_queue; - em_queue_type_t test_queue_type; - /** Has the test_queue been added to the EO? */ - bool test_queue_added; - - em_queue_group_t test_qgrp; - em_event_group_t event_group; - - char test_qgrp_name[TEST_QGRP_NAME_LEN]; - int test_qgrp_name_nbr; - - em_core_mask_t core_mask_max; - - uint64_t qgrp_modify_count; - uint64_t modify_threshold; - uint64_t print_threshold; - uint64_t tot_modify_count; - uint64_t tot_modify_count_check; -} app_eo_ctx_t; - -/** - * Queue context for the test queue (receives data events, NOT notifications) - * - * Cache line alignment and padding taken care of in 'qgrp_shm_t' - */ -typedef struct app_q_ctx_t { - /* - * Use atomic operations to suit any queue type. - * An atomic queue does not need this but parallel and - * parallel-ordered do so opt to always use. 
- */ - env_atomic64_t event_count; -} app_q_ctx_t; - -/** - * Application event - */ -typedef union app_event_t { - /** Event id: notification */ - #define EVENT_NOTIF 1 - /** Event id: data */ - #define EVENT_DATA 2 - - /** Id is first in all events */ - uint32_t id; - - /** Event: notification */ - struct { - uint32_t id; - enum { - NOTIF_START_DONE, - NOTIF_RESTART, - NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST, - NOTIF_QUEUE_GROUP_MODIFY_DONE, - NOTIF_EVENT_GROUP_DATA_DONE - } type; - - em_queue_group_t used_group; - em_core_mask_t core_mask; - } notif; - - /** Event: data */ - struct { - uint32_t id; - em_queue_group_t used_group; - } data; -} app_event_t; - -/** - * Statistics for each core, pad to cache line size - */ -typedef union core_stat_t { - uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - struct { - uint64_t event_count; - }; -} core_stat_t; - -COMPILE_TIME_ASSERT(sizeof(core_stat_t) == ENV_CACHE_LINE_SIZE, - CORE_STAT_T__SIZE_ERROR); - -/** - * Queue Group test shared memory - */ -typedef struct qgrp_shm_t { - em_pool_t pool ENV_CACHE_LINE_ALIGNED; - /** The application has seen the exit_flag and is ready for tear down */ - env_atomic32_t exit_ack; - - app_eo_ctx_t app_eo_ctx ENV_CACHE_LINE_ALIGNED; - - app_q_ctx_t app_q_ctx ENV_CACHE_LINE_ALIGNED; - - core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; -} qgrp_shm_t; - -COMPILE_TIME_ASSERT(sizeof(qgrp_shm_t) % ENV_CACHE_LINE_SIZE == 0, - QGRP_SHM_T__SIZE_ERROR); -COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, app_eo_ctx) % ENV_CACHE_LINE_SIZE - == 0, OFFSETOF_EO_CTX_ERROR); -COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, app_q_ctx) % ENV_CACHE_LINE_SIZE - == 0, OFFSETOF_Q_CTX_ERROR); -COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, core_stat) % ENV_CACHE_LINE_SIZE - == 0, OFFSETOF_CORE_STAT_ERROR); - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL qgrp_shm_t *qgrp_shm; - -static void -receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context); - -static inline void -receive_event_notif(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue, app_q_ctx_t *q_ctx); - -static void -notif_start_done(app_eo_ctx_t *eo_ctx, em_event_t event, em_queue_t queue); -static void -notif_queue_group_modify_done(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue); -static void -notif_event_group_data_done(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue); - -static inline void -receive_event_data(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue, app_q_ctx_t *q_ctx); - -static void await_exit_ack(void); - -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -stop(void *eo_context, em_eo_t eo); - -static em_status_t -start_local(void *eo_context, em_eo_t eo); - -static em_status_t -stop_local(void *eo_context, em_eo_t eo); - -static void -next_core_mask(em_core_mask_t *new_mask, em_core_mask_t *max_mask, int count); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Queue Group test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - qgrp_shm = env_shared_reserve("QueueGroupSharedMem", - sizeof(qgrp_shm_t)); - em_register_error_handler(test_error_handler); - } else { - qgrp_shm = env_shared_lookup("QueueGroupSharedMem"); - } - - if (qgrp_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Queue Group test init failed on EM-core: %u\n", - em_core_id()); - } else if (core == 0) { - memset(qgrp_shm, 0, sizeof(qgrp_shm_t)); - } -} - -/** - * Startup of the Queue Group test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - app_event_t *app_event; - em_event_t event; - em_queue_group_t default_group; - em_queue_t notif_queue; - em_event_group_t event_group; - em_status_t err, start_err = EM_ERROR; - em_eo_t eo; - em_notif_t notif_tbl[1]; - int core_count = em_core_count(); - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - qgrp_shm->pool = appl_conf->pools[0]; - else - qgrp_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - qgrp_shm->pool); - - test_fatal_if(qgrp_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - test_fatal_if(core_count > MAX_CORES, - "Test started on too many cores(%i)!\n" - "Max supported core count for this test is: %u\n", - core_count, MAX_CORES); - - env_atomic32_init(&qgrp_shm->exit_ack); - env_atomic32_set(&qgrp_shm->exit_ack, 0); - - /* - * Create the application EO and queues - */ - eo = em_eo_create("test_appl_queue_group", - start, start_local, stop, stop_local, - receive, &qgrp_shm->app_eo_ctx); - - default_group = em_queue_group_find("default"); - /* Verify that the find-func worked correctly. 
*/ - test_fatal_if(default_group != EM_QUEUE_GROUP_DEFAULT, - "Default queue group(%" PRI_QGRP ") not found!", - default_group); - - notif_queue = em_queue_create("notif_queue", EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_HIGH, default_group, NULL); - test_fatal_if(notif_queue == EM_QUEUE_UNDEF, - "Notification queue creation failed!"); - - err = em_eo_add_queue_sync(eo, notif_queue); - test_fatal_if(err != EM_OK, - "Notification queue add to EO failed:%" PRI_STAT "", err); - - event_group = em_event_group_create(); - test_fatal_if(event_group == EM_EVENT_GROUP_UNDEF, - "Event group creation failed!"); - - qgrp_shm->app_eo_ctx.eo = eo; - qgrp_shm->app_eo_ctx.notif_queue = notif_queue; - qgrp_shm->app_eo_ctx.notif_qgrp = default_group; - qgrp_shm->app_eo_ctx.event_group = event_group; - - APPL_PRINT("Starting EO:%" PRI_EO "\t" - "- Notification Queue=%" PRI_QUEUE "\n", eo, notif_queue); - - event = em_alloc(sizeof(app_event_t), EM_EVENT_TYPE_SW, - qgrp_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "Notification event allocation failed"); - app_event = em_event_pointer(event); - memset(app_event, 0, sizeof(*app_event)); - app_event->notif.id = EVENT_NOTIF; - app_event->notif.type = NOTIF_START_DONE; - /* Verify group when receiving */ - app_event->notif.used_group = default_group; - - notif_tbl[0].event = event; - notif_tbl[0].queue = notif_queue; - notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; - - err = em_eo_start(eo, &start_err, NULL, 1, notif_tbl); - test_fatal_if(err != EM_OK, - "em_eo_start(%" PRI_EO "):%" PRI_STAT "", eo, err); - test_fatal_if(start_err != EM_OK, - "EO start function:%" PRI_STAT "", - start_err); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_status_t err; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %02d\n", __func__, core); - - /* Await 'exit_ack' to be set by the EO */ - await_exit_ack(); - - em_eo_t eo = qgrp_shm->app_eo_ctx.eo; - em_event_group_t egrp; - em_notif_t notif_tbl[1] = { {.event = EM_EVENT_UNDEF} }; - int num_notifs; - - err = em_eo_stop_sync(eo); - test_fatal_if(err != EM_OK, - "EO stop:%" PRI_STAT " EO:%" PRI_EO "", err, eo); - - /* No more dispatching of the EO's events, egrp can be freed */ - - egrp = qgrp_shm->app_eo_ctx.event_group; - if (!em_event_group_is_ready(egrp)) { - num_notifs = em_event_group_get_notif(egrp, 1, notif_tbl); - err = em_event_group_abort(egrp); - if (err == EM_OK && num_notifs == 1) - em_free(notif_tbl[0].event); - } - err = em_event_group_delete(egrp); - test_fatal_if(err != EM_OK, - "egrp:%" PRI_EGRP " delete:%" PRI_STAT " EO:%" PRI_EO "", - egrp, err, eo); -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %02d\n", __func__, core); - - if (core == 0) { - env_shared_free(qgrp_shm); - em_unregister_error_handler(); - } -} - -/** - * Receive function for the test EO - */ -static void -receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - app_eo_ctx_t *eo_ctx = eo_context; - app_event_t *app_event = em_event_pointer(event); - /* Only set for the test_queue */ - app_q_ctx_t *q_ctx = queue_context; - - test_fatal_if(em_get_type_major(type) != EM_EVENT_TYPE_SW, - "Unexpected event type: 0x%x", type); - - if (unlikely(appl_shm->exit_flag)) { - /* Handle exit request */ - uint32_t exit_ack = env_atomic32_get(&qgrp_shm->exit_ack); - - if (exit_ack) { - em_free(event); - return; - } - - if (app_event->id == EVENT_NOTIF && - (app_event->notif.type == 
NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST || - app_event->notif.type == NOTIF_QUEUE_GROUP_MODIFY_DONE)) { - /* can be set by multiple cores */ - if (!exit_ack) - env_atomic32_set(&qgrp_shm->exit_ack, 1); - em_free(event); - return; - } - /* - * Handle events normally until a MODIFY_DONE has been - * received and exit_ack has been set. - */ - } - - switch (app_event->id) { - case EVENT_NOTIF: - receive_event_notif(eo_ctx, event, queue, q_ctx); - break; - case EVENT_DATA: - receive_event_data(eo_ctx, event, queue, q_ctx); - break; - default: - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Unknown event id(%u)!", app_event->id); - break; - } -} - -/** - * Handle the notification events received through the notif_queue - */ -static inline void -receive_event_notif(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue, app_q_ctx_t *q_ctx) -{ - app_event_t *app_event = em_event_pointer(event); - em_status_t err; - (void)q_ctx; - - switch (app_event->notif.type) { - case NOTIF_RESTART: - APPL_PRINT("\n" - "***********************************************\n" - "!!! Restarting test !!!\n" - "***********************************************\n" - "\n\n\n"); - eo_ctx->tot_modify_count_check = 0; - notif_start_done(eo_ctx, event, queue); - break; - - case NOTIF_START_DONE: - notif_start_done(eo_ctx, event, queue); - break; - - case NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST: - err = em_eo_add_queue_sync(eo_ctx->eo, eo_ctx->test_queue); - test_fatal_if(err != EM_OK, - "EO add queue:%" PRI_STAT "", err); - eo_ctx->test_queue_added = true; - notif_queue_group_modify_done(eo_ctx, event, queue); - break; - - case NOTIF_QUEUE_GROUP_MODIFY_DONE: - notif_queue_group_modify_done(eo_ctx, event, queue); - break; - - case NOTIF_EVENT_GROUP_DATA_DONE: - notif_event_group_data_done(eo_ctx, event, queue); - break; - - default: - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Unknown notification type:%i!", - app_event->notif.type); - break; - } -} - -/** Helper for receive_event_notif() */ -static void -notif_start_done(app_eo_ctx_t *eo_ctx, em_event_t event, em_queue_t queue) -{ - em_queue_group_t new_qgrp; - em_queue_type_t new_qtype; - const char *new_qtype_str; - em_core_mask_t core_mask; - em_notif_t notif_tbl; - em_status_t err; - const em_queue_group_t qgrp_curr = em_queue_get_group(queue); - app_event_t *app_event = em_event_pointer(event); - - test_fatal_if(app_event->notif.used_group != qgrp_curr, - "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", - app_event->notif.used_group, qgrp_curr); - - /* Create a test queue group */ - snprintf(&eo_ctx->test_qgrp_name[0], - sizeof(eo_ctx->test_qgrp_name), "%s%03i", - TEST_QGRP_NAME_BASE, eo_ctx->test_qgrp_name_nbr); - - eo_ctx->test_qgrp_name[TEST_QGRP_NAME_LEN - 1] = '\0'; - eo_ctx->test_qgrp_name_nbr = (eo_ctx->test_qgrp_name_nbr + 1) - % 1000; /* Range 0-999 */ - - /* Start with EM core-0 (it's always running) */ - em_core_mask_zero(&core_mask); - em_core_mask_set(0, &core_mask); - - /* Re-use event */ - app_event->notif.type = NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST; - app_event->notif.used_group = eo_ctx->notif_qgrp; - - notif_tbl.event = event; /* = app_event->notif */ - notif_tbl.queue = queue; - notif_tbl.egroup = EM_EVENT_GROUP_UNDEF; - - em_core_mask_copy(&app_event->notif.core_mask, &core_mask); - - /* - * Create the queue group! 
- */ - new_qgrp = em_queue_group_create(eo_ctx->test_qgrp_name, &core_mask, - 1, ¬if_tbl); - test_fatal_if(new_qgrp == EM_QUEUE_GROUP_UNDEF, - "Queue group creation failed!"); - - if (eo_ctx->test_qgrp != EM_QUEUE_GROUP_UNDEF) { - /* - * Delete group - no need for notifs since 'modify to zero - * core mask' already done & queue deleted from group. Do the - * delete after the create to force creation of another - * queue group -> avoids always running the test with the same - * queue group. - */ - err = em_queue_group_delete(eo_ctx->test_qgrp, 0, NULL); - test_fatal_if(err != EM_OK, - "Qgrp delete:%" PRI_STAT "", err); - } - /* Store the new queue group to use for this test round */ - eo_ctx->test_qgrp = new_qgrp; - - /* - * Create a test queue for data events. The queue belongs to - * the test queue group. Change the queue type for every new - * test run. - */ - switch (eo_ctx->test_queue_type) { - case EM_QUEUE_TYPE_ATOMIC: - new_qtype = EM_QUEUE_TYPE_PARALLEL; - new_qtype_str = "PARALLEL"; - break; - case EM_QUEUE_TYPE_PARALLEL: - new_qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; - new_qtype_str = "PARALLEL_ORDERED"; - break; - default: - new_qtype = EM_QUEUE_TYPE_ATOMIC; - new_qtype_str = "ATOMIC"; - break; - } - eo_ctx->test_queue_type = new_qtype; - eo_ctx->test_queue = em_queue_create("test_queue", - eo_ctx->test_queue_type, - EM_QUEUE_PRIO_NORMAL, - eo_ctx->test_qgrp, NULL); - test_fatal_if(eo_ctx->test_queue == EM_QUEUE_UNDEF, - "Test queue creation failed!"); - eo_ctx->test_queue_added = false; - - APPL_PRINT("\n" - "Created test queue:%" PRI_QUEUE " type:%s(%u)\t" - "queue group:%" PRI_QGRP " (name:\"%s\")\n", - eo_ctx->test_queue, new_qtype_str, eo_ctx->test_queue_type, - eo_ctx->test_qgrp, eo_ctx->test_qgrp_name); - - memset(&qgrp_shm->app_q_ctx, 0, sizeof(qgrp_shm->app_q_ctx)); - env_atomic64_init(&qgrp_shm->app_q_ctx.event_count); - - err = em_queue_set_context(eo_ctx->test_queue, &qgrp_shm->app_q_ctx); - test_fatal_if(err != EM_OK, "Set queue context:%" PRI_STAT "", err); - /* - * Synchronize EO context. Event is sent through notification, - * which might have happened before we write the eo_ctx. 
- */ - env_sync_mem(); -} - -/** Helper for receive_event_notif() */ -static void -notif_queue_group_modify_done(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue) -{ - em_status_t err; - const em_queue_group_t qgrp_curr = em_queue_get_group(queue); - app_event_t *app_event = em_event_pointer(event); - - test_fatal_if(app_event->notif.used_group != qgrp_curr, - "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", - app_event->notif.used_group, qgrp_curr); - - if (unlikely(em_core_mask_iszero(&app_event->notif.core_mask))) { - APPL_PRINT("\n" - "*************************************\n" - "All cores removed from QueueGroup!\n" - "*************************************\n"); - - test_fatal_if(eo_ctx->tot_modify_count != - eo_ctx->tot_modify_count_check, - "Modify count != actual count:\t" - "%" PRIu64 " vs %" PRIu64 "", - eo_ctx->tot_modify_count, - eo_ctx->tot_modify_count_check); - - err = em_eo_remove_queue_sync(eo_ctx->eo, - eo_ctx->test_queue); - test_fatal_if(err != EM_OK, - "Remove test queue:%" PRI_STAT "", err); - eo_ctx->test_queue_added = false; - - APPL_PRINT("Deleting test queue:%" PRI_QUEUE ",\t" - "Qgrp ID:%" PRI_QGRP " (name:\"%s\")\n", - eo_ctx->test_queue, eo_ctx->test_qgrp, - eo_ctx->test_qgrp_name); - - err = em_queue_delete(eo_ctx->test_queue); - test_fatal_if(err != EM_OK, - "Delete test queue:%" PRI_STAT "", err); - eo_ctx->test_queue = EM_QUEUE_UNDEF; - - /* - * Delete the queue group later in restart after the - * creation of a new group. This forces the creation - * and usage of at least two different queue groups. - */ - app_event->notif.id = EVENT_NOTIF; - app_event->notif.type = NOTIF_RESTART; - app_event->notif.used_group = eo_ctx->notif_qgrp; - err = em_send(event, eo_ctx->notif_queue); - if (unlikely(err != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send to notif queue:%" PRI_STAT "", err); - } - } else { - em_notif_t egroup_notif_tbl[1]; - int i; - - /* Reuse the event */ - app_event->notif.id = EVENT_NOTIF; - app_event->notif.type = NOTIF_EVENT_GROUP_DATA_DONE; - app_event->notif.used_group = eo_ctx->notif_qgrp; - - egroup_notif_tbl[0].event = event; - egroup_notif_tbl[0].queue = eo_ctx->notif_queue; - egroup_notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; - - err = em_event_group_apply(eo_ctx->event_group, - eo_ctx->modify_threshold, 1, - egroup_notif_tbl); - test_fatal_if(err != EM_OK, - "em_event_group_apply():%" PRI_STAT "", err); - - for (i = 0; i < EVENT_DATA_ALLOC_NBR; i++) { - em_event_t ev_data = em_alloc(sizeof(app_event_t), - EM_EVENT_TYPE_SW, - qgrp_shm->pool); - test_fatal_if(ev_data == EM_EVENT_UNDEF, - "Event alloc failed!"); - - app_event_t *app_event = em_event_pointer(ev_data); - - app_event->id = EVENT_DATA; - app_event->data.used_group = eo_ctx->test_qgrp; - - err = em_send_group(ev_data, eo_ctx->test_queue, - eo_ctx->event_group); - if (unlikely(err != EM_OK)) { - em_free(ev_data); - test_fatal_if(!appl_shm->exit_flag, - "Send to test queue:%" PRI_STAT "", - err); - } - } - } -} - -/** Helper for receive_event_notif() */ -static void -notif_event_group_data_done(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue) -{ - em_core_mask_t core_mask, used_mask; - em_notif_t notif_tbl; - em_status_t err; - int core_count; - int i; - const em_queue_group_t qgrp_curr = em_queue_get_group(queue); - app_event_t *app_event = em_event_pointer(event); - - test_fatal_if(app_event->notif.used_group != qgrp_curr, - "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", - app_event->notif.used_group, qgrp_curr); - - uint64_t mod_cnt 
= ++eo_ctx->qgrp_modify_count; - - eo_ctx->tot_modify_count_check++; - - err = em_queue_group_get_mask(eo_ctx->test_qgrp, &used_mask); - test_fatal_if(err != EM_OK, - "Get queue group mask:%" PRI_STAT "", err); - - /* Get the next core mask for the test group */ - next_core_mask(/*New*/ &core_mask, /*Max*/ &eo_ctx->core_mask_max, - eo_ctx->tot_modify_count_check); - - if (mod_cnt >= eo_ctx->print_threshold || - em_core_mask_iszero(&core_mask)) { - char used_mask_str[EM_CORE_MASK_STRLEN]; - char core_mask_str[EM_CORE_MASK_STRLEN]; - - em_core_mask_tostr(used_mask_str, EM_CORE_MASK_STRLEN, - &used_mask); - em_core_mask_tostr(core_mask_str, EM_CORE_MASK_STRLEN, - &core_mask); - APPL_PRINT("\n" - "****************************************\n" - "Received %" PRIu64 " events on Q:%" PRI_QUEUE ":\n" - " QueueGroup:%" PRI_QGRP ", Curr Coremask:%s\n" - "Now Modifying:\n" - " QueueGroup:%" PRI_QGRP ", New Coremask:%s\n" - "****************************************\n", - env_atomic64_get(&qgrp_shm->app_q_ctx.event_count), - eo_ctx->test_queue, eo_ctx->test_qgrp, - used_mask_str, eo_ctx->test_qgrp, core_mask_str); - - eo_ctx->qgrp_modify_count = 0; - } - - /* - * Sanity check: verify that all cores that process the queue - * group actually received events and that other cores do not - * get any events. - */ - core_count = em_core_count(); - for (i = 0; i < core_count; i++) { - const uint64_t ev_count = qgrp_shm->core_stat[i].event_count; - char mstr[EM_CORE_MASK_STRLEN]; - - if (em_core_mask_isset(i, &used_mask)) { - if (unlikely(ev_count == 0)) { - em_core_mask_tostr(mstr, EM_CORE_MASK_STRLEN, - &used_mask); - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "No events on core%i, mask:%s", - i, mstr); - } - } else if (unlikely(ev_count > 0)) { - em_core_mask_tostr(mstr, EM_CORE_MASK_STRLEN, - &used_mask); - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Events:%" PRIu64 " on inv.core%i, mask:%s", - ev_count, i, mstr); - } - } - - memset(qgrp_shm->core_stat, 0, sizeof(qgrp_shm->core_stat)); - env_atomic64_set(&qgrp_shm->app_q_ctx.event_count, 0); - - /* Reuse the event */ - app_event->id = EVENT_NOTIF; - app_event->notif.type = NOTIF_QUEUE_GROUP_MODIFY_DONE; - app_event->notif.used_group = eo_ctx->notif_qgrp; - em_core_mask_copy(&app_event->notif.core_mask, &core_mask); - - notif_tbl.event = event; - notif_tbl.queue = eo_ctx->notif_queue; - notif_tbl.egroup = EM_EVENT_GROUP_UNDEF; - - err = em_queue_group_modify(eo_ctx->test_qgrp, &core_mask, - 1, ¬if_tbl); - test_fatal_if(err != EM_OK, - "em_queue_group_modify():%" PRI_STAT "", err); -} - -/** - * Handle the test data events received through the test_queue - * - * Check that the queue group is valid and send the data back to the same - * queue for another round. - * The last event should trigger a notification event to be sent to the - * notif_queue to begin the queue group modification sequence. 
- */ -static inline void -receive_event_data(app_eo_ctx_t *eo_ctx, em_event_t event, - em_queue_t queue, app_q_ctx_t *q_ctx) -{ - int core_id = em_core_id(); - app_event_t *app_event = em_event_pointer(event); - em_queue_group_t qgrp_curr = em_queue_get_group(queue); - em_core_mask_t used_mask; - em_status_t err; - const uint64_t event_count = - env_atomic64_add_return(&q_ctx->event_count, 1); - qgrp_shm->core_stat[core_id].event_count++; - - /* Verify that the queue group is correct & expected */ - test_fatal_if(app_event->data.used_group != qgrp_curr, - "Queue grp mismatch:%" PRI_QGRP "!=%" PRI_QGRP "", - app_event->data.used_group, qgrp_curr); - - /* Verify that this core is a valid receiver of events in this group */ - err = em_queue_group_get_mask(qgrp_curr, &used_mask); - test_fatal_if(err != EM_OK, - "Get queue group mask:%" PRI_STAT "", err); - - if (unlikely(!em_core_mask_isset(core_id, &used_mask))) { - char mask_str[EM_CORE_MASK_STRLEN]; - - em_core_mask_tostr(mask_str, EM_CORE_MASK_STRLEN, &used_mask); - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Core bit not set in core mask! core:%02i mask:%s", - core_id, mask_str); - } - - /* - * Handle the test data event - */ - if (event_count <= eo_ctx->modify_threshold - EVENT_DATA_ALLOC_NBR) { - /* Send the data event for another round */ - err = em_send_group(event, eo_ctx->test_queue, - eo_ctx->event_group); - if (unlikely(err != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send to test queue:%" PRI_STAT "", err); - } - } else if (event_count <= eo_ctx->modify_threshold) { - /* - * Free the events for the last round, an event group - * notification event should be triggered when the last event - * has been processed - */ - em_free(event); - } else { - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xacdc, - "Invalid event count(%u)!", event_count); - } -} - -/** - * Await exit_ack to be set by the EO. - */ -static void await_exit_ack(void) -{ - env_time_t t_max = env_time_global_from_ns(10 * 1000000000ULL); /*10s*/ - env_time_t t_now = ENV_TIME_NULL; - env_time_t t_start = env_time_global(); - env_time_t t_end = env_time_sum(t_start, t_max); - uint64_t ns; - uint32_t exit_ack = 0; - - long double sec; - - do { - if (!exit_ack) - em_dispatch(1); - exit_ack = env_atomic32_get(&qgrp_shm->exit_ack); - t_now = env_time_global(); - } while (!exit_ack && env_time_cmp(t_now, t_end) < 0); - - ns = env_time_diff_ns(t_now, t_start); - sec = (long double)ns / 1000000000.0; - - if (unlikely(!exit_ack)) { - test_error(EM_ERR_TIMEOUT, 0xdead, - "Timeout: No exit_ack within %Lfs!\n", sec); - return; - } - - APPL_PRINT("exit_ack in %Lfs on EM-core:%02d => Tearing down\n", - sec, em_core_id()); -} - -/** - * Global start function for the test EO - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - app_eo_ctx_t *eo_ctx = eo_context; - uint64_t tot_modify_count = 0; - uint64_t tmp; - int ret; - - (void)eo; - (void)conf; - - APPL_PRINT("Queue Group Test - Global EO Start\n"); - - snprintf(&eo_ctx->test_qgrp_name[0], - sizeof(eo_ctx->test_qgrp_name), - "%s%03i", TEST_QGRP_NAME_BASE, 0); - - em_core_mask_zero(&eo_ctx->core_mask_max); - em_core_mask_set_count(em_core_count(), &eo_ctx->core_mask_max); - - /* - * The values used below in calculations are derived from the way the - * next_core_mask() function calculates the next core mask to use. 
- */ - ret = em_core_mask_get_bits(&tmp, 1, &eo_ctx->core_mask_max); - if (unlikely(ret != 1)) { - char mask_str[EM_CORE_MASK_STRLEN]; - - em_core_mask_tostr(mask_str, EM_CORE_MASK_STRLEN, - &eo_ctx->core_mask_max); - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "em_core_mask_get_bits(coremask=%s), ret=%i", - mask_str, ret); - } - - do { - tot_modify_count += (tmp & 0xFF) + 1; - tmp = (tmp >> 4); - if (tmp < 0x10) - break; - } while (tmp); - - tot_modify_count -= 1; - - eo_ctx->tot_modify_count = tot_modify_count; - eo_ctx->tot_modify_count_check = 0; - - eo_ctx->print_threshold = tot_modify_count / TEST_PRINT_COUNT; - - if (eo_ctx->print_threshold == 0) - eo_ctx->print_threshold = 1; - - /* - * 256*15 - 1 is the maximum number of core masks tested when 64 - * cores (max) are running this test. - */ - eo_ctx->modify_threshold = - ((256 * 15 * 0x1000) - 1) / tot_modify_count; - eo_ctx->modify_threshold = ROUND_UP(eo_ctx->modify_threshold, - EVENT_DATA_ALLOC_NBR); - - APPL_PRINT("\n" - "*******************************************************\n" - "Test threshold values set:\n" - " Tot group modifies: %" PRIu64 "\n" - " Events received on group before modify: %" PRIu64 "\n" - " Group modify print threshold: %" PRIu64 "\n" - "*******************************************************\n" - "\n", - tot_modify_count, eo_ctx->modify_threshold, - eo_ctx->print_threshold); - - return EM_OK; -} - -/** - * Global stop function for the test EO - */ -static em_status_t -stop(void *eo_context, em_eo_t eo) -{ - em_status_t err; - app_eo_ctx_t *eo_ctx = eo_context; - - /* remove and delete all of the EO's queues */ - err = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(err != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - err, eo); - if (eo_ctx->test_queue != EM_QUEUE_UNDEF && !eo_ctx->test_queue_added) { - err = em_queue_delete(eo_ctx->test_queue); - test_fatal_if(err != EM_OK, - "Delete test queue:%" PRI_STAT "", err); - } - - /* delete the EO at the end of the stop-function */ - err = em_eo_delete(eo); - test_fatal_if(err != EM_OK, - "EO delete:%" PRI_STAT " EO:%" PRI_EO "", - err, eo); - APPL_PRINT("Queue Group Test - Global EO Stop\n"); - - return EM_OK; -} - -/** - * Local start function for the test EO - */ -static em_status_t -start_local(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - (void)eo; - - APPL_PRINT("Queue Group Test - Local EO Start: EM-core:%02d\n", - em_core_id()); - return EM_OK; -} - -/** - * Local stop function for the test EO - */ -static em_status_t -stop_local(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - (void)eo; - - APPL_PRINT("Queue Group Test - Local EO Stop: EM-core:%02d\n", - em_core_id()); - return EM_OK; -} - -/** - * Update the core mask: - * E.g. if max_mask is 0xFFFF: 0x0001-0x0100 (256 masks), - * 0x0010->0x1000 (256 masks), 0x0100-0x0000 (255 masks) - */ -static void -next_core_mask(em_core_mask_t *new_mask, em_core_mask_t *max_mask, int count) -{ - uint64_t mask64 = ((uint64_t)(count % 256) + 1) << (4 * (count / 256)); - - em_core_mask_zero(new_mask); - em_core_mask_set_bits(&mask64, 1, new_mask); - em_core_mask_and(new_mask, new_mask, max_mask); -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine queue group feature test. + * + * Creates an EO with two queues: a notification queue and a data event queue. + * The notif queue belongs to the default queue group and can be processed on + * any core while the data queue belongs to a newly created queue group called + * "test_qgrp". The EO-receive function receives a number of data events and + * then modifies the test queue group (i.e. changes the cores allowed to + * process events from the data event queue). The test is restarted when the + * queue group has been modified enough times to include each core at least + * once. + */ + +#include +#include + +#include +#include + +#include "cm_setup.h" +#include "cm_error_handler.h" + +/* + * Defines & macros + */ +#define TEST_PRINT_COUNT 5 +#define TEST_QGRP_NAME_LEN EM_QUEUE_GROUP_NAME_LEN +#define TEST_QGRP_NAME_BASE "QGrp" /* Usage: QGrp001, QGrp002 */ + +/** The maximum number of cores this test supports */ +#define MAX_CORES 64 + +/** + * The number of data events to allocate, these are sent many rounds through + * the data test_queue for each core mask in the tested queue group + */ +#define EVENT_DATA_ALLOC_NBR (MAX_CORES * 16) + +/** Round 'val' to the next multiple of 'N' */ +#define ROUND_UP(val, N) ((((val) + ((N) - 1)) / (N)) * (N)) + +/** + * EO context used by the application + * + * Cache line alignment and padding taken care of in 'qgrp_shm_t' + */ +typedef struct app_eo_ctx_t { + em_eo_t eo; + + em_queue_t notif_queue; + em_queue_group_t notif_qgrp; + + em_queue_t test_queue; + em_queue_type_t test_queue_type; + /** Has the test_queue been added to the EO? 
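+ * + * Tracked so that the EO's global stop function knows whether + * test_queue still needs a separate em_queue_delete(): + * em_eo_remove_queue_all_sync(eo, EM_TRUE) only deletes queues + * that are (still) added to the EO.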
*/ + bool test_queue_added; + + em_queue_group_t test_qgrp; + em_event_group_t event_group; + + char test_qgrp_name[TEST_QGRP_NAME_LEN]; + int test_qgrp_name_nbr; + + em_core_mask_t core_mask_max; + + uint64_t qgrp_modify_count; + uint64_t modify_threshold; + uint64_t print_threshold; + uint64_t tot_modify_count; + uint64_t tot_modify_count_check; +} app_eo_ctx_t; + +/** + * Queue context for the test queue (receives data events, NOT notifications) + * + * Cache line alignment and padding taken care of in 'qgrp_shm_t' + */ +typedef struct app_q_ctx_t { + /* + * Use atomic operations to suit any queue type. + * An atomic queue does not need this, but parallel and + * parallel-ordered queues do, so opt to always use them. + */ + env_atomic64_t event_count; +} app_q_ctx_t; + +/** + * Application event + */ +typedef union app_event_t { + /** Event id: notification */ + #define EVENT_NOTIF 1 + /** Event id: data */ + #define EVENT_DATA 2 + + /** Id is first in all events */ + uint32_t id; + + /** Event: notification */ + struct { + uint32_t id; + enum { + NOTIF_START_DONE, + NOTIF_RESTART, + NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST, + NOTIF_QUEUE_GROUP_MODIFY_DONE, + NOTIF_EVENT_GROUP_DATA_DONE + } type; + + em_queue_group_t used_group; + em_core_mask_t core_mask; + } notif; + + /** Event: data */ + struct { + uint32_t id; + em_queue_group_t used_group; + } data; +} app_event_t; + +/** + * Statistics for each core, pad to cache line size + */ +typedef union core_stat_t { + uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; + struct { + uint64_t event_count; + }; +} core_stat_t; + +COMPILE_TIME_ASSERT(sizeof(core_stat_t) == ENV_CACHE_LINE_SIZE, + CORE_STAT_T__SIZE_ERROR); + +/** + * Queue Group test shared memory + */ +typedef struct qgrp_shm_t { + em_pool_t pool ENV_CACHE_LINE_ALIGNED; + /** The application has seen the exit_flag and is ready for tear down */ + env_atomic32_t exit_ack; + + app_eo_ctx_t app_eo_ctx ENV_CACHE_LINE_ALIGNED; + + app_q_ctx_t app_q_ctx ENV_CACHE_LINE_ALIGNED; + + core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; +} qgrp_shm_t; + +COMPILE_TIME_ASSERT(sizeof(qgrp_shm_t) % ENV_CACHE_LINE_SIZE == 0, + QGRP_SHM_T__SIZE_ERROR); +COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, app_eo_ctx) % ENV_CACHE_LINE_SIZE + == 0, OFFSETOF_EO_CTX_ERROR); +COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, app_q_ctx) % ENV_CACHE_LINE_SIZE + == 0, OFFSETOF_Q_CTX_ERROR); +COMPILE_TIME_ASSERT(offsetof(qgrp_shm_t, core_stat) % ENV_CACHE_LINE_SIZE + == 0, OFFSETOF_CORE_STAT_ERROR); + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL qgrp_shm_t *qgrp_shm; + +static void +receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context); + +static inline void +receive_event_notif(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue, app_q_ctx_t *q_ctx); + +static void +notif_start_done(app_eo_ctx_t *eo_ctx, em_event_t event, em_queue_t queue); +static void +notif_queue_group_modify_done(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue); +static void +notif_event_group_data_done(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue); + +static inline void +receive_event_data(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue, app_q_ctx_t *q_ctx); + +static void await_exit_ack(void); + +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +stop(void *eo_context, em_eo_t eo); + +static em_status_t +start_local(void *eo_context, em_eo_t eo); + +static em_status_t +stop_local(void
*eo_context, em_eo_t eo); + +static void +next_core_mask(em_core_mask_t *new_mask, em_core_mask_t *max_mask, int count); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Queue Group test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + qgrp_shm = env_shared_reserve("QueueGroupSharedMem", + sizeof(qgrp_shm_t)); + em_register_error_handler(test_error_handler); + } else { + qgrp_shm = env_shared_lookup("QueueGroupSharedMem"); + } + + if (qgrp_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Queue Group test init failed on EM-core: %u\n", + em_core_id()); + } else if (core == 0) { + memset(qgrp_shm, 0, sizeof(qgrp_shm_t)); + } +} + +/** + * Startup of the Queue Group test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + app_event_t *app_event; + em_event_t event; + em_queue_group_t default_group; + em_queue_t notif_queue; + em_event_group_t event_group; + em_status_t err, start_err = EM_ERROR; + em_eo_t eo; + em_notif_t notif_tbl[1]; + int core_count = em_core_count(); + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + qgrp_shm->pool = appl_conf->pools[0]; + else + qgrp_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + qgrp_shm->pool); + + test_fatal_if(qgrp_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + test_fatal_if(core_count > MAX_CORES, + "Test started on too many cores(%i)!\n" + "Max supported core count for this test is: %u\n", + core_count, MAX_CORES); + + env_atomic32_init(&qgrp_shm->exit_ack); + env_atomic32_set(&qgrp_shm->exit_ack, 0); + + /* + * Create the application EO and queues + */ + eo = em_eo_create("test_appl_queue_group", + start, start_local, stop, stop_local, + receive, &qgrp_shm->app_eo_ctx); + + default_group = em_queue_group_find("default"); + /* Verify that the find-func worked correctly. 
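+	 * EM creates the default queue group at startup, so the find is
+	 * expected to succeed and to match EM_QUEUE_GROUP_DEFAULT.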
*/ + test_fatal_if(default_group != EM_QUEUE_GROUP_DEFAULT, + "Default queue group(%" PRI_QGRP ") not found!", + default_group); + + notif_queue = em_queue_create("notif_queue", EM_QUEUE_TYPE_ATOMIC, + EM_QUEUE_PRIO_HIGH, default_group, NULL); + test_fatal_if(notif_queue == EM_QUEUE_UNDEF, + "Notification queue creation failed!"); + + err = em_eo_add_queue_sync(eo, notif_queue); + test_fatal_if(err != EM_OK, + "Notification queue add to EO failed:%" PRI_STAT "", err); + + event_group = em_event_group_create(); + test_fatal_if(event_group == EM_EVENT_GROUP_UNDEF, + "Event group creation failed!"); + + qgrp_shm->app_eo_ctx.eo = eo; + qgrp_shm->app_eo_ctx.notif_queue = notif_queue; + qgrp_shm->app_eo_ctx.notif_qgrp = default_group; + qgrp_shm->app_eo_ctx.event_group = event_group; + + APPL_PRINT("Starting EO:%" PRI_EO "\t" + "- Notification Queue=%" PRI_QUEUE "\n", eo, notif_queue); + + event = em_alloc(sizeof(app_event_t), EM_EVENT_TYPE_SW, + qgrp_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, + "Notification event allocation failed"); + app_event = em_event_pointer(event); + memset(app_event, 0, sizeof(*app_event)); + app_event->notif.id = EVENT_NOTIF; + app_event->notif.type = NOTIF_START_DONE; + /* Verify group when receiving */ + app_event->notif.used_group = default_group; + + notif_tbl[0].event = event; + notif_tbl[0].queue = notif_queue; + notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; + + err = em_eo_start(eo, &start_err, NULL, 1, notif_tbl); + test_fatal_if(err != EM_OK, + "em_eo_start(%" PRI_EO "):%" PRI_STAT "", eo, err); + test_fatal_if(start_err != EM_OK, + "EO start function:%" PRI_STAT "", + start_err); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_status_t err; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %02d\n", __func__, core); + + /* Await 'exit_ack' to be set by the EO */ + await_exit_ack(); + + em_eo_t eo = qgrp_shm->app_eo_ctx.eo; + em_event_group_t egrp; + em_notif_t notif_tbl[1] = { {.event = EM_EVENT_UNDEF} }; + int num_notifs; + + err = em_eo_stop_sync(eo); + test_fatal_if(err != EM_OK, + "EO stop:%" PRI_STAT " EO:%" PRI_EO "", err, eo); + + /* No more dispatching of the EO's events, egrp can be freed */ + + egrp = qgrp_shm->app_eo_ctx.event_group; + if (!em_event_group_is_ready(egrp)) { + num_notifs = em_event_group_get_notif(egrp, 1, notif_tbl); + err = em_event_group_abort(egrp); + if (err == EM_OK && num_notifs == 1) + em_free(notif_tbl[0].event); + } + err = em_event_group_delete(egrp); + test_fatal_if(err != EM_OK, + "egrp:%" PRI_EGRP " delete:%" PRI_STAT " EO:%" PRI_EO "", + egrp, err, eo); +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %02d\n", __func__, core); + + if (core == 0) { + env_shared_free(qgrp_shm); + em_unregister_error_handler(); + } +} + +/** + * Receive function for the test EO + */ +static void +receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + app_eo_ctx_t *eo_ctx = eo_context; + app_event_t *app_event = em_event_pointer(event); + /* Only set for the test_queue */ + app_q_ctx_t *q_ctx = queue_context; + + test_fatal_if(em_get_type_major(type) != EM_EVENT_TYPE_SW, + "Unexpected event type: 0x%x", type); + + if (unlikely(appl_shm->exit_flag)) { + /* Handle exit request */ + uint32_t exit_ack = env_atomic32_get(&qgrp_shm->exit_ack); + + if (exit_ack) { + em_free(event); + return; + } + + if (app_event->id == EVENT_NOTIF && + (app_event->notif.type == 
NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST || + app_event->notif.type == NOTIF_QUEUE_GROUP_MODIFY_DONE)) { + /* can be set by multiple cores */ + if (!exit_ack) + env_atomic32_set(&qgrp_shm->exit_ack, 1); + em_free(event); + return; + } + /* + * Handle events normally until a MODIFY_DONE has been + * received and exit_ack has been set. + */ + } + + switch (app_event->id) { + case EVENT_NOTIF: + receive_event_notif(eo_ctx, event, queue, q_ctx); + break; + case EVENT_DATA: + receive_event_data(eo_ctx, event, queue, q_ctx); + break; + default: + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Unknown event id(%u)!", app_event->id); + break; + } +} + +/** + * Handle the notification events received through the notif_queue + */ +static inline void +receive_event_notif(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue, app_q_ctx_t *q_ctx) +{ + app_event_t *app_event = em_event_pointer(event); + em_status_t err; + (void)q_ctx; + + switch (app_event->notif.type) { + case NOTIF_RESTART: + APPL_PRINT("\n" + "***********************************************\n" + "!!! Restarting test !!!\n" + "***********************************************\n" + "\n\n\n"); + eo_ctx->tot_modify_count_check = 0; + notif_start_done(eo_ctx, event, queue); + break; + + case NOTIF_START_DONE: + notif_start_done(eo_ctx, event, queue); + break; + + case NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST: + err = em_eo_add_queue_sync(eo_ctx->eo, eo_ctx->test_queue); + test_fatal_if(err != EM_OK, + "EO add queue:%" PRI_STAT "", err); + eo_ctx->test_queue_added = true; + notif_queue_group_modify_done(eo_ctx, event, queue); + break; + + case NOTIF_QUEUE_GROUP_MODIFY_DONE: + notif_queue_group_modify_done(eo_ctx, event, queue); + break; + + case NOTIF_EVENT_GROUP_DATA_DONE: + notif_event_group_data_done(eo_ctx, event, queue); + break; + + default: + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Unknown notification type:%i!", + app_event->notif.type); + break; + } +} + +/** Helper for receive_event_notif() */ +static void +notif_start_done(app_eo_ctx_t *eo_ctx, em_event_t event, em_queue_t queue) +{ + em_queue_group_t new_qgrp; + em_queue_type_t new_qtype; + const char *new_qtype_str; + em_core_mask_t core_mask; + em_notif_t notif_tbl; + em_status_t err; + const em_queue_group_t qgrp_curr = em_queue_get_group(queue); + app_event_t *app_event = em_event_pointer(event); + + test_fatal_if(app_event->notif.used_group != qgrp_curr, + "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", + app_event->notif.used_group, qgrp_curr); + + /* Create a test queue group */ + snprintf(&eo_ctx->test_qgrp_name[0], + sizeof(eo_ctx->test_qgrp_name), "%s%03i", + TEST_QGRP_NAME_BASE, eo_ctx->test_qgrp_name_nbr); + + eo_ctx->test_qgrp_name[TEST_QGRP_NAME_LEN - 1] = '\0'; + eo_ctx->test_qgrp_name_nbr = (eo_ctx->test_qgrp_name_nbr + 1) + % 1000; /* Range 0-999 */ + + /* Start with EM core-0 (it's always running) */ + em_core_mask_zero(&core_mask); + em_core_mask_set(0, &core_mask); + + /* Re-use event */ + app_event->notif.type = NOTIF_QUEUE_GROUP_MODIFY_DONE_FIRST; + app_event->notif.used_group = eo_ctx->notif_qgrp; + + notif_tbl.event = event; /* = app_event->notif */ + notif_tbl.queue = queue; + notif_tbl.egroup = EM_EVENT_GROUP_UNDEF; + + em_core_mask_copy(&app_event->notif.core_mask, &core_mask); + + /* + * Create the queue group! 
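+	 * The notif event prepared above is delivered to the notif queue
+	 * once the new group is operational on the cores in 'core_mask'
+	 * (here initially only EM core-0).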
+	 */
+	new_qgrp = em_queue_group_create(eo_ctx->test_qgrp_name, &core_mask,
+					 1, &notif_tbl);
+	test_fatal_if(new_qgrp == EM_QUEUE_GROUP_UNDEF,
+		      "Queue group creation failed!");
+
+	if (eo_ctx->test_qgrp != EM_QUEUE_GROUP_UNDEF) {
+		/*
+		 * Delete group - no need for notifs since 'modify to zero
+		 * core mask' already done & queue deleted from group. Do the
+		 * delete after the create to force creation of another
+		 * queue group -> avoids always running the test with the same
+		 * queue group.
+		 */
+		err = em_queue_group_delete(eo_ctx->test_qgrp, 0, NULL);
+		test_fatal_if(err != EM_OK,
+			      "Qgrp delete:%" PRI_STAT "", err);
+	}
+	/* Store the new queue group to use for this test round */
+	eo_ctx->test_qgrp = new_qgrp;
+
+	/*
+	 * Create a test queue for data events. The queue belongs to
+	 * the test queue group. Change the queue type for every new
+	 * test run.
+	 */
+	switch (eo_ctx->test_queue_type) {
+	case EM_QUEUE_TYPE_ATOMIC:
+		new_qtype = EM_QUEUE_TYPE_PARALLEL;
+		new_qtype_str = "PARALLEL";
+		break;
+	case EM_QUEUE_TYPE_PARALLEL:
+		new_qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED;
+		new_qtype_str = "PARALLEL_ORDERED";
+		break;
+	default:
+		new_qtype = EM_QUEUE_TYPE_ATOMIC;
+		new_qtype_str = "ATOMIC";
+		break;
+	}
+	eo_ctx->test_queue_type = new_qtype;
+	eo_ctx->test_queue = em_queue_create("test_queue",
+					     eo_ctx->test_queue_type,
+					     EM_QUEUE_PRIO_NORMAL,
+					     eo_ctx->test_qgrp, NULL);
+	test_fatal_if(eo_ctx->test_queue == EM_QUEUE_UNDEF,
+		      "Test queue creation failed!");
+	eo_ctx->test_queue_added = false;
+
+	APPL_PRINT("\n"
+		   "Created test queue:%" PRI_QUEUE " type:%s(%u)\t"
+		   "queue group:%" PRI_QGRP " (name:\"%s\")\n",
+		   eo_ctx->test_queue, new_qtype_str, eo_ctx->test_queue_type,
+		   eo_ctx->test_qgrp, eo_ctx->test_qgrp_name);
+
+	memset(&qgrp_shm->app_q_ctx, 0, sizeof(qgrp_shm->app_q_ctx));
+	env_atomic64_init(&qgrp_shm->app_q_ctx.event_count);
+
+	err = em_queue_set_context(eo_ctx->test_queue, &qgrp_shm->app_q_ctx);
+	test_fatal_if(err != EM_OK, "Set queue context:%" PRI_STAT "", err);
+	/*
+	 * Synchronize EO context. Event is sent through notification,
+	 * which might have happened before we write the eo_ctx.
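+	 * env_sync_mem() ensures that the writes above are visible to the
+	 * other cores before they process that notification.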
+ */ + env_sync_mem(); +} + +/** Helper for receive_event_notif() */ +static void +notif_queue_group_modify_done(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue) +{ + em_status_t err; + const em_queue_group_t qgrp_curr = em_queue_get_group(queue); + app_event_t *app_event = em_event_pointer(event); + + test_fatal_if(app_event->notif.used_group != qgrp_curr, + "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", + app_event->notif.used_group, qgrp_curr); + + if (unlikely(em_core_mask_iszero(&app_event->notif.core_mask))) { + APPL_PRINT("\n" + "*************************************\n" + "All cores removed from QueueGroup!\n" + "*************************************\n"); + + test_fatal_if(eo_ctx->tot_modify_count != + eo_ctx->tot_modify_count_check, + "Modify count != actual count:\t" + "%" PRIu64 " vs %" PRIu64 "", + eo_ctx->tot_modify_count, + eo_ctx->tot_modify_count_check); + + err = em_eo_remove_queue_sync(eo_ctx->eo, + eo_ctx->test_queue); + test_fatal_if(err != EM_OK, + "Remove test queue:%" PRI_STAT "", err); + eo_ctx->test_queue_added = false; + + APPL_PRINT("Deleting test queue:%" PRI_QUEUE ",\t" + "Qgrp ID:%" PRI_QGRP " (name:\"%s\")\n", + eo_ctx->test_queue, eo_ctx->test_qgrp, + eo_ctx->test_qgrp_name); + + err = em_queue_delete(eo_ctx->test_queue); + test_fatal_if(err != EM_OK, + "Delete test queue:%" PRI_STAT "", err); + eo_ctx->test_queue = EM_QUEUE_UNDEF; + + /* + * Delete the queue group later in restart after the + * creation of a new group. This forces the creation + * and usage of at least two different queue groups. + */ + app_event->notif.id = EVENT_NOTIF; + app_event->notif.type = NOTIF_RESTART; + app_event->notif.used_group = eo_ctx->notif_qgrp; + err = em_send(event, eo_ctx->notif_queue); + if (unlikely(err != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send to notif queue:%" PRI_STAT "", err); + } + } else { + em_notif_t egroup_notif_tbl[1]; + int i; + + /* Reuse the event */ + app_event->notif.id = EVENT_NOTIF; + app_event->notif.type = NOTIF_EVENT_GROUP_DATA_DONE; + app_event->notif.used_group = eo_ctx->notif_qgrp; + + egroup_notif_tbl[0].event = event; + egroup_notif_tbl[0].queue = eo_ctx->notif_queue; + egroup_notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; + + err = em_event_group_apply(eo_ctx->event_group, + eo_ctx->modify_threshold, 1, + egroup_notif_tbl); + test_fatal_if(err != EM_OK, + "em_event_group_apply():%" PRI_STAT "", err); + + for (i = 0; i < EVENT_DATA_ALLOC_NBR; i++) { + em_event_t ev_data = em_alloc(sizeof(app_event_t), + EM_EVENT_TYPE_SW, + qgrp_shm->pool); + test_fatal_if(ev_data == EM_EVENT_UNDEF, + "Event alloc failed!"); + + app_event_t *app_event = em_event_pointer(ev_data); + + app_event->id = EVENT_DATA; + app_event->data.used_group = eo_ctx->test_qgrp; + + err = em_send_group(ev_data, eo_ctx->test_queue, + eo_ctx->event_group); + if (unlikely(err != EM_OK)) { + em_free(ev_data); + test_fatal_if(!appl_shm->exit_flag, + "Send to test queue:%" PRI_STAT "", + err); + } + } + } +} + +/** Helper for receive_event_notif() */ +static void +notif_event_group_data_done(app_eo_ctx_t *eo_ctx, em_event_t event, + em_queue_t queue) +{ + em_core_mask_t core_mask, used_mask; + em_notif_t notif_tbl; + em_status_t err; + int core_count; + int i; + const em_queue_group_t qgrp_curr = em_queue_get_group(queue); + app_event_t *app_event = em_event_pointer(event); + + test_fatal_if(app_event->notif.used_group != qgrp_curr, + "Qgrp mismatch: %" PRI_QGRP "!=%" PRI_QGRP "!", + app_event->notif.used_group, qgrp_curr); + + uint64_t mod_cnt 
= ++eo_ctx->qgrp_modify_count;
+
+	eo_ctx->tot_modify_count_check++;
+
+	err = em_queue_group_get_mask(eo_ctx->test_qgrp, &used_mask);
+	test_fatal_if(err != EM_OK,
+		      "Get queue group mask:%" PRI_STAT "", err);
+
+	/* Get the next core mask for the test group */
+	next_core_mask(/*New*/ &core_mask, /*Max*/ &eo_ctx->core_mask_max,
+		       eo_ctx->tot_modify_count_check);
+
+	if (mod_cnt >= eo_ctx->print_threshold ||
+	    em_core_mask_iszero(&core_mask)) {
+		char used_mask_str[EM_CORE_MASK_STRLEN];
+		char core_mask_str[EM_CORE_MASK_STRLEN];
+
+		em_core_mask_tostr(used_mask_str, EM_CORE_MASK_STRLEN,
+				   &used_mask);
+		em_core_mask_tostr(core_mask_str, EM_CORE_MASK_STRLEN,
+				   &core_mask);
+		APPL_PRINT("\n"
+			   "****************************************\n"
+			   "Received %" PRIu64 " events on Q:%" PRI_QUEUE ":\n"
+			   " QueueGroup:%" PRI_QGRP ", Curr Coremask:%s\n"
+			   "Now Modifying:\n"
+			   " QueueGroup:%" PRI_QGRP ", New Coremask:%s\n"
+			   "****************************************\n",
+			   env_atomic64_get(&qgrp_shm->app_q_ctx.event_count),
+			   eo_ctx->test_queue, eo_ctx->test_qgrp,
+			   used_mask_str, eo_ctx->test_qgrp, core_mask_str);
+
+		eo_ctx->qgrp_modify_count = 0;
+	}
+
+	/*
+	 * Sanity check: verify that all cores that process the queue
+	 * group actually received events and that other cores do not
+	 * get any events.
+	 */
+	core_count = em_core_count();
+	for (i = 0; i < core_count; i++) {
+		const uint64_t ev_count = qgrp_shm->core_stat[i].event_count;
+		char mstr[EM_CORE_MASK_STRLEN];
+
+		if (em_core_mask_isset(i, &used_mask)) {
+			if (unlikely(ev_count == 0)) {
+				em_core_mask_tostr(mstr, EM_CORE_MASK_STRLEN,
+						   &used_mask);
+				test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead,
+					   "No events on core%i, mask:%s",
+					   i, mstr);
+			}
+		} else if (unlikely(ev_count > 0)) {
+			em_core_mask_tostr(mstr, EM_CORE_MASK_STRLEN,
+					   &used_mask);
+			test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead,
+				   "Events:%" PRIu64 " on inv.core%i, mask:%s",
+				   ev_count, i, mstr);
+		}
+	}
+
+	memset(qgrp_shm->core_stat, 0, sizeof(qgrp_shm->core_stat));
+	env_atomic64_set(&qgrp_shm->app_q_ctx.event_count, 0);
+
+	/* Reuse the event */
+	app_event->id = EVENT_NOTIF;
+	app_event->notif.type = NOTIF_QUEUE_GROUP_MODIFY_DONE;
+	app_event->notif.used_group = eo_ctx->notif_qgrp;
+	em_core_mask_copy(&app_event->notif.core_mask, &core_mask);
+
+	notif_tbl.event = event;
+	notif_tbl.queue = eo_ctx->notif_queue;
+	notif_tbl.egroup = EM_EVENT_GROUP_UNDEF;
+
+	err = em_queue_group_modify(eo_ctx->test_qgrp, &core_mask,
+				    1, &notif_tbl);
+	test_fatal_if(err != EM_OK,
+		      "em_queue_group_modify():%" PRI_STAT "", err);
+}
+
+/**
+ * Handle the test data events received through the test_queue
+ *
+ * Check that the queue group is valid and send the data back to the same
+ * queue for another round.
+ * The last event should trigger a notification event to be sent to the
+ * notif_queue to begin the queue group modification sequence.
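+ *
+ * The "last event" bookkeeping is done by EM itself:
+ * notif_queue_group_modify_done() applied 'modify_threshold' to the event
+ * group, so once that many data events have been processed EM sends the
+ * NOTIF_EVENT_GROUP_DATA_DONE notification given in em_event_group_apply():
+ *
+ * @code
+ *	em_event_group_apply(eo_ctx->event_group, eo_ctx->modify_threshold,
+ *			     1, egroup_notif_tbl);
+ * @endcode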
+ */
+static inline void
+receive_event_data(app_eo_ctx_t *eo_ctx, em_event_t event,
+		   em_queue_t queue, app_q_ctx_t *q_ctx)
+{
+	int core_id = em_core_id();
+	app_event_t *app_event = em_event_pointer(event);
+	em_queue_group_t qgrp_curr = em_queue_get_group(queue);
+	em_core_mask_t used_mask;
+	em_status_t err;
+	const uint64_t event_count =
+		env_atomic64_add_return(&q_ctx->event_count, 1);
+	qgrp_shm->core_stat[core_id].event_count++;
+
+	/* Verify that the queue group is correct & expected */
+	test_fatal_if(app_event->data.used_group != qgrp_curr,
+		      "Queue grp mismatch:%" PRI_QGRP "!=%" PRI_QGRP "",
+		      app_event->data.used_group, qgrp_curr);
+
+	/* Verify that this core is a valid receiver of events in this group */
+	err = em_queue_group_get_mask(qgrp_curr, &used_mask);
+	test_fatal_if(err != EM_OK,
+		      "Get queue group mask:%" PRI_STAT "", err);
+
+	if (unlikely(!em_core_mask_isset(core_id, &used_mask))) {
+		char mask_str[EM_CORE_MASK_STRLEN];
+
+		em_core_mask_tostr(mask_str, EM_CORE_MASK_STRLEN, &used_mask);
+		test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead,
+			   "Core bit not set in core mask! core:%02i mask:%s",
+			   core_id, mask_str);
+	}
+
+	/*
+	 * Handle the test data event
+	 */
+	if (event_count <= eo_ctx->modify_threshold - EVENT_DATA_ALLOC_NBR) {
+		/* Send the data event for another round */
+		err = em_send_group(event, eo_ctx->test_queue,
+				    eo_ctx->event_group);
+		if (unlikely(err != EM_OK)) {
+			em_free(event);
+			test_fatal_if(!appl_shm->exit_flag,
+				      "Send to test queue:%" PRI_STAT "", err);
+		}
+	} else if (event_count <= eo_ctx->modify_threshold) {
+		/*
+		 * Free the events of the last round; the event group
+		 * notification event is triggered once the last event
+		 * has been processed.
+		 */
+		em_free(event);
+	} else {
+		test_error(EM_ERROR_SET_FATAL(0xec0de), 0xacdc,
+			   "Invalid event count(%" PRIu64 ")!", event_count);
+	}
+}
+
+/**
+ * Await exit_ack to be set by the EO.
+ */
+static void await_exit_ack(void)
+{
+	env_time_t t_max = env_time_global_from_ns(20 * 1000000000ULL); /*20s*/
+	env_time_t t_now = ENV_TIME_NULL;
+	env_time_t t_start = env_time_global();
+	env_time_t t_end = env_time_sum(t_start, t_max);
+	uint64_t ns;
+	uint32_t exit_ack = 0;
+
+	long double sec;
+
+	do {
+		if (!exit_ack)
+			em_dispatch(1);
+		exit_ack = env_atomic32_get(&qgrp_shm->exit_ack);
+		t_now = env_time_global();
+	} while (!exit_ack && env_time_cmp(t_now, t_end) < 0);
+
+	ns = env_time_diff_ns(t_now, t_start);
+	sec = (long double)ns / 1000000000.0;
+
+	if (unlikely(!exit_ack)) {
+		test_error(EM_ERR_TIMEOUT, 0xdead,
+			   "Timeout: No exit_ack within %Lfs!\n", sec);
+		return;
+	}
+
+	APPL_PRINT("exit_ack in %Lfs on EM-core:%02d => Tearing down\n",
+		   sec, em_core_id());
+}
+
+/**
+ * Global start function for the test EO
+ */
+static em_status_t
+start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	app_eo_ctx_t *eo_ctx = eo_context;
+	uint64_t tot_modify_count = 0;
+	uint64_t tmp;
+	int ret;
+
+	(void)eo;
+	(void)conf;
+
+	APPL_PRINT("Queue Group Test - Global EO Start\n");
+
+	snprintf(&eo_ctx->test_qgrp_name[0],
+		 sizeof(eo_ctx->test_qgrp_name),
+		 "%s%03i", TEST_QGRP_NAME_BASE, 0);
+
+	em_core_mask_zero(&eo_ctx->core_mask_max);
+	em_core_mask_set_count(em_core_count(), &eo_ctx->core_mask_max);
+
+	/*
+	 * The values used below in calculations are derived from the way the
+	 * next_core_mask() function calculates the next core mask to use.
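+	 * For example, with 16 cores core_mask_max is 0xFFFF and the loop
+	 * below yields tot_modify_count = 256 + 256 + 256 - 1 = 767, i.e.
+	 * the number of masks in the sequence documented at next_core_mask().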
+ */ + ret = em_core_mask_get_bits(&tmp, 1, &eo_ctx->core_mask_max); + if (unlikely(ret != 1)) { + char mask_str[EM_CORE_MASK_STRLEN]; + + em_core_mask_tostr(mask_str, EM_CORE_MASK_STRLEN, + &eo_ctx->core_mask_max); + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "em_core_mask_get_bits(coremask=%s), ret=%i", + mask_str, ret); + } + + do { + tot_modify_count += (tmp & 0xFF) + 1; + tmp = (tmp >> 4); + if (tmp < 0x10) + break; + } while (tmp); + + tot_modify_count -= 1; + + eo_ctx->tot_modify_count = tot_modify_count; + eo_ctx->tot_modify_count_check = 0; + + eo_ctx->print_threshold = tot_modify_count / TEST_PRINT_COUNT; + + if (eo_ctx->print_threshold == 0) + eo_ctx->print_threshold = 1; + + /* + * 256*15 - 1 is the maximum number of core masks tested when 64 + * cores (max) are running this test. + */ + eo_ctx->modify_threshold = + ((256 * 15 * 0x1000) - 1) / tot_modify_count; + eo_ctx->modify_threshold = ROUND_UP(eo_ctx->modify_threshold, + EVENT_DATA_ALLOC_NBR); + + APPL_PRINT("\n" + "*******************************************************\n" + "Test threshold values set:\n" + " Tot group modifies: %" PRIu64 "\n" + " Events received on group before modify: %" PRIu64 "\n" + " Group modify print threshold: %" PRIu64 "\n" + "*******************************************************\n" + "\n", + tot_modify_count, eo_ctx->modify_threshold, + eo_ctx->print_threshold); + + return EM_OK; +} + +/** + * Global stop function for the test EO + */ +static em_status_t +stop(void *eo_context, em_eo_t eo) +{ + em_status_t err; + app_eo_ctx_t *eo_ctx = eo_context; + + /* remove and delete all of the EO's queues */ + err = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(err != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + err, eo); + if (eo_ctx->test_queue != EM_QUEUE_UNDEF && !eo_ctx->test_queue_added) { + err = em_queue_delete(eo_ctx->test_queue); + test_fatal_if(err != EM_OK, + "Delete test queue:%" PRI_STAT "", err); + } + + /* delete the EO at the end of the stop-function */ + err = em_eo_delete(eo); + test_fatal_if(err != EM_OK, + "EO delete:%" PRI_STAT " EO:%" PRI_EO "", + err, eo); + APPL_PRINT("Queue Group Test - Global EO Stop\n"); + + return EM_OK; +} + +/** + * Local start function for the test EO + */ +static em_status_t +start_local(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + (void)eo; + + APPL_PRINT("Queue Group Test - Local EO Start: EM-core:%02d\n", + em_core_id()); + return EM_OK; +} + +/** + * Local stop function for the test EO + */ +static em_status_t +stop_local(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + (void)eo; + + APPL_PRINT("Queue Group Test - Local EO Stop: EM-core:%02d\n", + em_core_id()); + return EM_OK; +} + +/** + * Update the core mask: + * E.g. if max_mask is 0xFFFF: 0x0001-0x0100 (256 masks), + * 0x0010->0x1000 (256 masks), 0x0100-0x0000 (255 masks) + */ +static void +next_core_mask(em_core_mask_t *new_mask, em_core_mask_t *max_mask, int count) +{ + uint64_t mask64 = ((uint64_t)(count % 256) + 1) << (4 * (count / 256)); + + em_core_mask_zero(new_mask); + em_core_mask_set_bits(&mask64, 1, new_mask); + em_core_mask_and(new_mask, new_mask, max_mask); +} diff --git a/programs/example/test/test.c b/programs/example/test/test.c index beda8608..7879b792 100644 --- a/programs/example/test/test.c +++ b/programs/example/test/test.c @@ -1,581 +1,581 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - /** - * @file - * - * EM-ODP test setup - */ - -#include -#include -#include - -#include -#include -#include - -#include "cm_setup.h" - -#define APPL_ESCOPE_TEST (10) - -#define PRINT_EVCOUNT (1 * 1000 * 1000) - -#define TEST_QUEUE_GROUP_NAME "test-qgroup" - -/** - * The number of test queues created and used by the test EO. - */ -#define NBR_TEST_QUEUES 3 - -/** - * Test queue context data - */ -typedef union test_queue_ctx_t { - struct { - /** Input queue (this queue) */ - em_queue_t queue; - /** Queue statistics: events dispatched from queue */ - env_atomic64_t event_count; - }; - uint8_t u8[ENV_CACHE_LINE_SIZE]; -} test_queue_ctx_t ENV_CACHE_LINE_ALIGNED; - -/** - * Core specific stats - */ -typedef union test_core_stat_t { - struct { - /** The number of events dispatched on a core */ - uint64_t event_count; - }; - uint8_t u8[ENV_CACHE_LINE_SIZE]; -} test_core_stat_t ENV_CACHE_LINE_ALIGNED; - -/** - * Test EO context data - */ -typedef struct test_eo_ctx_t { - em_queue_t notif_queue; - em_queue_t queues[NBR_TEST_QUEUES]; - test_queue_ctx_t queue_ctx[NBR_TEST_QUEUES] ENV_CACHE_LINE_ALIGNED; - test_core_stat_t core_stat[MAX_THREADS] ENV_CACHE_LINE_ALIGNED; -} test_eo_ctx_t; - -/** - * Test shared data, shared between all worker threads/processes. 
- */ -typedef struct test_shm_t { - em_eo_t test_eo; - test_eo_ctx_t test_eo_ctx; -} test_shm_t; - -static ENV_LOCAL test_shm_t *test_shm; - -/* - * Test event content: test event = em_event_pointer(event); - */ -typedef struct test_event_t { - int event_nbr; -} test_event_t; - -static em_status_t -test_eo_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -test_eo_start_local(void *eo_ctx, em_eo_t eo); - -static em_status_t -test_eo_stop(void *eo_ctx, em_eo_t eo); -/* @TBD: local stop not supported yet - * static em_status_t - * test_eo_stop_local(void *eo_ctx, em_eo_t eo); - */ -static void -setup_test_events(em_queue_t queues[], const int nbr_queues); - -static void -test_eo_receive(void *eo_ctx, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); - -static em_status_t -test_eo_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - char name[] = "TestSharedMem"; - - if (core == 0) - test_shm = env_shared_reserve(name, sizeof(test_shm_t)); - else - test_shm = env_shared_lookup(name); - - if (test_shm == NULL) - APPL_EXIT_FAILURE("%s():EM-core:%d", __func__, em_core_id()); - else if (core == 0) - memset(test_shm, 0, sizeof(test_shm_t)); -} - -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - em_status_t stat, stat_eo_start = EM_ERROR; - test_eo_ctx_t *const test_eo_ctx = &test_shm->test_eo_ctx; - em_notif_t notif_tbl[1]; - em_queue_prio_t queue_prio; - - printf("\n" - "**********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d).\n" - "**********************************************************\n" - "\n", - appl_conf->name, - NO_PATH(__FILE__), __func__, - em_core_id(), - em_core_count(), - appl_conf->num_procs, - appl_conf->num_threads); - - /* Create 3 test queues, one per scheduled queue type: */ - assert(NBR_TEST_QUEUES >= 3); - - eo = em_eo_create("test-eo", test_eo_start, test_eo_start_local, - test_eo_stop, NULL /*stop_local*/, - test_eo_receive, test_eo_ctx); - if (eo == EM_EO_UNDEF) - APPL_EXIT_FAILURE("test-eo creation failed!"); - - /* Store the EO in shared memory */ - test_shm->test_eo = eo; - - em_eo_register_error_handler(eo, test_eo_error_handler); - - memset(notif_tbl, 0, sizeof(notif_tbl)); - notif_tbl[0].event = em_alloc(sizeof(test_event_t), EM_EVENT_TYPE_SW, - EM_POOL_DEFAULT); - notif_tbl[0].queue = em_queue_create("test-q-notif", - EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; - - if (notif_tbl[0].event == EM_EVENT_UNDEF || - notif_tbl[0].queue == EM_QUEUE_UNDEF) - APPL_EXIT_FAILURE("test-eo start notif setup failed!"); - - queue_prio = em_queue_get_priority(notif_tbl[0].queue); - if (queue_prio != EM_QUEUE_PRIO_NORMAL) - APPL_EXIT_FAILURE("notif queue priority comparison failed!"); - - stat = em_eo_add_queue_sync(eo, notif_tbl[0].queue); - if (stat != EM_OK) - 
APPL_EXIT_FAILURE("test-eo add notif queue failed!"); - - test_eo_ctx->notif_queue = notif_tbl[0].queue; - - stat = em_eo_start(eo, &stat_eo_start, NULL, 1, notif_tbl); - if (stat != EM_OK || stat_eo_start != EM_OK) - APPL_EXIT_FAILURE("test-eo start failed!"); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - const em_eo_t eo = test_shm->test_eo; - em_status_t stat; - - (void)appl_conf; - - printf("%s() on EM-core %d\n", __func__, core); - - stat = em_eo_stop_sync(eo); - if (stat != EM_OK) - APPL_EXIT_FAILURE("test-eo stop failed!"); - stat = em_eo_delete(eo); - if (stat != EM_OK) - APPL_EXIT_FAILURE("test-eo delete failed!"); -} - -void -test_term(void) -{ - int core = em_core_id(); - - printf("%s() on EM-core %d\n", __func__, core); - - if (core == 0) - env_shared_free(test_shm); -} - -static em_status_t -test_eo_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf) -{ - em_status_t stat; - test_eo_ctx_t *const test_eo_ctx = eo_ctx; - int i; - em_queue_group_t test_qgrp; - char test_qgrp_name[sizeof(TEST_QUEUE_GROUP_NAME)]; - - (void)eo; - (void)conf; - - /* For queue group core mask tests: */ - em_core_mask_t mask; - char mstr[EM_CORE_MASK_STRLEN]; - const int mbits_len = (EM_MAX_CORES + 63) / 64; - int len; - uint64_t mbits[mbits_len]; - em_core_mask_t phys_mask; - - /* Queue group core mask tests: */ - stat = em_queue_group_get_mask(EM_QUEUE_GROUP_DEFAULT, &mask); - if (stat != EM_OK) - APPL_EXIT_FAILURE("em_queue_group_get_mask():%" PRI_STAT "", stat); - em_core_mask_tostr(mstr, sizeof(mstr), &mask); - em_core_mask_get_bits(mbits, mbits_len, &mask); - printf("EM_QUEUE_GROUP_DEFAULT:%s\n", mstr); - printf("EM_QUEUE_GROUP_DEFAULT bits:"); - for (i = mbits_len - 1; i >= 0; i--) - printf(" mbits[%d]:0x%" PRIx64 "", i, mbits[i]); - printf("\n"); - - em_core_mask_get_physical(&phys_mask, &mask); - len = em_core_mask_get_bits(mbits, mbits_len, &phys_mask); - if (len <= 0) - APPL_EXIT_FAILURE("em_core_mask_get_bits():%d", len); - printf("physical core mask bits:"); - for (i = len - 1; i >= 0; i--) - printf(" mbits[%d]:0x%" PRIx64 "", i, mbits[i]); - printf("\n"); - - for (i = 0; i < mbits_len; i++) - mbits[i] = 0xabbaacdcdeadbeef; - em_core_mask_set_bits(mbits, mbits_len, &mask); - em_core_mask_tostr(mstr, sizeof(mstr), &mask); - len = em_core_mask_get_bits(mbits, mbits_len, &mask); - if (len <= 0) - APPL_EXIT_FAILURE("em_core_mask_get_bits():%d", len); - printf("core mask test:%s\n", mstr); - printf("core mask test bits:"); - for (i = len - 1; i >= 0; i--) - printf(" mbits[%d]:0x%" PRIx64 "", i, mbits[i]); - printf("\n\n"); - /* end queue group core mask tests */ - - /* Create an atomic queue */ - test_eo_ctx->queues[0] = em_queue_create("test-q-atomic", - EM_QUEUE_TYPE_ATOMIC, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - if (test_eo_ctx->queues[0] == EM_QUEUE_UNDEF) - APPL_EXIT_FAILURE("test-q-atomic creation failed!"); - - /* Create a parallel queue */ - test_eo_ctx->queues[1] = em_queue_create("test-q-parallel", - EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - if (test_eo_ctx->queues[1] == EM_QUEUE_UNDEF) - APPL_EXIT_FAILURE("test-q-parallel creation failed!"); - - /* Create a parallel-ordered queue */ - test_eo_ctx->queues[2] = em_queue_create("test-q-parord", - EM_QUEUE_TYPE_PARALLEL_ORDERED, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - if (test_eo_ctx->queues[2] == EM_QUEUE_UNDEF) - APPL_EXIT_FAILURE("test-q-parord creation failed!"); - - printf("%s(): Q:%" PRI_QUEUE ", Q:%" 
PRI_QUEUE ", Q:%" PRI_QUEUE "\n", - __func__, test_eo_ctx->queues[0], test_eo_ctx->queues[1], - test_eo_ctx->queues[2]); - - stat = em_queue_set_context(test_eo_ctx->queues[0], - &test_eo_ctx->queue_ctx[0]); - stat |= em_queue_set_context(test_eo_ctx->queues[1], - &test_eo_ctx->queue_ctx[1]); - stat |= em_queue_set_context(test_eo_ctx->queues[2], - &test_eo_ctx->queue_ctx[2]); - if (stat != EM_OK) - APPL_EXIT_FAILURE("Queue context set failed!"); - - /* Initialize queue context data */ - for (i = 0; i < NBR_TEST_QUEUES; i++) { - test_queue_ctx_t *const test_queue_ctx = - em_queue_get_context(test_eo_ctx->queues[i]); - - if (test_queue_ctx == NULL) - APPL_EXIT_FAILURE("Queue context get failed!"); - /* Store the queue hdl into the queue context */ - test_queue_ctx->queue = test_eo_ctx->queues[i]; - /* Initialize the queue specific event counter */ - env_atomic64_init(&test_queue_ctx->event_count); - } - - stat = em_eo_add_queue_sync(eo, test_eo_ctx->queues[0]); - stat |= em_eo_add_queue_sync(eo, test_eo_ctx->queues[1]); - stat |= em_eo_add_queue_sync(eo, test_eo_ctx->queues[2]); - if (stat != EM_OK) - APPL_EXIT_FAILURE("EO-add-queue failed!"); - - em_core_mask_zero(&mask); - - test_qgrp = em_queue_group_create(TEST_QUEUE_GROUP_NAME, &mask, 0, - NULL); - if (test_qgrp == EM_QUEUE_GROUP_UNDEF) - APPL_EXIT_FAILURE("Test queue group creation failed!"); - - size_t sz = em_queue_group_get_name(test_qgrp, test_qgrp_name, - sizeof(TEST_QUEUE_GROUP_NAME)); - if (sz == 0) - APPL_EXIT_FAILURE("em_queue_group_get_name():%zu", sz); - if (strncmp(test_qgrp_name, TEST_QUEUE_GROUP_NAME, - sizeof(TEST_QUEUE_GROUP_NAME)) != 0) - APPL_EXIT_FAILURE("Test queue group get name failed!"); - - stat = em_queue_group_delete(test_qgrp, 0, NULL); - if (stat != EM_OK) - APPL_EXIT_FAILURE("Test queue group delete failed!"); - - em_core_mask_zero(&mask); - em_core_mask_set_count(1, &mask); - - test_qgrp = em_queue_group_create_sync(TEST_QUEUE_GROUP_NAME, &mask); - if (test_qgrp == EM_QUEUE_GROUP_UNDEF) - APPL_EXIT_FAILURE("Test queue group creation failed!"); - - stat = em_queue_group_delete(test_qgrp, 0, NULL); - if (stat != EM_OK) - APPL_EXIT_FAILURE("Test queue group delete failed!"); - - return EM_OK; -} - -static em_status_t -test_eo_start_local(void *eo_ctx, em_eo_t eo) -{ - (void)eo_ctx; - (void)eo; - - printf("%s(EO:%" PRI_EO ") on EM-core%d\n", - __func__, eo, em_core_id()); - - return EM_OK; -} - -static em_status_t -test_eo_stop(void *eo_ctx, em_eo_t eo) -{ - test_eo_ctx_t *const test_eo_ctx = &test_shm->test_eo_ctx; - em_status_t stat; - int i; - - (void)eo_ctx; - (void)eo; - - /* call to em_eo_stop() earlier has already disabled all queues */ - - for (i = 0; i < NBR_TEST_QUEUES; i++) { - test_queue_ctx_t *const queue_ctx = &test_eo_ctx->queue_ctx[i]; - - stat = em_eo_remove_queue_sync(eo, queue_ctx->queue); - if (stat != EM_OK) - APPL_EXIT_FAILURE("removing queue from eo failed!"); - stat = em_queue_delete(queue_ctx->queue); - if (stat != EM_OK) - APPL_EXIT_FAILURE("test-queue deletion failed!"); - } - - return EM_OK; -} - -/* @TBD: local stop not supported yet! 
- * static em_status_t - * test_eo_stop_local(void *eo_ctx, em_eo_t eo) - * { - * (void)eo_ctx; - * (void)eo; - * return EM_OK; - * } - */ - -static void -setup_test_events(em_queue_t queues[], const int nbr_queues) -{ - int i, j; - em_status_t stat; - - /* Send test events to the test queues */ - for (i = 0; i < nbr_queues; i++) { - for (j = 0; j < nbr_queues; j++) { - em_event_t event; - test_event_t *test_event; - const size_t event_size = sizeof(test_event_t); - - event = em_alloc(event_size, EM_EVENT_TYPE_SW, - EM_POOL_DEFAULT); - if (event == EM_EVENT_UNDEF) - APPL_EXIT_FAILURE("event alloc failed!"); - - if (event_size != em_event_get_size(event)) - APPL_EXIT_FAILURE("event alloc size error!"); - - /* Print event size info for the first alloc */ - if (i == 0 && j == 0) - printf("%s(): size:em_alloc(%zu)=actual:%zu\n", - __func__, event_size, - em_event_get_size(event)); - - test_event = em_event_pointer(event); - test_event->event_nbr = j; - - stat = em_send(event, queues[i]); - if (stat != EM_OK) - APPL_EXIT_FAILURE("event send failed!"); - } - } -} - -static void -test_eo_receive(void *eo_ctx, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx) -{ - const int core_id = em_core_id(); - test_eo_ctx_t *const test_eo_ctx = eo_ctx; - test_event_t *const test_event = em_event_pointer(event); - test_queue_ctx_t *const test_queue_ctx = (test_queue_ctx_t *)q_ctx; - em_queue_t queue_out; - em_status_t stat; - uint64_t core_evcnt; - uint64_t queue_evcnt; - int idx; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (unlikely(queue == test_eo_ctx->notif_queue)) { - printf("%s(): EO start-local notif, cores ready: ", __func__); - setup_test_events(test_eo_ctx->queues, NBR_TEST_QUEUES); - em_free(event); - return; - } - - queue_evcnt = env_atomic64_add_return(&test_queue_ctx->event_count, 1); - core_evcnt = ++test_eo_ctx->core_stat[core_id].event_count; - - idx = test_event->event_nbr % NBR_TEST_QUEUES; - test_event->event_nbr += 1; - queue_out = test_eo_ctx->queues[idx]; - - if (queue_evcnt % PRINT_EVCOUNT == 1) { - em_queue_type_t queue_type = em_queue_get_type(queue); - const char *qtype_name; - char qname[EM_QUEUE_NAME_LEN]; - size_t len; - - len = em_queue_get_name(queue, qname, sizeof(qname)); - if (len == 0) /* all test queues have names */ - APPL_EXIT_FAILURE("queue name error!"); - - switch (queue_type) { - case EM_QUEUE_TYPE_ATOMIC: - qtype_name = "type:atomic "; - break; - case EM_QUEUE_TYPE_PARALLEL: - qtype_name = "type:parallel"; - break; - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - qtype_name = "type:ordered "; - break; - default: - qtype_name = "type:undef "; - break; - } - - printf("%s:%" PRI_QUEUE "\t%s %10" PRIu64 " events\t" - "| Core%02d:%10" PRIu64 " events\t" - "| this event scheduled:%10d times\n", - qname, queue, qtype_name, queue_evcnt, - core_id, core_evcnt, test_event->event_nbr); - } - - stat = em_send(event, queue_out); - if (unlikely(stat != EM_OK)) { - em_free(event); - if (!appl_shm->exit_flag) - APPL_EXIT_FAILURE("event send failed!"); - } -} - -static em_status_t -test_eo_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args) -{ - const char *str; - - str = va_arg(args, const char*); - - printf("%s EO %" PRI_EO " error 0x%08X escope 0x%X core %d\n", - str, eo, error, escope, em_core_id()); - - return error; -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ /**
+  * @file
+  *
+  * EM-ODP test setup
+  */
+
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+
+#define APPL_ESCOPE_TEST (10)
+
+#define PRINT_EVCOUNT (1 * 1000 * 1000)
+
+#define TEST_QUEUE_GROUP_NAME "test-qgroup"
+
+/**
+ * The number of test queues created and used by the test EO.
+ */
+#define NBR_TEST_QUEUES 3
+
+/**
+ * Test queue context data
+ */
+typedef union test_queue_ctx_t {
+	struct {
+		/** Input queue (this queue) */
+		em_queue_t queue;
+		/** Queue statistics: events dispatched from queue */
+		env_atomic64_t event_count;
+	};
+	uint8_t u8[ENV_CACHE_LINE_SIZE];
+} test_queue_ctx_t ENV_CACHE_LINE_ALIGNED;
+
+/**
+ * Core specific stats
+ */
+typedef union test_core_stat_t {
+	struct {
+		/** The number of events dispatched on a core */
+		uint64_t event_count;
+	};
+	uint8_t u8[ENV_CACHE_LINE_SIZE];
+} test_core_stat_t ENV_CACHE_LINE_ALIGNED;
+
+/**
+ * Test EO context data
+ */
+typedef struct test_eo_ctx_t {
+	em_queue_t notif_queue;
+	em_queue_t queues[NBR_TEST_QUEUES];
+	test_queue_ctx_t queue_ctx[NBR_TEST_QUEUES] ENV_CACHE_LINE_ALIGNED;
+	test_core_stat_t core_stat[MAX_THREADS] ENV_CACHE_LINE_ALIGNED;
+} test_eo_ctx_t;
+
+/**
+ * Test shared data, shared between all worker threads/processes.
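+ *
+ * Reserved once by EM-core 0 in test_init() via env_shared_reserve() and
+ * looked up on all other cores with env_shared_lookup().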
+ */ +typedef struct test_shm_t { + em_eo_t test_eo; + test_eo_ctx_t test_eo_ctx; +} test_shm_t; + +static ENV_LOCAL test_shm_t *test_shm; + +/* + * Test event content: test event = em_event_pointer(event); + */ +typedef struct test_event_t { + int event_nbr; +} test_event_t; + +static em_status_t +test_eo_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +test_eo_start_local(void *eo_ctx, em_eo_t eo); + +static em_status_t +test_eo_stop(void *eo_ctx, em_eo_t eo); +/* @TBD: local stop not supported yet + * static em_status_t + * test_eo_stop_local(void *eo_ctx, em_eo_t eo); + */ +static void +setup_test_events(em_queue_t queues[], const int nbr_queues); + +static void +test_eo_receive(void *eo_ctx, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static em_status_t +test_eo_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, + va_list args); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + char name[] = "TestSharedMem"; + + if (core == 0) + test_shm = env_shared_reserve(name, sizeof(test_shm_t)); + else + test_shm = env_shared_lookup(name); + + if (test_shm == NULL) + APPL_EXIT_FAILURE("%s():EM-core:%d", __func__, em_core_id()); + else if (core == 0) + memset(test_shm, 0, sizeof(test_shm_t)); +} + +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo; + em_status_t stat, stat_eo_start = EM_ERROR; + test_eo_ctx_t *const test_eo_ctx = &test_shm->test_eo_ctx; + em_notif_t notif_tbl[1]; + em_queue_prio_t queue_prio; + + printf("\n" + "**********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d).\n" + "**********************************************************\n" + "\n", + appl_conf->name, + NO_PATH(__FILE__), __func__, + em_core_id(), + em_core_count(), + appl_conf->num_procs, + appl_conf->num_threads); + + /* Create 3 test queues, one per scheduled queue type: */ + assert(NBR_TEST_QUEUES >= 3); + + eo = em_eo_create("test-eo", test_eo_start, test_eo_start_local, + test_eo_stop, NULL /*stop_local*/, + test_eo_receive, test_eo_ctx); + if (eo == EM_EO_UNDEF) + APPL_EXIT_FAILURE("test-eo creation failed!"); + + /* Store the EO in shared memory */ + test_shm->test_eo = eo; + + em_eo_register_error_handler(eo, test_eo_error_handler); + + memset(notif_tbl, 0, sizeof(notif_tbl)); + notif_tbl[0].event = em_alloc(sizeof(test_event_t), EM_EVENT_TYPE_SW, + EM_POOL_DEFAULT); + notif_tbl[0].queue = em_queue_create("test-q-notif", + EM_QUEUE_TYPE_ATOMIC, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF; + + if (notif_tbl[0].event == EM_EVENT_UNDEF || + notif_tbl[0].queue == EM_QUEUE_UNDEF) + APPL_EXIT_FAILURE("test-eo start notif setup failed!"); + + queue_prio = em_queue_get_priority(notif_tbl[0].queue); + if (queue_prio != EM_QUEUE_PRIO_NORMAL) + APPL_EXIT_FAILURE("notif queue priority comparison failed!"); + + stat = em_eo_add_queue_sync(eo, notif_tbl[0].queue); + if (stat != EM_OK) + 
APPL_EXIT_FAILURE("test-eo add notif queue failed!"); + + test_eo_ctx->notif_queue = notif_tbl[0].queue; + + stat = em_eo_start(eo, &stat_eo_start, NULL, 1, notif_tbl); + if (stat != EM_OK || stat_eo_start != EM_OK) + APPL_EXIT_FAILURE("test-eo start failed!"); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + const em_eo_t eo = test_shm->test_eo; + em_status_t stat; + + (void)appl_conf; + + printf("%s() on EM-core %d\n", __func__, core); + + stat = em_eo_stop_sync(eo); + if (stat != EM_OK) + APPL_EXIT_FAILURE("test-eo stop failed!"); + stat = em_eo_delete(eo); + if (stat != EM_OK) + APPL_EXIT_FAILURE("test-eo delete failed!"); +} + +void +test_term(void) +{ + int core = em_core_id(); + + printf("%s() on EM-core %d\n", __func__, core); + + if (core == 0) + env_shared_free(test_shm); +} + +static em_status_t +test_eo_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf) +{ + em_status_t stat; + test_eo_ctx_t *const test_eo_ctx = eo_ctx; + int i; + em_queue_group_t test_qgrp; + char test_qgrp_name[sizeof(TEST_QUEUE_GROUP_NAME)]; + + (void)eo; + (void)conf; + + /* For queue group core mask tests: */ + em_core_mask_t mask; + char mstr[EM_CORE_MASK_STRLEN]; + const int mbits_len = (EM_MAX_CORES + 63) / 64; + int len; + uint64_t mbits[mbits_len]; + em_core_mask_t phys_mask; + + /* Queue group core mask tests: */ + stat = em_queue_group_get_mask(EM_QUEUE_GROUP_DEFAULT, &mask); + if (stat != EM_OK) + APPL_EXIT_FAILURE("em_queue_group_get_mask():%" PRI_STAT "", stat); + em_core_mask_tostr(mstr, sizeof(mstr), &mask); + em_core_mask_get_bits(mbits, mbits_len, &mask); + printf("EM_QUEUE_GROUP_DEFAULT:%s\n", mstr); + printf("EM_QUEUE_GROUP_DEFAULT bits:"); + for (i = mbits_len - 1; i >= 0; i--) + printf(" mbits[%d]:0x%" PRIx64 "", i, mbits[i]); + printf("\n"); + + em_core_mask_get_physical(&phys_mask, &mask); + len = em_core_mask_get_bits(mbits, mbits_len, &phys_mask); + if (len <= 0) + APPL_EXIT_FAILURE("em_core_mask_get_bits():%d", len); + printf("physical core mask bits:"); + for (i = len - 1; i >= 0; i--) + printf(" mbits[%d]:0x%" PRIx64 "", i, mbits[i]); + printf("\n"); + + for (i = 0; i < mbits_len; i++) + mbits[i] = 0xabbaacdcdeadbeef; + em_core_mask_set_bits(mbits, mbits_len, &mask); + em_core_mask_tostr(mstr, sizeof(mstr), &mask); + len = em_core_mask_get_bits(mbits, mbits_len, &mask); + if (len <= 0) + APPL_EXIT_FAILURE("em_core_mask_get_bits():%d", len); + printf("core mask test:%s\n", mstr); + printf("core mask test bits:"); + for (i = len - 1; i >= 0; i--) + printf(" mbits[%d]:0x%" PRIx64 "", i, mbits[i]); + printf("\n\n"); + /* end queue group core mask tests */ + + /* Create an atomic queue */ + test_eo_ctx->queues[0] = em_queue_create("test-q-atomic", + EM_QUEUE_TYPE_ATOMIC, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + if (test_eo_ctx->queues[0] == EM_QUEUE_UNDEF) + APPL_EXIT_FAILURE("test-q-atomic creation failed!"); + + /* Create a parallel queue */ + test_eo_ctx->queues[1] = em_queue_create("test-q-parallel", + EM_QUEUE_TYPE_PARALLEL, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + if (test_eo_ctx->queues[1] == EM_QUEUE_UNDEF) + APPL_EXIT_FAILURE("test-q-parallel creation failed!"); + + /* Create a parallel-ordered queue */ + test_eo_ctx->queues[2] = em_queue_create("test-q-parord", + EM_QUEUE_TYPE_PARALLEL_ORDERED, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + if (test_eo_ctx->queues[2] == EM_QUEUE_UNDEF) + APPL_EXIT_FAILURE("test-q-parord creation failed!"); + + printf("%s(): Q:%" PRI_QUEUE ", Q:%" 
PRI_QUEUE ", Q:%" PRI_QUEUE "\n", + __func__, test_eo_ctx->queues[0], test_eo_ctx->queues[1], + test_eo_ctx->queues[2]); + + stat = em_queue_set_context(test_eo_ctx->queues[0], + &test_eo_ctx->queue_ctx[0]); + stat |= em_queue_set_context(test_eo_ctx->queues[1], + &test_eo_ctx->queue_ctx[1]); + stat |= em_queue_set_context(test_eo_ctx->queues[2], + &test_eo_ctx->queue_ctx[2]); + if (stat != EM_OK) + APPL_EXIT_FAILURE("Queue context set failed!"); + + /* Initialize queue context data */ + for (i = 0; i < NBR_TEST_QUEUES; i++) { + test_queue_ctx_t *const test_queue_ctx = + em_queue_get_context(test_eo_ctx->queues[i]); + + if (test_queue_ctx == NULL) + APPL_EXIT_FAILURE("Queue context get failed!"); + /* Store the queue hdl into the queue context */ + test_queue_ctx->queue = test_eo_ctx->queues[i]; + /* Initialize the queue specific event counter */ + env_atomic64_init(&test_queue_ctx->event_count); + } + + stat = em_eo_add_queue_sync(eo, test_eo_ctx->queues[0]); + stat |= em_eo_add_queue_sync(eo, test_eo_ctx->queues[1]); + stat |= em_eo_add_queue_sync(eo, test_eo_ctx->queues[2]); + if (stat != EM_OK) + APPL_EXIT_FAILURE("EO-add-queue failed!"); + + em_core_mask_zero(&mask); + + test_qgrp = em_queue_group_create(TEST_QUEUE_GROUP_NAME, &mask, 0, + NULL); + if (test_qgrp == EM_QUEUE_GROUP_UNDEF) + APPL_EXIT_FAILURE("Test queue group creation failed!"); + + size_t sz = em_queue_group_get_name(test_qgrp, test_qgrp_name, + sizeof(TEST_QUEUE_GROUP_NAME)); + if (sz == 0) + APPL_EXIT_FAILURE("em_queue_group_get_name():%zu", sz); + if (strncmp(test_qgrp_name, TEST_QUEUE_GROUP_NAME, + sizeof(TEST_QUEUE_GROUP_NAME)) != 0) + APPL_EXIT_FAILURE("Test queue group get name failed!"); + + stat = em_queue_group_delete(test_qgrp, 0, NULL); + if (stat != EM_OK) + APPL_EXIT_FAILURE("Test queue group delete failed!"); + + em_core_mask_zero(&mask); + em_core_mask_set_count(1, &mask); + + test_qgrp = em_queue_group_create_sync(TEST_QUEUE_GROUP_NAME, &mask); + if (test_qgrp == EM_QUEUE_GROUP_UNDEF) + APPL_EXIT_FAILURE("Test queue group creation failed!"); + + stat = em_queue_group_delete(test_qgrp, 0, NULL); + if (stat != EM_OK) + APPL_EXIT_FAILURE("Test queue group delete failed!"); + + return EM_OK; +} + +static em_status_t +test_eo_start_local(void *eo_ctx, em_eo_t eo) +{ + (void)eo_ctx; + (void)eo; + + printf("%s(EO:%" PRI_EO ") on EM-core%d\n", + __func__, eo, em_core_id()); + + return EM_OK; +} + +static em_status_t +test_eo_stop(void *eo_ctx, em_eo_t eo) +{ + test_eo_ctx_t *const test_eo_ctx = &test_shm->test_eo_ctx; + em_status_t stat; + int i; + + (void)eo_ctx; + (void)eo; + + /* call to em_eo_stop() earlier has already disabled all queues */ + + for (i = 0; i < NBR_TEST_QUEUES; i++) { + test_queue_ctx_t *const queue_ctx = &test_eo_ctx->queue_ctx[i]; + + stat = em_eo_remove_queue_sync(eo, queue_ctx->queue); + if (stat != EM_OK) + APPL_EXIT_FAILURE("removing queue from eo failed!"); + stat = em_queue_delete(queue_ctx->queue); + if (stat != EM_OK) + APPL_EXIT_FAILURE("test-queue deletion failed!"); + } + + return EM_OK; +} + +/* @TBD: local stop not supported yet! 
+ * static em_status_t + * test_eo_stop_local(void *eo_ctx, em_eo_t eo) + * { + * (void)eo_ctx; + * (void)eo; + * return EM_OK; + * } + */ + +static void +setup_test_events(em_queue_t queues[], const int nbr_queues) +{ + int i, j; + em_status_t stat; + + /* Send test events to the test queues */ + for (i = 0; i < nbr_queues; i++) { + for (j = 0; j < nbr_queues; j++) { + em_event_t event; + test_event_t *test_event; + const uint32_t event_size = sizeof(test_event_t); + + event = em_alloc(event_size, EM_EVENT_TYPE_SW, + EM_POOL_DEFAULT); + if (event == EM_EVENT_UNDEF) + APPL_EXIT_FAILURE("event alloc failed!"); + + if (event_size != em_event_get_size(event)) + APPL_EXIT_FAILURE("event alloc size error!"); + + /* Print event size info for the first alloc */ + if (i == 0 && j == 0) + printf("%s(): size:em_alloc(%u)=actual:%u\n", + __func__, event_size, + em_event_get_size(event)); + + test_event = em_event_pointer(event); + test_event->event_nbr = j; + + stat = em_send(event, queues[i]); + if (stat != EM_OK) + APPL_EXIT_FAILURE("event send failed!"); + } + } +} + +static void +test_eo_receive(void *eo_ctx, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx) +{ + const int core_id = em_core_id(); + test_eo_ctx_t *const test_eo_ctx = eo_ctx; + test_event_t *const test_event = em_event_pointer(event); + test_queue_ctx_t *const test_queue_ctx = (test_queue_ctx_t *)q_ctx; + em_queue_t queue_out; + em_status_t stat; + uint64_t core_evcnt; + uint64_t queue_evcnt; + int idx; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(queue == test_eo_ctx->notif_queue)) { + printf("%s(): EO start-local notif, cores ready: ", __func__); + setup_test_events(test_eo_ctx->queues, NBR_TEST_QUEUES); + em_free(event); + return; + } + + queue_evcnt = env_atomic64_add_return(&test_queue_ctx->event_count, 1); + core_evcnt = ++test_eo_ctx->core_stat[core_id].event_count; + + idx = test_event->event_nbr % NBR_TEST_QUEUES; + test_event->event_nbr += 1; + queue_out = test_eo_ctx->queues[idx]; + + if (queue_evcnt % PRINT_EVCOUNT == 1) { + em_queue_type_t queue_type = em_queue_get_type(queue); + const char *qtype_name; + char qname[EM_QUEUE_NAME_LEN]; + size_t len; + + len = em_queue_get_name(queue, qname, sizeof(qname)); + if (len == 0) /* all test queues have names */ + APPL_EXIT_FAILURE("queue name error!"); + + switch (queue_type) { + case EM_QUEUE_TYPE_ATOMIC: + qtype_name = "type:atomic "; + break; + case EM_QUEUE_TYPE_PARALLEL: + qtype_name = "type:parallel"; + break; + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + qtype_name = "type:ordered "; + break; + default: + qtype_name = "type:undef "; + break; + } + + printf("%s:%" PRI_QUEUE "\t%s %10" PRIu64 " events\t" + "| Core%02d:%10" PRIu64 " events\t" + "| this event scheduled:%10d times\n", + qname, queue, qtype_name, queue_evcnt, + core_id, core_evcnt, test_event->event_nbr); + } + + stat = em_send(event, queue_out); + if (unlikely(stat != EM_OK)) { + em_free(event); + if (!appl_shm->exit_flag) + APPL_EXIT_FAILURE("event send failed!"); + } +} + +static em_status_t +test_eo_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, + va_list args) +{ + const char *str; + + str = va_arg(args, const char*); + + printf("%s EO %" PRI_EO " error 0x%08X escope 0x%X core %d\n", + str, eo, error, escope, em_core_id()); + + return error; +} diff --git a/programs/packet_io/Makefile.am b/programs/packet_io/Makefile.am index 73004f92..8ceb10b7 100644 --- a/programs/packet_io/Makefile.am +++ 
b/programs/packet_io/Makefile.am @@ -6,7 +6,8 @@ noinst_PROGRAMS = loopback \ loopback_local \ loopback_local_multircv \ multi_stage \ - multi_stage_local + multi_stage_local \ + l2fwd loopback_LDFLAGS = $(AM_LDFLAGS) loopback_CFLAGS = $(AM_CFLAGS) @@ -36,6 +37,10 @@ multi_stage_local_LDFLAGS = $(AM_LDFLAGS) multi_stage_local_CFLAGS = $(AM_CFLAGS) multi_stage_local_CFLAGS += -I$(top_srcdir)/src +l2fwd_LDFLAGS = $(AM_LDFLAGS) +l2fwd_CFLAGS = $(AM_CFLAGS) +l2fwd_CFLAGS += -I$(top_srcdir)/src + dist_loopback_SOURCES = loopback.c dist_loopback_multircv_SOURCES = loopback_multircv.c dist_loopback_ag_SOURCES = loopback_ag.c @@ -43,3 +48,4 @@ dist_loopback_local_SOURCES = loopback_local.c dist_loopback_local_multircv_SOURCES = loopback_local_multircv.c dist_multi_stage_SOURCES = multi_stage.c dist_multi_stage_local_SOURCES = multi_stage_local.c +dist_l2fwd_SOURCES = l2fwd.c diff --git a/programs/packet_io/l2fwd.c b/programs/packet_io/l2fwd.c new file mode 100644 index 00000000..6f95b6d6 --- /dev/null +++ b/programs/packet_io/l2fwd.c @@ -0,0 +1,553 @@ +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2022, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Simple Load Balanced Packet-IO L2 forward/loopback application. + * + * An application (EO) that receives ETH frames and sends them back. 
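
The forwarding work in this application is a plain Ethernet address swap, done by the cm_pktio helper pktio_swap_eth_addrs() used in receive_packet_multi() further below. A minimal sketch of roughly what such a swap does (a hypothetical stand-in, not code from this patch; assumes an untagged Ethernet header at the start of the frame):

	/* Swap dst/src MAC addresses in place: bytes 0-5 = dst, bytes 6-11 = src */
	static inline void swap_eth_addrs_sketch(uint8_t *frame)
	{
		uint8_t tmp[6];

		memcpy(tmp, frame, 6);       /* save dst MAC */
		memcpy(frame, frame + 6, 6); /* src -> dst */
		memcpy(frame + 6, tmp, 6);   /* saved dst -> src */
	}
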
+ */ + +#include <string.h> +#include <stdio.h> +#include <inttypes.h> + +#include <event_machine.h> +#include <event_machine/platform/env/environment.h> + +#include "cm_setup.h" +#include "cm_error_handler.h" +#include "cm_pktio.h" + +/* + * Test configuration + */ + +#define MAX_NUM_IF 4 /* max number of used interfaces */ +#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ + +#define NUM_PKTIN_QUEUES EM_MAX_CORES +#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES + +/** + * EO context + */ +typedef struct { + em_eo_t eo; + char name[32]; + /** interface count as provided by appl_conf to test_start() */ + int if_count; + /** interface ids as provided via appl_conf_t to test_start() */ + int if_ids[MAX_NUM_IF]; + /** the number of packet output queues to use per interface */ + int pktout_queues_per_if; + /* pktout queues: accessed by if_id, thus empty middle slots possible */ + em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; +} eo_context_t; + +/** + * Queue-Context, i.e. queue specific data, each queue has its own instance + */ +typedef struct { + /** a pktout queue for each interface, precalculated */ + em_queue_t pktout_queue[MAX_IF_ID + 1]; + /** queue handle */ + em_queue_t queue; +} queue_context_t; + +/** + * Packet L2fwd shared memory + */ +typedef struct { + /** EO (application) context */ + eo_context_t eo_ctx; + /** + * Array containing the contexts of all the queues handled by the EO. + * A queue context contains the flow/queue specific data for the + * application EO. + */ + queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; + + /** Ptr to the cm_pktio shared mem */ + pktio_shm_t *pktio_shm; +} l2fwd_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL l2fwd_shm_t *l2fwd_shm; + +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]); + +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo); + +static void +receive_eo_event_multi(void *eo_ctx, + em_event_t events[], int num, + em_queue_t queue, void *q_ctx); +static inline void +receive_vector(em_event_t vector, const queue_context_t *q_ctx); + +static inline void +receive_vector_multi(em_event_t vectors[], int num, + const queue_context_t *q_ctx); +static inline void +receive_packet_multi(em_event_t events[], int num, + const queue_context_t *q_ctx); + +static em_status_t +stop_eo(void *eo_context, em_eo_t eo); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Packet Loopback test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch.
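
test_init() below registers the application-wide test_error_handler. An EO can additionally install its own handler (the test.c hunk above defines test_eo_error_handler for the same purpose). A minimal sketch of such an EO-level handler, assuming the standard em_error_handler_t signature; the function name is illustrative:

	static em_status_t
	eo_error_handler_sketch(em_eo_t eo, em_status_t error, em_escope_t escope,
				va_list args)
	{
		(void)args; /* error-site specific args, if any */

		APPL_PRINT("EO:%" PRI_EO " error:0x%08X escope:0x%X core:%d\n",
			   eo, error, escope, em_core_id());
		/* the returned status is reported back to the failing EM call */
		return error;
	}

	/* installed after EO creation with:
	 * em_eo_register_error_handler(eo, eo_error_handler_sketch);
	 */
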
+ */ +void test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + l2fwd_shm = env_shared_reserve("PktL2fwdShMem", + sizeof(l2fwd_shm_t)); + em_register_error_handler(test_error_handler); + } else { + l2fwd_shm = env_shared_lookup("PktL2fwdShMem"); + } + + if (l2fwd_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Packet Loopback init failed on EM-core: %u", + em_core_id()); + else if (core == 0) { + memset(l2fwd_shm, 0, sizeof(l2fwd_shm_t)); + + odp_shm_t shm = odp_shm_lookup("pktio_shm"); + void *shm_addr = odp_shm_addr(shm); + + test_fatal_if(shm_addr == NULL, + "pktio shared mem addr lookup failed"); + l2fwd_shm->pktio_shm = shm_addr; + } +} + +/** + * Startup of the Packet Loopback test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo; + eo_context_t *eo_ctx; + em_status_t ret, start_fn_ret = EM_ERROR; + int if_id, i; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads); + + test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || + appl_conf->pktio.if_count <= 0, + "Invalid number of interfaces given:%d - need 1-%d(MAX)", + appl_conf->pktio.if_count, MAX_NUM_IF); + + pktin_mode_t pktin_mode = appl_conf->pktio.in_mode; + + test_fatal_if(!pktin_sched_mode(pktin_mode), + "Invalid pktin-mode: %s(%i).\n" + "Application:%s supports only scheduled pktin-modes: %s(%i), %s(%i), %s(%i)", + pktin_mode_str(pktin_mode), pktin_mode, + appl_conf->name, + pktin_mode_str(SCHED_PARALLEL), SCHED_PARALLEL, + pktin_mode_str(SCHED_ATOMIC), SCHED_ATOMIC, + pktin_mode_str(SCHED_ORDERED), SCHED_ORDERED); + + /* + * Create one EO + */ + eo_ctx = &l2fwd_shm->eo_ctx; + /* Initialize EO context data to '0' */ + memset(eo_ctx, 0, sizeof(eo_context_t)); + + em_eo_multircv_param_t eo_param; + + /* Init EO params */ + em_eo_multircv_param_init(&eo_param); + /* Set EO params needed by this application */ + eo_param.start = start_eo; + eo_param.local_start = start_eo_local; + eo_param.stop = stop_eo; + eo_param.receive_multi = receive_eo_event_multi; + eo_param.max_events = 0; /* use default */ + eo_param.eo_ctx = eo_ctx; + + eo = em_eo_create_multircv(appl_conf->name, &eo_param); + test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); + eo_ctx->eo = eo; + + /* Store the number of pktio interfaces used */ + eo_ctx->if_count = appl_conf->pktio.if_count; + /* Store the used interface ids */ + for (i = 0; i < appl_conf->pktio.if_count; i++) { + if_id = appl_conf->pktio.if_ids[i]; + test_fatal_if(if_id > MAX_IF_ID, + "Interface id out of range! %d > %d(MAX)", + if_id, MAX_IF_ID); + eo_ctx->if_ids[i] = if_id; + } + + /* Start the EO - queues etc. 
created in the EO start function */ + ret = em_eo_start_sync(eo, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); +} + +void test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + eo_context_t *const eo_ctx = &l2fwd_shm->eo_ctx; + em_eo_t eo = eo_ctx->eo; + em_status_t ret; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); +} + +void test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(l2fwd_shm); + em_unregister_error_handler(); + } +} + +/** + * EO start function (run once at startup on ONE core) + * + * The global start function creates the application specific queues and + * associates the queues with the EO and the packet flows it wants to process. + */ +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + em_queue_t pktout_queue; + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; /* platform specific */ + pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ + em_status_t ret; + eo_context_t *const eo_ctx = eo_context; + int if_id; + int i, j; + + (void)conf; + + /* Store the EO name in the EO-context data */ + em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); + + APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", + eo, eo_ctx->name, eo_ctx->if_count); + + /* + * Create packet output queues. + * + * Dimension the number of pktout queues to be equal to the number + * of EM cores per interface to minimize output resource contention. 
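
The EM_QUEUE_TYPE_OUTPUT queues created below are not scheduled: an em_send() to one of them invokes the registered em_output_func_t callback (here pktio_tx) together with the user argument set in output_conf. A minimal sketch of such a callback, assuming the standard signature; this hypothetical variant only consumes the events where a real one would transmit them out of the interface recorded in its args:

	static int
	output_fn_sketch(const em_event_t events[], const unsigned int num,
			 const em_queue_t output_queue, void *output_fn_args)
	{
		pktio_tx_fn_args_t *args = output_fn_args; /* if_id set below */

		(void)args;
		(void)output_queue;
		/* a real callback would transmit events[0..num-1] via the
		 * interface 'args->if_id'; this sketch simply drops them
		 */
		em_free_multi((em_event_t *)events, (int)num);

		return (int)num; /* number of events consumed */
	}
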
+ */ + test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, + "No room to store pktout queues"); + eo_ctx->pktout_queues_per_if = em_core_count(); + + memset(&queue_conf, 0, sizeof(queue_conf)); + memset(&output_conf, 0, sizeof(output_conf)); + queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; + queue_conf.min_events = 0; /* system default */ + queue_conf.conf_len = sizeof(output_conf); + queue_conf.conf = &output_conf; + + /* Output-queue callback function (em_output_func_t) */ + output_conf.output_fn = pktio_tx; + /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ + output_conf.output_fn_args = &pktio_tx_fn_args; + output_conf.args_len = sizeof(pktio_tx_fn_args_t); + /* Content of 'pktio_tx_fn_args' set in loop */ + + /* Create the packet output queues for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + char qname[EM_QUEUE_NAME_LEN]; + + snprintf(qname, sizeof(qname), "pktout-queue-%d-%d", i, j); + + /* pktout queue tied to interface id 'if_id' */ + pktio_tx_fn_args.if_id = if_id; + pktout_queue = + em_queue_create(qname, EM_QUEUE_TYPE_OUTPUT, + EM_QUEUE_PRIO_UNDEF, + EM_QUEUE_GROUP_UNDEF, &queue_conf); + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue create failed:%d,%d", i, j); + eo_ctx->pktout_queue[if_id][j] = pktout_queue; + } + } + + /* Add pktin queues to the EO */ + int if_cnt = l2fwd_shm->pktio_shm->ifs.count; + int q_ctx_idx = 0; + + for (int i = 0; i < if_cnt; i++) { + int if_qcnt = l2fwd_shm->pktio_shm->pktin.num_queues[i]; + + for (int q = 0; q < if_qcnt; q++) { + em_queue_t in_queue = l2fwd_shm->pktio_shm->pktin.sched_em_queues[i][q]; + queue_context_t *q_ctx = &l2fwd_shm->eo_q_ctx[q_ctx_idx]; + + q_ctx->queue = in_queue; + + ret = em_queue_set_context(in_queue, q_ctx); + test_fatal_if(ret != EM_OK, + "Set Q-ctx failed:%" PRI_STAT "\n" + "EO-q-ctx:%d in-Q:%" PRI_QUEUE "", + ret, q_ctx_idx, in_queue); + + ret = em_eo_add_queue_sync(eo, in_queue); + test_fatal_if(ret != EM_OK, + "Add in_queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " in-Q:%" PRI_QUEUE "", + ret, eo, in_queue); + /* + * Set the pktout queues to use for this input queue, + * one pktout queue per interface. + */ + set_pktout_queues(in_queue, eo_ctx, q_ctx->pktout_queue/*out*/); + + q_ctx_idx++; + } + } + + APPL_PRINT("EO %" PRI_EO " global start done.\n", eo); + + return EM_OK; +} + +/** + * Helper func to store the packet output queues for a specific input queue + */ +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]) +{ + int if_count = eo_ctx->if_count; + int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; + int id, i; + + for (i = 0; i < if_count; i++) { + id = eo_ctx->if_ids[i]; + pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; + } +} + +/** + * EO Local start function (run once at startup on EACH core) + + * Not really needed in this application, but included + * to demonstrate usage. 
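
Note above how set_pktout_queues() hashes the queue handle, (uintptr_t)queue % pktout_queues_per_if, to spread the input queues over the per-interface pktout queues. The receive-multi function further below then splits each dispatched batch into runs of same-type events before handling vectors and packets separately; a worked illustration of the em_event_same_type_multi() partitioning (batch contents assumed for the example):

	/* assume a received batch of six events: {VEC, VEC, PKT, PKT, PKT, VEC} */
	em_event_type_t same_type = EM_EVENT_TYPE_UNDEF;
	int num_same;

	num_same = em_event_same_type_multi(&events[0], 6, &same_type);
	/* num_same == 2, em_event_type_major(same_type) == EM_EVENT_TYPE_VECTOR */
	num_same = em_event_same_type_multi(&events[2], 4, &same_type);
	/* num_same == 3, major type == EM_EVENT_TYPE_PACKET */
	num_same = em_event_same_type_multi(&events[5], 1, &same_type);
	/* num_same == 1, major type == EM_EVENT_TYPE_VECTOR */
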
+ */ +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + + APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", + eo, eo_ctx->name, em_core_id()); + + return EM_OK; +} + +/** + * EO stop function + */ +static em_status_t +stop_eo(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + em_status_t err; + em_queue_t pktout_queue; + int if_id; + int i, j; + + APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); + + /* remove and delete all of the EO's queues */ + err = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(err != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + err, eo); + + /* Delete the packet output queues created for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktout_queue = eo_ctx->pktout_queue[if_id][j]; + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue undef:%d,%d", i, j); + err = em_queue_delete(pktout_queue); + test_fatal_if(err != EM_OK, + "Pktout queue delete failed:%d,%d", i, j); + } + } + + return EM_OK; +} + +static inline void +receive_packet_multi(em_event_t events[], int num, + const queue_context_t *q_ctx) +{ + int port = pktio_input_port(events[0]); /* same port for all from same queue */ + em_queue_t pktout_queue = q_ctx->pktout_queue[port]; + + /* Touch packet. Swap MAC addresses: src<->dst */ + for (int i = 0; i < num; i++) + pktio_swap_eth_addrs(events[i]); + + int sent = em_send_multi(events, num, pktout_queue); + + if (unlikely(sent < num)) + em_free_multi(&events[sent], num - sent); +} + +static inline void +receive_vector(em_event_t vector, const queue_context_t *q_ctx) +{ + em_event_t *ev_tbl; + + uint32_t num = em_event_vector_tbl(vector, &ev_tbl); + + if (unlikely(num == 0)) { + em_event_vector_free(vector); + return; + } + + receive_packet_multi(ev_tbl, (int)num, q_ctx); + + em_event_vector_free(vector); +} + +static inline void +receive_vector_multi(em_event_t vectors[], int num, + const queue_context_t *q_ctx) +{ + for (int i = 0; i < num; i++) + receive_vector(vectors[i], q_ctx); +} + +/** + * EO's event receive-multi function + */ +static void +receive_eo_event_multi(void *eo_ctx, + em_event_t events[], int num, + em_queue_t queue, void *queue_context) +{ + const queue_context_t *q_ctx = queue_context; + + (void)eo_ctx; + (void)queue; + + if (unlikely(appl_shm->exit_flag)) { + em_free_multi(events, num); + return; + } + + em_event_type_t same_type = EM_EVENT_TYPE_UNDEF; + int num_same; + + for (int i = 0; i < num && + (num_same = em_event_same_type_multi(&events[i], num - i, &same_type)) > 0; + i += num_same) { + em_event_type_t major_type = em_event_type_major(same_type); + + if (likely(major_type == EM_EVENT_TYPE_VECTOR)) + receive_vector_multi(&events[i], num_same, q_ctx); + else if (likely(major_type == EM_EVENT_TYPE_PACKET)) + receive_packet_multi(&events[i], num_same, q_ctx); + else + em_free_multi(&events[i], num_same); + } +} diff --git a/programs/packet_io/loopback.c b/programs/packet_io/loopback.c index 69507f98..abfa5d4e 100644 --- a/programs/packet_io/loopback.c +++ b/programs/packet_io/loopback.c @@ -1,800 +1,810 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved.
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Simple Load Balanced Packet-IO test application. - * - * An application (EO) that receives UDP datagrams and exchanges - * the src-dst addesses before sending the datagram back out. - */ - -#include <string.h> -#include <stdio.h> -#include <inttypes.h> - -#include <event_machine.h> -#include <event_machine/platform/env/environment.h> - -#include "cm_setup.h" -#include "cm_error_handler.h" -#include "cm_pktio.h" - -/* - * Test configuration - */ - -/** - * Set the used queue type for EM queues receiving packet data. - * - * Try also with EM_QUEUE_TYPE_PARALLEL or EM_QUEUE_TYPE_PARALLEL_ORDERED. - * Alt. set QUEUE_TYPE_MIX to '1' to use all queue types simultaneously. - */ -#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */ - -/** - * Test with all different queue types simultaneously: - * ATOMIC, PARALLELL, PARALLEL_ORDERED - */ -#define QUEUE_TYPE_MIX 0 /* 0=False or 1=True */ - -/** - * Create an EM queue per UDP/IP flow or use the default queue. - * - * If set to '0' then all traffic is routed through one 'default queue'(slow), - * if set to '1' each traffic flow is routed to its own EM-queue. - */ -#define QUEUE_PER_FLOW 1 /* 0=False or 1=True */ - -/** - * Select whether the UDP ports should be unique over all the IP-interfaces - * (set to 1) or reused per IP-interface (thus each UDP port is configured - * once for each IP-interface). Using '0' (not unique) makes it easier to - * copy traffic generator settings from one IF-port to another as only the - * dst-IP address has to be changed. - */ -#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ - -/** - * Select whether the input and output ports should be cross-connected.
- */ -#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ - -/** - * Enable per packet error checking - */ -#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ - -/** - * Test em_alloc and em_free per packet - * - * Alloc new event, copy event, free old event - */ -#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ - -/* Configure the IP addresses and UDP ports that this application will use */ -#define NUM_IP_ADDRS 4 -#define NUM_PORTS_PER_IP 64 - -#define IP_ADDR_A 192 -#define IP_ADDR_B 168 -#define IP_ADDR_C 1 -#define IP_ADDR_D 16 - -#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ - (IP_ADDR_C << 8) | (IP_ADDR_D)) -#define UDP_PORT_BASE 1024 -/* - * IANA Dynamic Ports (Private or Ephemeral Ports), - * from 49152 to 65535 (never assigned) - */ -/* #define UDP_PORT_BASE 0xC000 */ - -#define MAX_NUM_IF 4 /* max number of used interfaces */ -#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ - -#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) -#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES - -#define IS_ODD(x) (((x) & 0x1)) -#define IS_EVEN(x) (!IS_ODD(x)) - -/** - * EO context - */ -typedef struct { - em_eo_t eo; - char name[32]; - /** interface count as provided by appl_conf to test_start() */ - int if_count; - /** interface ids as provided via appl_conf_t to test_start() */ - int if_ids[MAX_NUM_IF]; - /** default queue: pkts/events not matching any other input criteria */ - em_queue_t default_queue; - /** all created input queues */ - em_queue_t queue[NUM_PKTIN_QUEUES]; - /** the number of packet output queues to use per interface */ - int pktout_queues_per_if; - /* pktout queues: accessed by if_id, thus empty middle slots possible */ - em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; -} eo_context_t; - -/** - * Save the dst IP, protocol and port in the queue-context. - * Verify (if error checking enabled) that the received packet matches the - * configuration for the queue. - */ -typedef struct flow_params_ { - uint32_t ipv4; - uint16_t port; - uint8_t proto; - uint8_t _pad; -} flow_params_t; - -/** - * Queue-Context, i.e. queue specific data, each queue has its own instance - */ -typedef struct { - /** a pktout queue for each interface, precalculated */ - em_queue_t pktout_queue[MAX_IF_ID + 1]; - /** saved flow params for the EM-queue */ - flow_params_t flow_params; - /** queue handle */ - em_queue_t queue; -} queue_context_t; - -/** - * Packet Loopback shared memory - */ -typedef struct { - /** EO (application) context */ - eo_context_t eo_ctx; - /** - * Array containing the contexts of all the queues handled by the EO. - * A queue context contains the flow/queue specific data for the - * application EO. 
- */ - queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; - - /** Queue context for the default queue */ - queue_context_t def_q_ctx; -} packet_loopback_shm_t; - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL packet_loopback_shm_t *pkt_shm; - -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static void -create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx); - -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]); - -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo); - -static void -receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context); - -static em_status_t -stop_eo(void *eo_context, em_eo_t eo); - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx); - -static inline em_event_t -alloc_copy_free(em_event_t event); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Packet Loopback test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - pkt_shm = env_shared_reserve("PktLoopShMem", - sizeof(packet_loopback_shm_t)); - em_register_error_handler(test_error_handler); - } else { - pkt_shm = env_shared_lookup("PktLoopShMem"); - } - - if (pkt_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Packet Loopback init failed on EM-core: %u", - em_core_id()); - else if (core == 0) - memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); -} - -/** - * Startup of the Packet Loopback test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - eo_context_t *eo_ctx; - em_status_t ret, start_fn_ret = EM_ERROR; - int if_id, i; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads); - - test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || - appl_conf->pktio.if_count <= 0, - "Invalid number of interfaces given:%d - need 1-%d(MAX)", - appl_conf->pktio.if_count, MAX_NUM_IF); - - /* - * Create one EO - */ - eo_ctx = &pkt_shm->eo_ctx; - /* Initialize EO context data to '0' */ - memset(eo_ctx, 0, sizeof(eo_context_t)); - - eo = em_eo_create(appl_conf->name, start_eo, start_eo_local, - stop_eo, NULL, receive_eo_packet, eo_ctx); - test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); - eo_ctx->eo = eo; - - /* Store the number of pktio interfaces used */ - eo_ctx->if_count = appl_conf->pktio.if_count; - /* Store the used interface ids */ - for (i = 0; i < appl_conf->pktio.if_count; i++) { - if_id = appl_conf->pktio.if_ids[i]; - test_fatal_if(if_id > MAX_IF_ID, - "Interface id out of range! %d > %d(MAX)", - if_id, MAX_IF_ID); - eo_ctx->if_ids[i] = if_id; - } - - /* Start the EO - queues etc. created in the EO start function */ - ret = em_eo_start_sync(eo, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - /* - * All input & output queues have been created and enabled in the - * EO start function, now direct pktio traffic to those queues. - */ - for (i = 0; i < NUM_PKTIN_QUEUES; i++) { - /* Direct ip_addr:udp_port into this queue */ - queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; - uint32_t ip_addr = q_ctx->flow_params.ipv4; - uint16_t port = q_ctx->flow_params.port; - uint8_t proto = q_ctx->flow_params.proto; - em_queue_t queue = q_ctx->queue; - em_queue_t tmp_q; - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - pktio_add_queue(proto, ip_addr, port, queue); - - /* Sanity checks (lookup what was configured) */ - tmp_q = pktio_lookup_sw(proto, ip_addr, port); - test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, - "Lookup fails IP:UDP %s:%d\n" - "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", - ip_str, port, queue, tmp_q); - /* Print first and last mapping */ - if (i == 0 || i == NUM_PKTIN_QUEUES - 1) - APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", - ip_str, port, tmp_q); - } - - /* - * Direct all non-lookup hit packets into this queue. 
- * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue - */ - pktio_default_queue(eo_ctx->default_queue); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; - em_eo_t eo = eo_ctx->eo; - em_status_t ret; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(pkt_shm); - em_unregister_error_handler(); - } -} - -/** - * EO start function (run once at startup on ONE core) - * - * The global start function creates the application specific queues and - * associates the queues with the EO and the packet flows it wants to process. - */ -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - em_queue_t def_queue, pktout_queue; - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; /* platform specific */ - pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ - em_status_t ret; - eo_context_t *const eo_ctx = eo_context; - queue_context_t *defq_ctx; - int if_id; - int i, j; - - (void)conf; - - /* Store the EO name in the EO-context data */ - em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); - - APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", - eo, eo_ctx->name, eo_ctx->if_count); - - /* - * Create packet output queues. - * - * Dimension the number of pktout queues to be equal to the number - * of EM cores per interface to minimize output resource contention. 
- */ - test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, - "No room to store pktout queues"); - eo_ctx->pktout_queues_per_if = em_core_count(); - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - - /* Output-queue callback function (em_output_func_t) */ - output_conf.output_fn = pktio_tx; - /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ - output_conf.output_fn_args = &pktio_tx_fn_args; - output_conf.args_len = sizeof(pktio_tx_fn_args_t); - /* Content of 'pktio_tx_fn_args' set in loop */ - - /* Create the packet output queues for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktio_tx_fn_args.if_id = if_id; - pktout_queue = - em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, &queue_conf); - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue create failed:%d,%d", i, j); - eo_ctx->pktout_queue[if_id][j] = pktout_queue; - } - } - - /* - * Default queue for all packets not mathing any - * specific input queue criteria - */ - def_queue = em_queue_create("default", QUEUE_TYPE, EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(def_queue == EM_QUEUE_UNDEF, - "Default Queue creation failed"); - - /* Store the default queue Id in the EO-context data */ - eo_ctx->default_queue = def_queue; - - /* Associate the queue with this EO */ - ret = em_eo_add_queue_sync(eo, def_queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " queue:%" PRI_QUEUE "", - ret, eo, def_queue); - - /* Set queue context for the default queue */ - defq_ctx = &pkt_shm->def_q_ctx; - ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx for the default queue failed:%" PRI_STAT "\n" - "default-Q:%" PRI_QUEUE "", ret, def_queue); - - /* Set the pktout queues to use for the default queue, one per if */ - set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/); - - if (QUEUE_PER_FLOW) - create_queue_per_flow(eo, eo_ctx); - - APPL_PRINT("EO %" PRI_EO " global start done.\n", eo); - - return EM_OK; -} - -/** - * Helper func for EO start() to create a queue per packet flow (if configured) - */ -static void -create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx) -{ - uint16_t port_offset = (uint16_t)-1; - uint32_t q_ctx_idx = 0; - queue_context_t *q_ctx; - em_queue_type_t qtype; - em_queue_t queue; - em_status_t ret; - int i, j; - - memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx)); - - for (i = 0; i < NUM_IP_ADDRS; i++) { - char ip_str[sizeof("255.255.255.255")]; - uint32_t ip_addr = IP_ADDR_BASE + i; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - for (j = 0; j < NUM_PORTS_PER_IP; j++) { - uint16_t udp_port; - - if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ - port_offset++; - else /* Same UDP-ports per IP-interface */ - port_offset = j; - - udp_port = UDP_PORT_BASE + port_offset; - - if (!QUEUE_TYPE_MIX) { - /* Use only queues of a single type */ - qtype = QUEUE_TYPE; - } else { - /* Spread out over the 3 diff queue-types */ - int nbr_q = ((i * NUM_PORTS_PER_IP) + j) % 3; - - if (nbr_q == 0) - qtype = EM_QUEUE_TYPE_ATOMIC; - else 
if (nbr_q == 1) - qtype = EM_QUEUE_TYPE_PARALLEL; - else - qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; - } - - /* Create a queue */ - queue = em_queue_create("udp-flow", qtype, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(queue == EM_QUEUE_UNDEF, - "Queue create failed: UDP-port %d", - udp_port); - /* - * Store the id of the created queue into the - * application specific EO-context - */ - eo_ctx->queue[q_ctx_idx] = queue; - - /* Set queue specific appl (EO) context */ - q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx]; - /* Save flow params */ - q_ctx->flow_params.ipv4 = ip_addr; - q_ctx->flow_params.port = udp_port; - q_ctx->flow_params.proto = IPV4_PROTO_UDP; - q_ctx->queue = queue; - - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue); - - /* Add the queue to the EO */ - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo, queue); - - /* - * Set the pktout queues to use for this input queue, - * one pktout queue per interface. - */ - set_pktout_queues(queue, eo_ctx, - q_ctx->pktout_queue/*out*/); - - /* Update the Queue Context Index */ - q_ctx_idx++; - test_fatal_if(q_ctx_idx > NUM_PKTIN_QUEUES, - "Too many queues!"); - } - } -} - -/** - * Helper func to store the packet output queues for a specific input queue - */ -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]) -{ - int if_count = eo_ctx->if_count; - int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; - int id, i; - - for (i = 0; i < if_count; i++) { - id = eo_ctx->if_ids[i]; - pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; - } -} - -/** - * EO Local start function (run once at startup on EACH core) - - * Not really needed in this application, but included - * to demonstrate usage. 
- */ -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - - APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", - eo, eo_ctx->name, em_core_id()); - - return EM_OK; -} - -/** - * EO stop function - */ -static em_status_t -stop_eo(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - em_status_t ret; - em_queue_t pktout_queue; - int if_id; - int i, j; - - APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - /* Delete the packet output queues created for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktout_queue = eo_ctx->pktout_queue[if_id][j]; - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue undef:%d,%d", i, j); - ret = em_queue_delete(pktout_queue); - test_fatal_if(ret != EM_OK, - "Pktout queue delete failed:%d,%d", i, j); - } - } - - return EM_OK; -} - -/** - * EO event receive function - */ -static void -receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - queue_context_t *const q_ctx = queue_context; - int in_port; - int out_port; - em_queue_t pktout_queue; - em_status_t status; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - in_port = pktio_input_port(event); - - if (X_CONNECT_PORTS) - out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; - else - out_port = in_port; - - pktout_queue = q_ctx->pktout_queue[out_port]; - - if (ENABLE_ERROR_CHECKS) { - eo_context_t *const eo_ctx = eo_context; - - if (rx_error_check(eo_ctx, event, queue, q_ctx) != 0) - return; - } - - /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: scr<->dst */ - pktio_swap_addrs(event); - - if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */ - event = alloc_copy_free(event); - - /* - * Send the packet buffer back out via the pktout queue through - * the 'out_port' - */ - status = em_send(event, pktout_queue); - if (unlikely(status != EM_OK)) - em_free(event); -} - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx) -{ - static ENV_LOCAL uint64_t drop_cnt = 1; - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - - if (QUEUE_PER_FLOW) { - flow_params_t *fp; - - /* Drop everything from the default queue */ - if (unlikely(queue == eo_ctx->default_queue)) { - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); - - APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n", - ip_str, port_dst, em_core_id(), drop_cnt++); - - em_free(event); - return -1; - } - - /* - * Check IP address and port: compare packet against the stored - * values in the queue context - */ - fp = &q_ctx->flow_params; - test_fatal_if(fp->ipv4 != ipv4_dst || - fp->port != port_dst || fp->proto != proto, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Abort!", queue, ipv4_dst, port_dst, proto, - fp->ipv4, fp->port, fp->proto); - } else { - if (unlikely(proto != IPV4_PROTO_UDP)) { - APPL_PRINT("Pkt: defQ, not UDP drop-%d-#%" PRIu64 "\n", - em_core_id(), drop_cnt++); - em_free(event); - return -1; - } - - test_fatal_if(ipv4_dst < (uint32_t)IP_ADDR_BASE || - ipv4_dst >= - (uint32_t)(IP_ADDR_BASE + NUM_IP_ADDRS) || - port_dst < UDP_PORT_BASE || - port_dst >= (UDP_PORT_BASE + NUM_PKTIN_QUEUES) || - proto != IPV4_PROTO_UDP, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Values not in the configurated range!\n" - "Abort!", - queue, ipv4_dst, port_dst, proto); - } - - /* Everything OK, return zero */ - return 0; -} - -/** - * Alloc a new event, copy the contents&header into the new event - * and finally free the original event. Returns a pointer to the new event. - * - * Used for testing the performance impact of alloc-copy-free operations. - */ -static inline em_event_t -alloc_copy_free(em_event_t event) -{ - /* Copy the packet event */ - em_event_t new_event = pktio_copy_event(event); - - /* Free old event */ - em_free(event); - - return new_event; -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Simple Load Balanced Packet-IO test application. + * + * An application (EO) that receives UDP datagrams and exchanges + * the src-dst addresses before sending the datagram back out. + */ + +#include <string.h> +#include <stdio.h> +#include <inttypes.h> + +#include <event_machine.h> +#include <event_machine/platform/env/environment.h> + +#include "cm_setup.h" +#include "cm_error_handler.h" +#include "cm_pktio.h" + +/* + * Test configuration + */ + +/** + * Set the used queue type for EM queues receiving packet data. + * + * Try also with EM_QUEUE_TYPE_PARALLEL or EM_QUEUE_TYPE_PARALLEL_ORDERED. + * Alt. set QUEUE_TYPE_MIX to '1' to use all queue types simultaneously. + */ +#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC +/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */ +/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */ + +/** + * Test with all different queue types simultaneously: + * ATOMIC, PARALLEL, PARALLEL_ORDERED + */ +#define QUEUE_TYPE_MIX 0 /* 0=False or 1=True */ + +/** + * Create an EM queue per UDP/IP flow or use the default queue. + * + * If set to '0' then all traffic is routed through one 'default queue'(slow), + * if set to '1' each traffic flow is routed to its own EM-queue. + */ +#define QUEUE_PER_FLOW 1 /* 0=False or 1=True */ + +/** + * Select whether the UDP ports should be unique over all the IP-interfaces + * (set to 1) or reused per IP-interface (thus each UDP port is configured + * once for each IP-interface). Using '0' (not unique) makes it easier to + * copy traffic generator settings from one IF-port to another as only the + * dst-IP address has to be changed. + */ +#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ + +/** + * Select whether the input and output ports should be cross-connected.
+ */ +#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ + +/** + * Enable per packet error checking + */ +#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ + +/** + * Test em_alloc and em_free per packet + * + * Alloc new event, copy event, free old event + */ +#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ + +/* Configure the IP addresses and UDP ports that this application will use */ +#define NUM_IP_ADDRS 4 +#define NUM_PORTS_PER_IP 64 + +#define IP_ADDR_A 192 +#define IP_ADDR_B 168 +#define IP_ADDR_C 1 +#define IP_ADDR_D 16 + +#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ + (IP_ADDR_C << 8) | (IP_ADDR_D)) +#define UDP_PORT_BASE 1024 +/* + * IANA Dynamic Ports (Private or Ephemeral Ports), + * from 49152 to 65535 (never assigned) + */ +/* #define UDP_PORT_BASE 0xC000 */ + +#define MAX_NUM_IF 4 /* max number of used interfaces */ +#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ + +#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) +#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES + +#define IS_ODD(x) (((x) & 0x1)) +#define IS_EVEN(x) (!IS_ODD(x)) + +/** + * EO context + */ +typedef struct { + em_eo_t eo; + char name[32]; + /** interface count as provided by appl_conf to test_start() */ + int if_count; + /** interface ids as provided via appl_conf_t to test_start() */ + int if_ids[MAX_NUM_IF]; + /** default queue: pkts/events not matching any other input criteria */ + em_queue_t default_queue; + /** all created input queues */ + em_queue_t queue[NUM_PKTIN_QUEUES]; + /** the number of packet output queues to use per interface */ + int pktout_queues_per_if; + /* pktout queues: accessed by if_id, thus empty middle slots possible */ + em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; +} eo_context_t; + +/** + * Save the dst IP, protocol and port in the queue-context. + * Verify (if error checking enabled) that the received packet matches the + * configuration for the queue. + */ +typedef struct flow_params_ { + uint32_t ipv4; + uint16_t port; + uint8_t proto; + uint8_t _pad; +} flow_params_t; + +/** + * Queue-Context, i.e. queue specific data, each queue has its own instance + */ +typedef struct { + /** a pktout queue for each interface, precalculated */ + em_queue_t pktout_queue[MAX_IF_ID + 1]; + /** saved flow params for the EM-queue */ + flow_params_t flow_params; + /** queue handle */ + em_queue_t queue; +} queue_context_t; + +/** + * Packet Loopback shared memory + */ +typedef struct { + /** EO (application) context */ + eo_context_t eo_ctx; + /** + * Array containing the contexts of all the queues handled by the EO. + * A queue context contains the flow/queue specific data for the + * application EO. 
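
The address macros above define the whole flow space; the arithmetic, spelled out:

	/* IP_ADDR_BASE = (192 << 24) | (168 << 16) | (1 << 8) | 16 = 0xC0A80110,
	 * i.e. 192.168.1.16. With NUM_IP_ADDRS = 4 the app serves
	 * 192.168.1.16 ... 192.168.1.19, and since UDP_PORTS_UNIQUE = 0 each
	 * address reuses UDP ports 1024 ... 1087 (UDP_PORT_BASE + 0 ... 63).
	 * NUM_PKTIN_QUEUES = 4 * 64 = 256 flows, i.e. one EM queue per flow
	 * when QUEUE_PER_FLOW = 1.
	 */
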
+ */ + queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; + + /** Queue context for the default queue */ + queue_context_t def_q_ctx; +} packet_loopback_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL packet_loopback_shm_t *pkt_shm; + +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static void +create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx); + +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]); + +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo); + +static void +receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context); + +static em_status_t +stop_eo(void *eo_context, em_eo_t eo); + +static inline int +rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, + const em_queue_t queue, queue_context_t *const q_ctx); + +static inline em_event_t +alloc_copy_free(em_event_t event); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Packet Loopback test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + pkt_shm = env_shared_reserve("PktLoopShMem", + sizeof(packet_loopback_shm_t)); + em_register_error_handler(test_error_handler); + } else { + pkt_shm = env_shared_lookup("PktLoopShMem"); + } + + if (pkt_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Packet Loopback init failed on EM-core: %u", + em_core_id()); + else if (core == 0) + memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); +} + +/** + * Startup of the Packet Loopback test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. 
+ */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo; + eo_context_t *eo_ctx; + em_status_t ret, start_fn_ret = EM_ERROR; + int if_id, i; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads); + + test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || + appl_conf->pktio.if_count <= 0, + "Invalid number of interfaces given:%d - need 1-%d(MAX)", + appl_conf->pktio.if_count, MAX_NUM_IF); + + pktin_mode_t pktin_mode = appl_conf->pktio.in_mode; + + test_fatal_if(!pktin_polled_mode(pktin_mode), + "Invalid pktin-mode: %s(%i).\n" + "Application:%s supports only polled pktin-modes: %s(%i), %s(%i)", + pktin_mode_str(pktin_mode), pktin_mode, + appl_conf->name, + pktin_mode_str(DIRECT_RECV), DIRECT_RECV, + pktin_mode_str(PLAIN_QUEUE), PLAIN_QUEUE); + + /* + * Create one EO + */ + eo_ctx = &pkt_shm->eo_ctx; + /* Initialize EO context data to '0' */ + memset(eo_ctx, 0, sizeof(eo_context_t)); + + eo = em_eo_create(appl_conf->name, start_eo, start_eo_local, + stop_eo, NULL, receive_eo_packet, eo_ctx); + test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); + eo_ctx->eo = eo; + + /* Store the number of pktio interfaces used */ + eo_ctx->if_count = appl_conf->pktio.if_count; + /* Store the used interface ids */ + for (i = 0; i < appl_conf->pktio.if_count; i++) { + if_id = appl_conf->pktio.if_ids[i]; + test_fatal_if(if_id > MAX_IF_ID, + "Interface id out of range! %d > %d(MAX)", + if_id, MAX_IF_ID); + eo_ctx->if_ids[i] = if_id; + } + + /* Start the EO - queues etc. created in the EO start function */ + ret = em_eo_start_sync(eo, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + /* + * All input & output queues have been created and enabled in the + * EO start function, now direct pktio traffic to those queues. + */ + for (i = 0; i < NUM_PKTIN_QUEUES; i++) { + /* Direct ip_addr:udp_port into this queue */ + queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; + uint32_t ip_addr = q_ctx->flow_params.ipv4; + uint16_t port = q_ctx->flow_params.port; + uint8_t proto = q_ctx->flow_params.proto; + em_queue_t queue = q_ctx->queue; + em_queue_t tmp_q; + char ip_str[sizeof("255.255.255.255")]; + + ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); + + pktio_add_queue(proto, ip_addr, port, queue); + + /* Sanity checks (lookup what was configured) */ + tmp_q = pktio_lookup_sw(proto, ip_addr, port); + test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, + "Lookup fails IP:UDP %s:%d\n" + "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", + ip_str, port, queue, tmp_q); + /* Print first and last mapping */ + if (i == 0 || i == NUM_PKTIN_QUEUES - 1) + APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", + ip_str, port, tmp_q); + } + + /* + * Direct all non-lookup hit packets into this queue. 
+ * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue + */ + pktio_default_queue(eo_ctx->default_queue); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; + em_eo_t eo = eo_ctx->eo; + em_status_t ret; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(pkt_shm); + em_unregister_error_handler(); + } +} + +/** + * EO start function (run once at startup on ONE core) + * + * The global start function creates the application specific queues and + * associates the queues with the EO and the packet flows it wants to process. + */ +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + em_queue_t def_queue, pktout_queue; + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; /* platform specific */ + pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ + em_status_t ret; + eo_context_t *const eo_ctx = eo_context; + queue_context_t *defq_ctx; + int if_id; + int i, j; + + (void)conf; + + /* Store the EO name in the EO-context data */ + em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); + + APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", + eo, eo_ctx->name, eo_ctx->if_count); + + /* + * Create packet output queues. + * + * Dimension the number of pktout queues to be equal to the number + * of EM cores per interface to minimize output resource contention. 
+ */ + test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, + "No room to store pktout queues"); + eo_ctx->pktout_queues_per_if = em_core_count(); + + memset(&queue_conf, 0, sizeof(queue_conf)); + memset(&output_conf, 0, sizeof(output_conf)); + queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; + queue_conf.min_events = 0; /* system default */ + queue_conf.conf_len = sizeof(output_conf); + queue_conf.conf = &output_conf; + + /* Output-queue callback function (em_output_func_t) */ + output_conf.output_fn = pktio_tx; + /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ + output_conf.output_fn_args = &pktio_tx_fn_args; + output_conf.args_len = sizeof(pktio_tx_fn_args_t); + /* Content of 'pktio_tx_fn_args' set in loop */ + + /* Create the packet output queues for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktio_tx_fn_args.if_id = if_id; + pktout_queue = + em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, + EM_QUEUE_PRIO_UNDEF, + EM_QUEUE_GROUP_UNDEF, &queue_conf); + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue create failed:%d,%d", i, j); + eo_ctx->pktout_queue[if_id][j] = pktout_queue; + } + } + + /* + * Default queue for all packets not matching any + * specific input queue criteria + */ + def_queue = em_queue_create("default", QUEUE_TYPE, EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + test_fatal_if(def_queue == EM_QUEUE_UNDEF, + "Default Queue creation failed"); + + /* Store the default queue Id in the EO-context data */ + eo_ctx->default_queue = def_queue; + + /* Associate the queue with this EO */ + ret = em_eo_add_queue_sync(eo, def_queue); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " queue:%" PRI_QUEUE "", + ret, eo, def_queue); + + /* Set queue context for the default queue */ + defq_ctx = &pkt_shm->def_q_ctx; + ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx); + test_fatal_if(ret != EM_OK, + "Set Q-ctx for the default queue failed:%" PRI_STAT "\n" + "default-Q:%" PRI_QUEUE "", ret, def_queue); + + /* Set the pktout queues to use for the default queue, one per if */ + set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/); + + if (QUEUE_PER_FLOW) + create_queue_per_flow(eo, eo_ctx); + + APPL_PRINT("EO %" PRI_EO " global start done.\n", eo); + + return EM_OK; +} + +/** + * Helper func for EO start() to create a queue per packet flow (if configured) + */ +static void +create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx) +{ + uint16_t port_offset = (uint16_t)-1; + uint32_t q_ctx_idx = 0; + queue_context_t *q_ctx; + em_queue_type_t qtype; + em_queue_t queue; + em_status_t ret; + int i, j; + + memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx)); + + for (i = 0; i < NUM_IP_ADDRS; i++) { + char ip_str[sizeof("255.255.255.255")]; + uint32_t ip_addr = IP_ADDR_BASE + i; + + ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); + + for (j = 0; j < NUM_PORTS_PER_IP; j++) { + uint16_t udp_port; + + if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ + port_offset++; + else /* Same UDP-ports per IP-interface */ + port_offset = j; + + udp_port = UDP_PORT_BASE + port_offset; + + if (!QUEUE_TYPE_MIX) { + /* Use only queues of a single type */ + qtype = QUEUE_TYPE; + } else { + /* Spread out over the 3 diff queue-types */ + int nbr_q = ((i * NUM_PORTS_PER_IP) + j) % 3; + + if (nbr_q == 0) + qtype = EM_QUEUE_TYPE_ATOMIC; + else
if (nbr_q == 1) + qtype = EM_QUEUE_TYPE_PARALLEL; + else + qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; + } + + /* Create a queue */ + queue = em_queue_create("udp-flow", qtype, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + test_fatal_if(queue == EM_QUEUE_UNDEF, + "Queue create failed: UDP-port %d", + udp_port); + /* + * Store the id of the created queue into the + * application specific EO-context + */ + eo_ctx->queue[q_ctx_idx] = queue; + + /* Set queue specific appl (EO) context */ + q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx]; + /* Save flow params */ + q_ctx->flow_params.ipv4 = ip_addr; + q_ctx->flow_params.port = udp_port; + q_ctx->flow_params.proto = IPV4_PROTO_UDP; + q_ctx->queue = queue; + + ret = em_queue_set_context(queue, q_ctx); + test_fatal_if(ret != EM_OK, + "Set Q-ctx failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue); + + /* Add the queue to the EO */ + ret = em_eo_add_queue_sync(eo, queue); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo, queue); + + /* + * Set the pktout queues to use for this input queue, + * one pktout queue per interface. + */ + set_pktout_queues(queue, eo_ctx, + q_ctx->pktout_queue/*out*/); + + /* Update the Queue Context Index */ + q_ctx_idx++; + test_fatal_if(q_ctx_idx > NUM_PKTIN_QUEUES, + "Too many queues!"); + } + } +} + +/** + * Helper func to store the packet output queues for a specific input queue + */ +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]) +{ + int if_count = eo_ctx->if_count; + int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; + int id, i; + + for (i = 0; i < if_count; i++) { + id = eo_ctx->if_ids[i]; + pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; + } +} + +/** + * EO Local start function (run once at startup on EACH core) + + * Not really needed in this application, but included + * to demonstrate usage. 
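The 'output_fn' wired up in start_eo() above must follow the EM output-queue callback type em_output_func_t; the real implementation used by these examples is pktio_tx() in cm_pktio.c. As a minimal sketch of the shape such a callback takes (illustrative only, assuming the em_output_func_t prototype from the EM API headers; 'my_pktio_tx' and its body are hypothetical, not the application's code):

    /*
     * Output-queue callback sketch: invoked by EM to transmit the events
     * that were sent to an EM_QUEUE_TYPE_OUTPUT queue. Returns the number
     * of events actually consumed/transmitted.
     */
    static int my_pktio_tx(const em_event_t events[], const unsigned int num,
                           const em_queue_t output_queue, void *output_fn_args)
    {
            /* 'output_fn_args' points to the args given via output_conf,
             * here a pktio_tx_fn_args_t carrying the tx interface id.
             */
            pktio_tx_fn_args_t *args = output_fn_args;

            (void)output_queue;
            /* ... transmit events[0..num-1] out of interface args->if_id ... */
            return (int)num; /* all events sent */
    }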
+ */ +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + + APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", + eo, eo_ctx->name, em_core_id()); + + return EM_OK; +} + +/** + * EO stop function + */ +static em_status_t +stop_eo(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + em_status_t ret; + em_queue_t pktout_queue; + int if_id; + int i, j; + + APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + /* Delete the packet output queues created for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktout_queue = eo_ctx->pktout_queue[if_id][j]; + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue undef:%d,%d", i, j); + ret = em_queue_delete(pktout_queue); + test_fatal_if(ret != EM_OK, + "Pktout queue delete failed:%d,%d", i, j); + } + } + + return EM_OK; +} + +/** + * EO event receive function + */ +static void +receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + queue_context_t *const q_ctx = queue_context; + int in_port; + int out_port; + em_queue_t pktout_queue; + em_status_t status; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + in_port = pktio_input_port(event); + + if (X_CONNECT_PORTS) + out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; + else + out_port = in_port; + + pktout_queue = q_ctx->pktout_queue[out_port]; + + if (ENABLE_ERROR_CHECKS) { + eo_context_t *const eo_ctx = eo_context; + + if (rx_error_check(eo_ctx, event, queue, q_ctx) != 0) + return; + } + + /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: src<->dst */ + pktio_swap_addrs(event); + + if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */ + event = alloc_copy_free(event); + + /* + * Send the packet buffer back out via the pktout queue through + * the 'out_port' + */ + status = em_send(event, pktout_queue); + if (unlikely(status != EM_OK)) + em_free(event); +} + +static inline int +rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, + const em_queue_t queue, queue_context_t *const q_ctx) +{ + static ENV_LOCAL uint64_t drop_cnt = 1; + uint8_t proto; + uint32_t ipv4_dst; + uint16_t port_dst; + + pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); + + if (QUEUE_PER_FLOW) { + flow_params_t *fp; + + /* Drop everything from the default queue */ + if (unlikely(queue == eo_ctx->default_queue)) { + char ip_str[sizeof("255.255.255.255")]; + + ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); + + APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n", + ip_str, port_dst, em_core_id(), drop_cnt++); + + em_free(event); + return -1; + } + + /* + * Check IP address and port: compare packet against the stored + * values in the queue context + */ + fp = &q_ctx->flow_params; + test_fatal_if(fp->ipv4 != ipv4_dst || + fp->port != port_dst || fp->proto != proto, + "Q:%" PRI_QUEUE " received illegal packet!\n" + "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" + "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" + "Abort!", queue, ipv4_dst, port_dst, proto, + fp->ipv4, fp->port, fp->proto); + } else { + if (unlikely(proto != IPV4_PROTO_UDP)) { + APPL_PRINT("Pkt: defQ, not UDP drop-%d-#%" PRIu64 "\n", + em_core_id(), drop_cnt++); + em_free(event); + return -1; + } + + test_fatal_if(ipv4_dst < (uint32_t)IP_ADDR_BASE || + ipv4_dst >= + (uint32_t)(IP_ADDR_BASE + NUM_IP_ADDRS) || + port_dst < UDP_PORT_BASE || + port_dst >= (UDP_PORT_BASE + NUM_PKTIN_QUEUES) || + proto != IPV4_PROTO_UDP, + "Q:%" PRI_QUEUE " received illegal packet!\n" + "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" + "Values not in the configured range!\n" + "Abort!", + queue, ipv4_dst, port_dst, proto); + } + + /* Everything OK, return zero */ + return 0; +} + +/** + * Alloc a new event, copy the contents & header into the new event + * and finally free the original event. Returns a pointer to the new event. + * + * Used for testing the performance impact of alloc-copy-free operations. + */ +static inline em_event_t +alloc_copy_free(em_event_t event) +{ + /* Copy the packet event */ + em_event_t new_event = pktio_copy_event(event); + + /* Free old event */ + em_free(event); + + return new_event; +} diff --git a/programs/packet_io/loopback_ag.c b/programs/packet_io/loopback_ag.c index 68472e3a..de491002 100644 --- a/programs/packet_io/loopback_ag.c +++ b/programs/packet_io/loopback_ag.c @@ -1,808 +1,818 @@ -/* - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Simple Load Balanced Packet-IO test application with - * Event Machine (EM) Atomic Groups (AG) - * - * An application (EO) that receives UDP datagrams and exchanges - * the src-dst addesses before sending the datagram back out. - * Each set of four input EM queues with prios Highest, High, - * Normal and Low are mapped into an EM atomic group to provide - * "atomic context with priority". - * Similar to normal atomic queues, atomic groups provide EOs with atomic - * processing context, but expands the context over multiple queues, - * i.e. over all the queues in the same atomic group. - * All queues in an atomic group are by default of type "atomic". - */ - -#include <string.h> -#include <stdlib.h> -#include <stdio.h> - -#include <event_machine.h> -#include <event_machine/platform/env/environment.h> - -#include "cm_setup.h" -#include "cm_error_handler.h" -#include "cm_pktio.h" - -/* - * Test configuration - */ - -/** - * The number of queues that belong to each atomic group. - */ -#define QUEUES_PER_ATOMIC_GROUP 4 - -/** - * Select whether the UDP ports should be unique over all the IP-interfaces - * (set to 1) or reused per IP-interface (thus each UDP port is configured once - * for each IP-interface). Using '0' (not unique) makes it easier to copy - * traffic generator settings from one IF-port to another as only the dst-IP - * address has to be changed. - */ -#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ - -/** - * Select whether the input and output ports should be cross-connected.
- */ -#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ - -/** - * Enable per packet error checking - */ -#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ - -/** - * Test em_alloc and em_free per packet - * - * Alloc new event, copy event, free old event - */ -#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ - -/* Configure the IP addresses and UDP ports that this application will use */ -#define NUM_IP_ADDRS 4 -#define NUM_PORTS_PER_IP 64 - -#define IP_ADDR_A 192 -#define IP_ADDR_B 168 -#define IP_ADDR_C 1 -#define IP_ADDR_D 16 - -#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ - (IP_ADDR_C << 8) | (IP_ADDR_D)) -#define UDP_PORT_BASE 1024 -/* - * IANA Dynamic Ports (Private or Ephemeral Ports), - * from 49152 to 65535 (never assigned) - */ -/* #define UDP_PORT_BASE 0xC000 */ - -#define MAX_NUM_IF 4 /* max number of used interfaces */ -#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ - -#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) -#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES -#define MAX_ATOMIC_GROUPS (ROUND_UP(NUM_PKTIN_QUEUES, QUEUES_PER_ATOMIC_GROUP)\ - / QUEUES_PER_ATOMIC_GROUP) - -#define IS_ODD(x) (((x) & 0x1)) -#define IS_EVEN(x) (!IS_ODD(x)) - -/** - * EO context - */ -typedef struct { - em_eo_t eo; - char name[32]; - /** interface count as provided by appl_conf to test_start() */ - int if_count; - /** interface ids as provided via appl_conf_t to test_start() */ - int if_ids[MAX_NUM_IF]; - /** all created atomic groups */ - em_atomic_group_t agrps[MAX_ATOMIC_GROUPS]; - /** default queue: pkts/events not matching any other input criteria */ - em_queue_t default_queue; - /** all created input queues */ - em_queue_t queue[NUM_PKTIN_QUEUES]; - /** the number of packet output queues to use per interface */ - int pktout_queues_per_if; - /* pktout queues: accessed by if_id, thus empty middle slots possible */ - em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; -} eo_context_t; - -/** - * Save the dst IP, protocol and port in the queue-context. - * Verify (if error checking enabled) that the received packet matches the - * configuration for the queue. - */ -typedef struct flow_params_ { - uint32_t ipv4; - uint16_t port; - uint8_t proto; - uint8_t _pad; -} flow_params_t; - -/** - * Queue-Context, i.e. queue specific data, each queue has its own instance - */ -typedef struct { - /** a pktout queue for each interface, precalculated */ - em_queue_t pktout_queue[MAX_IF_ID + 1]; - /** saved flow params for the EM-queue */ - flow_params_t flow_params; - /** queue handle */ - em_queue_t queue; -} queue_context_t; - -/** - * Packet Loopback shared memory - */ -typedef struct { - /** EO (application) context */ - eo_context_t eo_ctx; - /** - * Array containing the contexts of all the queues handled by the EO. - * A queue context contains the flow/queue specific data for the - * application EO. 
- */ - queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; - - /** Queue context for the default queue */ - queue_context_t def_q_ctx; -} packet_loopback_shm_t; - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL packet_loopback_shm_t *pkt_shm; - -static void -create_atomic_group_queues(const em_eo_t eo, eo_context_t *const eo_ctx); - -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]); - -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo); - -static void -receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context); - -static em_status_t -stop_eo(void *eo_context, em_eo_t eo); - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx); - -static inline em_event_t -alloc_copy_free(em_event_t event); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Packet Loopback test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - pkt_shm = env_shared_reserve("PktLoopShMem", - sizeof(packet_loopback_shm_t)); - em_register_error_handler(test_error_handler); - } else { - pkt_shm = env_shared_lookup("PktLoopShMem"); - } - - if (pkt_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Packet Loopback AG init failed on EM-core: %u", - em_core_id()); - else if (core == 0) - memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); -} - -/** - * Startup of the Packet Loopback test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - eo_context_t *eo_ctx; - em_status_t ret, start_fn_ret = EM_ERROR; - int if_id, i; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads); - - test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || - appl_conf->pktio.if_count <= 0, - "Invalid number of interfaces given:%d - need 1-%d(MAX)", - appl_conf->pktio.if_count, MAX_NUM_IF); - - /* - * Create one EO - */ - eo_ctx = &pkt_shm->eo_ctx; - /* Initialize EO context data to '0' */ - memset(eo_ctx, 0, sizeof(eo_context_t)); - - eo = em_eo_create(appl_conf->name, start_eo, start_eo_local, - stop_eo, NULL, receive_eo_packet, eo_ctx); - test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); - eo_ctx->eo = eo; - - /* Store the number of pktio interfaces used */ - eo_ctx->if_count = appl_conf->pktio.if_count; - /* Store the used interface ids */ - for (i = 0; i < appl_conf->pktio.if_count; i++) { - if_id = appl_conf->pktio.if_ids[i]; - test_fatal_if(if_id > MAX_IF_ID, - "Interface id out of range! %d > %d(MAX)", - if_id, MAX_IF_ID); - eo_ctx->if_ids[i] = if_id; - } - - /* Start the EO - queues etc. created in the EO start function */ - ret = em_eo_start_sync(eo, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - /* - * All input & output queues have been created and enabled in the - * EO start function, now direct pktio traffic to those queues. - */ - for (i = 0; i < NUM_PKTIN_QUEUES; i++) { - /* Direct ip_addr:udp_port into this queue */ - queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; - uint32_t ip_addr = q_ctx->flow_params.ipv4; - uint16_t port = q_ctx->flow_params.port; - uint8_t proto = q_ctx->flow_params.proto; - em_queue_t queue = q_ctx->queue; - em_queue_t tmp_q; - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - pktio_add_queue(proto, ip_addr, port, queue); - - /* Sanity checks (lookup what was configured) */ - tmp_q = pktio_lookup_sw(proto, ip_addr, port); - test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, - "Lookup fails IP:UDP %s:%d\n" - "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", - ip_str, port, queue, tmp_q); - /* Print first and last mapping */ - if (i == 0 || i == NUM_PKTIN_QUEUES - 1) - APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", - ip_str, port, tmp_q); - } - - /* - * Direct all non-lookup hit packets into this queue. 
- * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue - */ - pktio_default_queue(eo_ctx->default_queue); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; - em_eo_t eo = eo_ctx->eo; - em_status_t ret; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(pkt_shm); - em_unregister_error_handler(); - } -} - -/** - * EO start function (run once at startup on ONE core) - * - * The global start function creates the application specific queues and - * associates the queues with the EO and the packet flows it wants to process. - */ -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - em_queue_t def_queue, pktout_queue; - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; /* platform specific */ - pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ - em_status_t ret; - eo_context_t *const eo_ctx = eo_context; - queue_context_t *defq_ctx; - int if_id; - int i, j; - - (void)conf; - - /* Store the EO name in the EO-context data */ - em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); - - APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", - eo, eo_ctx->name, eo_ctx->if_count); - - /* - * Create packet output queues. - * - * Dimension the number of pktout queues to be equal to the number - * of EM cores per interface to minimize output resource contention. - */ - test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, - "No room to store pktout queues"); - eo_ctx->pktout_queues_per_if = em_core_count(); - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - - /* Output-queue callback function (em_output_func_t) */ - output_conf.output_fn = pktio_tx; - /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ - output_conf.output_fn_args = &pktio_tx_fn_args; - output_conf.args_len = sizeof(pktio_tx_fn_args_t); - /* Content of 'pktio_tx_fn_args' set in loop */ - - /* Create the packet output queues for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktio_tx_fn_args.if_id = if_id; - pktout_queue = - em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, &queue_conf); - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue create failed:%d,%d", i, j); - eo_ctx->pktout_queue[if_id][j] = pktout_queue; - } - } - - /* - * Default queue for all packets not mathing any - * specific input queue criteria - * Note: The queue type is EM_QUEUE_TYPE_PARALLEL ! 
- */ - def_queue = em_queue_create("default", EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(def_queue == EM_QUEUE_UNDEF, - "Default Queue creation failed"); - - /* Store the default queue Id in the EO-context data */ - eo_ctx->default_queue = def_queue; - - /* Associate the queue with this EO */ - ret = em_eo_add_queue_sync(eo, def_queue); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo, def_queue); - - /* Set queue context for the default queue */ - defq_ctx = &pkt_shm->def_q_ctx; - ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx for the default queue failed:%" PRI_STAT "\n" - "default-Q:%" PRI_QUEUE "", ret, def_queue); - - /* Set the pktout queues to use for the default queue, one per if */ - set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/); - - /* - * Create the atomic groups and the queues in each atomic group. - * Each queue is connected to an input UDP-flow - */ - create_atomic_group_queues(eo, eo_ctx); - - APPL_PRINT("EO %" PRI_EO " global start done.\n", eo); - - return EM_OK; -} - -/** - * Create the atomic groups and the queues in each atomic group. - * Each queue is connected to an input UDP-flow - * Called by EO-start - */ -static void -create_atomic_group_queues(const em_eo_t eo, eo_context_t *const eo_ctx) -{ - uint16_t port_offset = (uint16_t)-1; - uint32_t q_idx = 0, agrp_idx = 0; - queue_context_t *q_ctx; - em_queue_t queue; - em_atomic_group_t atomic_group = EM_ATOMIC_GROUP_UNDEF; - em_queue_prio_t ag_queue_priorities[] = {EM_QUEUE_PRIO_HIGHEST, - EM_QUEUE_PRIO_HIGH, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_PRIO_LOW, - /*end marker:*/ EM_QUEUE_PRIO_UNDEF}; - int ag_prio_idx = 0; /* index into ag_queue_priorities[] */ - em_queue_prio_t q_prio; - em_status_t ret; - int i, j; - - memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx)); - - for (i = 0; i < NUM_IP_ADDRS; i++) { - char ip_str[sizeof("255.255.255.255")]; - uint32_t ip_addr = IP_ADDR_BASE + i; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - for (j = 0; j < NUM_PORTS_PER_IP; j++) { - uint16_t udp_port; - - if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ - port_offset++; - else /* Same UDP-ports per IP-interface */ - port_offset = j; - - udp_port = UDP_PORT_BASE + port_offset; - - /* - * Determine whether to create a new atomic group. - * One atomic group contains 'QUEUES_PER_ATOMIC_GROUP' - * number of queues. - */ - if (q_idx % QUEUES_PER_ATOMIC_GROUP == 0) { - atomic_group = - em_atomic_group_create("PktLB_AG", - EM_QUEUE_GROUP_DEFAULT); - test_fatal_if(atomic_group == - EM_ATOMIC_GROUP_UNDEF, - "Atomic group creation failed!"); - eo_ctx->agrps[agrp_idx++] = atomic_group; - test_fatal_if(agrp_idx > MAX_ATOMIC_GROUPS, - "Too many atomic groups!"); - } - /* Get prio for the next queue in the atomic group */ - q_prio = ag_queue_priorities[ag_prio_idx++]; - if (q_prio == EM_QUEUE_PRIO_UNDEF/*end marker*/) { - ag_prio_idx = 0; - q_prio = ag_queue_priorities[ag_prio_idx++]; - } - /* - * Create a queue into the atomic group. Each queue in - * the atomic group has different priority. 
- */ - queue = em_queue_create_ag("udp-flow", q_prio, - atomic_group, NULL); - test_fatal_if(queue == EM_QUEUE_UNDEF, - "AG queue-create failed:\n" - "UDP-port:%d AG:%" PRI_AGRP "", - udp_port, atomic_group); - - /* - * Store the id of the created queue into the - * application specific EO-context - */ - eo_ctx->queue[q_idx] = queue; - - /* Set the queue specific application (EO) context */ - q_ctx = &pkt_shm->eo_q_ctx[q_idx]; - /* Save flow params */ - q_ctx->flow_params.ipv4 = ip_addr; - q_ctx->flow_params.port = udp_port; - q_ctx->flow_params.proto = IPV4_PROTO_UDP; - q_ctx->queue = queue; - - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_idx, queue); - - /* Add the queue to the EO */ - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo, queue); - - /* - * Set the pktout queues to use for this input queue, - * one pktout queue per interface. - */ - set_pktout_queues(queue, eo_ctx, - q_ctx->pktout_queue/*out*/); - - /* Update the Queue Index */ - q_idx++; - test_fatal_if(q_idx > NUM_PKTIN_QUEUES, - "Too many queues!"); - } - } -} - -/** - * Helper func to store the packet output queues for a specific input queue - */ -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]) -{ - int if_count = eo_ctx->if_count; - int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; - int id, i; - - for (i = 0; i < if_count; i++) { - id = eo_ctx->if_ids[i]; - pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; - } -} - -/** - * EO Local start function (run once at startup on EACH core) - - * Not really needed in this application, but included - * to demonstrate usage. 
- */ -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - - APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", - eo, eo_ctx->name, em_core_id()); - - return EM_OK; -} - -/** - * EO stop function - */ -static em_status_t -stop_eo(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - em_status_t ret; - em_atomic_group_t agrp; - em_queue_t pktout_queue; - int if_id; - int i, j; - - APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - for (i = 0; i < MAX_ATOMIC_GROUPS; i++) { - agrp = eo_ctx->agrps[i]; - if (agrp == EM_ATOMIC_GROUP_UNDEF) - break; - em_atomic_group_delete(agrp); - } - - /* Delete the packet output queues created for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktout_queue = eo_ctx->pktout_queue[if_id][j]; - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue undef:%d,%d", i, j); - ret = em_queue_delete(pktout_queue); - test_fatal_if(ret != EM_OK, - "Pktout queue delete failed:%d,%d", i, j); - } - } - - return EM_OK; -} - -/** - * EO event receive function - */ -static void -receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - eo_context_t *const eo_ctx = eo_context; - queue_context_t *const q_ctx = queue_context; - int in_port; - int out_port; - em_queue_t pktout_queue; - em_status_t status; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (unlikely(queue == eo_ctx->default_queue)) - APPL_PRINT("queue default: %" PRI_QUEUE "\n", queue); - - in_port = pktio_input_port(event); - - if (X_CONNECT_PORTS) - out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; - else - out_port = in_port; - - pktout_queue = q_ctx->pktout_queue[out_port]; - - if (ENABLE_ERROR_CHECKS) { - if (rx_error_check(eo_ctx, event, queue, q_ctx) != 0) - return; - } - - /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: scr<->dst */ - pktio_swap_addrs(event); - - if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */ - event = alloc_copy_free(event); - - /* - * Send the packet buffer back out via the pktout queue through - * the 'out_port' - */ - status = em_send(event, pktout_queue); - if (unlikely(status != EM_OK)) - em_free(event); -} - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx) -{ - static ENV_LOCAL uint64_t drop_cnt = 1; - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - flow_params_t *fp; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - - /* - * Drop everything from the default queue - */ - if (unlikely(queue == eo_ctx->default_queue)) { - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); - - APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n", - ip_str, port_dst, em_core_id(), drop_cnt++); - - em_free(event); - return -1; - } - - /* - * Check IP address and port: compare packet against the stored values - * in the queue context - */ - fp = &q_ctx->flow_params; - - test_fatal_if(fp->ipv4 != ipv4_dst || - fp->port != port_dst || fp->proto != proto, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Abort!", queue, ipv4_dst, port_dst, proto, - fp->ipv4, fp->port, fp->proto); - - /* Everything OK, return zero */ - return 0; -} - -/** - * Alloc a new event, copy the contents&header into the new event - * and finally free the original event. Returns a pointer to the new event. - * - * Used for testing the performance impact of alloc-copy-free operations. - */ -static inline em_event_t -alloc_copy_free(em_event_t event) -{ - /* Copy the packet event */ - em_event_t new_event = pktio_copy_event(event); - - /* Free old event */ - em_free(event); - - return new_event; -} +/* + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Simple Load Balanced Packet-IO test application with + * Event Machine (EM) Atomic Groups (AG) + * + * An application (EO) that receives UDP datagrams and exchanges + * the src-dst addresses before sending the datagram back out. + * Each set of four input EM queues with prios Highest, High, + * Normal and Low is mapped into an EM atomic group to provide + * "atomic context with priority". + * Similar to normal atomic queues, atomic groups provide EOs with atomic + * processing context, but expand the context over multiple queues, + * i.e. over all the queues in the same atomic group. + * All queues in an atomic group are by default of type "atomic". + */ + +#include <string.h> +#include <stdlib.h> +#include <stdio.h> + +#include <event_machine.h> +#include <event_machine/platform/env/environment.h> + +#include "cm_setup.h" +#include "cm_error_handler.h" +#include "cm_pktio.h" + +/* + * Test configuration + */ + +/** + * The number of queues that belong to each atomic group. + */ +#define QUEUES_PER_ATOMIC_GROUP 4 + +/** + * Select whether the UDP ports should be unique over all the IP-interfaces + * (set to 1) or reused per IP-interface (thus each UDP port is configured once + * for each IP-interface). Using '0' (not unique) makes it easier to copy + * traffic generator settings from one IF-port to another as only the dst-IP + * address has to be changed. + */ +#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ + +/** + * Select whether the input and output ports should be cross-connected.
+ */ +#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ + +/** + * Enable per packet error checking + */ +#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ + +/** + * Test em_alloc and em_free per packet + * + * Alloc new event, copy event, free old event + */ +#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ + +/* Configure the IP addresses and UDP ports that this application will use */ +#define NUM_IP_ADDRS 4 +#define NUM_PORTS_PER_IP 64 + +#define IP_ADDR_A 192 +#define IP_ADDR_B 168 +#define IP_ADDR_C 1 +#define IP_ADDR_D 16 + +#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ + (IP_ADDR_C << 8) | (IP_ADDR_D)) +#define UDP_PORT_BASE 1024 +/* + * IANA Dynamic Ports (Private or Ephemeral Ports), + * from 49152 to 65535 (never assigned) + */ +/* #define UDP_PORT_BASE 0xC000 */ + +#define MAX_NUM_IF 4 /* max number of used interfaces */ +#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ + +#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) +#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES +#define MAX_ATOMIC_GROUPS (ROUND_UP(NUM_PKTIN_QUEUES, QUEUES_PER_ATOMIC_GROUP)\ + / QUEUES_PER_ATOMIC_GROUP) + +#define IS_ODD(x) (((x) & 0x1)) +#define IS_EVEN(x) (!IS_ODD(x)) + +/** + * EO context + */ +typedef struct { + em_eo_t eo; + char name[32]; + /** interface count as provided by appl_conf to test_start() */ + int if_count; + /** interface ids as provided via appl_conf_t to test_start() */ + int if_ids[MAX_NUM_IF]; + /** all created atomic groups */ + em_atomic_group_t agrps[MAX_ATOMIC_GROUPS]; + /** default queue: pkts/events not matching any other input criteria */ + em_queue_t default_queue; + /** all created input queues */ + em_queue_t queue[NUM_PKTIN_QUEUES]; + /** the number of packet output queues to use per interface */ + int pktout_queues_per_if; + /* pktout queues: accessed by if_id, thus empty middle slots possible */ + em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; +} eo_context_t; + +/** + * Save the dst IP, protocol and port in the queue-context. + * Verify (if error checking enabled) that the received packet matches the + * configuration for the queue. + */ +typedef struct flow_params_ { + uint32_t ipv4; + uint16_t port; + uint8_t proto; + uint8_t _pad; +} flow_params_t; + +/** + * Queue-Context, i.e. queue specific data, each queue has its own instance + */ +typedef struct { + /** a pktout queue for each interface, precalculated */ + em_queue_t pktout_queue[MAX_IF_ID + 1]; + /** saved flow params for the EM-queue */ + flow_params_t flow_params; + /** queue handle */ + em_queue_t queue; +} queue_context_t; + +/** + * Packet Loopback shared memory + */ +typedef struct { + /** EO (application) context */ + eo_context_t eo_ctx; + /** + * Array containing the contexts of all the queues handled by the EO. + * A queue context contains the flow/queue specific data for the + * application EO. 
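For concreteness, the dimensioning defined above works out as follows (a worked example of the existing defines, not new configuration): NUM_PKTIN_QUEUES = NUM_IP_ADDRS * NUM_PORTS_PER_IP = 4 * 64 = 256 UDP flows, each with its own input queue, and MAX_ATOMIC_GROUPS = ROUND_UP(256, 4) / 4 = 64, i.e. 64 atomic groups each holding QUEUES_PER_ATOMIC_GROUP = 4 queues, one per priority Highest/High/Normal/Low. A hypothetical compile-time check of the same arithmetic:

    /* Sanity check of the dimensioning above (illustrative, not in the patch) */
    _Static_assert(NUM_IP_ADDRS * NUM_PORTS_PER_IP == 256,
                   "4 IP addrs x 64 ports = 256 pktin queues");
    _Static_assert(256 / QUEUES_PER_ATOMIC_GROUP == 64,
                   "=> 64 atomic groups of 4 queues each");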
+ */ + queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; + + /** Queue context for the default queue */ + queue_context_t def_q_ctx; +} packet_loopback_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL packet_loopback_shm_t *pkt_shm; + +static void +create_atomic_group_queues(const em_eo_t eo, eo_context_t *const eo_ctx); + +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]); + +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo); + +static void +receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context); + +static em_status_t +stop_eo(void *eo_context, em_eo_t eo); + +static inline int +rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, + const em_queue_t queue, queue_context_t *const q_ctx); + +static inline em_event_t +alloc_copy_free(em_event_t event); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Packet Loopback test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + pkt_shm = env_shared_reserve("PktLoopShMem", + sizeof(packet_loopback_shm_t)); + em_register_error_handler(test_error_handler); + } else { + pkt_shm = env_shared_lookup("PktLoopShMem"); + } + + if (pkt_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Packet Loopback AG init failed on EM-core: %u", + em_core_id()); + else if (core == 0) + memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); +} + +/** + * Startup of the Packet Loopback test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. 
+ */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo; + eo_context_t *eo_ctx; + em_status_t ret, start_fn_ret = EM_ERROR; + int if_id, i; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads); + + test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || + appl_conf->pktio.if_count <= 0, + "Invalid number of interfaces given:%d - need 1-%d(MAX)", + appl_conf->pktio.if_count, MAX_NUM_IF); + + pktin_mode_t pktin_mode = appl_conf->pktio.in_mode; + + test_fatal_if(!pktin_polled_mode(pktin_mode), + "Invalid pktin-mode: %s(%i).\n" + "Application:%s supports only polled pktin-modes: %s(%i), %s(%i)", + pktin_mode_str(pktin_mode), pktin_mode, + appl_conf->name, + pktin_mode_str(DIRECT_RECV), DIRECT_RECV, + pktin_mode_str(PLAIN_QUEUE), PLAIN_QUEUE); + + /* + * Create one EO + */ + eo_ctx = &pkt_shm->eo_ctx; + /* Initialize EO context data to '0' */ + memset(eo_ctx, 0, sizeof(eo_context_t)); + + eo = em_eo_create(appl_conf->name, start_eo, start_eo_local, + stop_eo, NULL, receive_eo_packet, eo_ctx); + test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); + eo_ctx->eo = eo; + + /* Store the number of pktio interfaces used */ + eo_ctx->if_count = appl_conf->pktio.if_count; + /* Store the used interface ids */ + for (i = 0; i < appl_conf->pktio.if_count; i++) { + if_id = appl_conf->pktio.if_ids[i]; + test_fatal_if(if_id > MAX_IF_ID, + "Interface id out of range! %d > %d(MAX)", + if_id, MAX_IF_ID); + eo_ctx->if_ids[i] = if_id; + } + + /* Start the EO - queues etc. created in the EO start function */ + ret = em_eo_start_sync(eo, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + /* + * All input & output queues have been created and enabled in the + * EO start function, now direct pktio traffic to those queues. + */ + for (i = 0; i < NUM_PKTIN_QUEUES; i++) { + /* Direct ip_addr:udp_port into this queue */ + queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; + uint32_t ip_addr = q_ctx->flow_params.ipv4; + uint16_t port = q_ctx->flow_params.port; + uint8_t proto = q_ctx->flow_params.proto; + em_queue_t queue = q_ctx->queue; + em_queue_t tmp_q; + char ip_str[sizeof("255.255.255.255")]; + + ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); + + pktio_add_queue(proto, ip_addr, port, queue); + + /* Sanity checks (lookup what was configured) */ + tmp_q = pktio_lookup_sw(proto, ip_addr, port); + test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, + "Lookup fails IP:UDP %s:%d\n" + "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", + ip_str, port, queue, tmp_q); + /* Print first and last mapping */ + if (i == 0 || i == NUM_PKTIN_QUEUES - 1) + APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", + ip_str, port, tmp_q); + } + + /* + * Direct all non-lookup hit packets into this queue. 
+ * Note: all packets that miss the IP:port lookup end up in this queue + */ + pktio_default_queue(eo_ctx->default_queue); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; + em_eo_t eo = eo_ctx->eo; + em_status_t ret; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(pkt_shm); + em_unregister_error_handler(); + } +} + +/** + * EO start function (run once at startup on ONE core) + * + * The global start function creates the application specific queues and + * associates the queues with the EO and the packet flows it wants to process. + */ +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + em_queue_t def_queue, pktout_queue; + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; /* platform specific */ + pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ + em_status_t ret; + eo_context_t *const eo_ctx = eo_context; + queue_context_t *defq_ctx; + int if_id; + int i, j; + + (void)conf; + + /* Store the EO name in the EO-context data */ + em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); + + APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", + eo, eo_ctx->name, eo_ctx->if_count); + + /* + * Create packet output queues. + * + * Dimension the number of pktout queues to be equal to the number + * of EM cores per interface to minimize output resource contention. + */ + test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, + "No room to store pktout queues"); + eo_ctx->pktout_queues_per_if = em_core_count(); + + memset(&queue_conf, 0, sizeof(queue_conf)); + memset(&output_conf, 0, sizeof(output_conf)); + queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; + queue_conf.min_events = 0; /* system default */ + queue_conf.conf_len = sizeof(output_conf); + queue_conf.conf = &output_conf; + + /* Output-queue callback function (em_output_func_t) */ + output_conf.output_fn = pktio_tx; + /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ + output_conf.output_fn_args = &pktio_tx_fn_args; + output_conf.args_len = sizeof(pktio_tx_fn_args_t); + /* Content of 'pktio_tx_fn_args' set in loop */ + + /* Create the packet output queues for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktio_tx_fn_args.if_id = if_id; + pktout_queue = + em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, + EM_QUEUE_PRIO_UNDEF, + EM_QUEUE_GROUP_UNDEF, &queue_conf); + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue create failed:%d,%d", i, j); + eo_ctx->pktout_queue[if_id][j] = pktout_queue; + } + } + + /* + * Default queue for all packets not matching any + * specific input queue criteria + * Note: The queue type is EM_QUEUE_TYPE_PARALLEL !
+ */ + def_queue = em_queue_create("default", EM_QUEUE_TYPE_PARALLEL, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + test_fatal_if(def_queue == EM_QUEUE_UNDEF, + "Default Queue creation failed"); + + /* Store the default queue Id in the EO-context data */ + eo_ctx->default_queue = def_queue; + + /* Associate the queue with this EO */ + ret = em_eo_add_queue_sync(eo, def_queue); + test_fatal_if(ret != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eo, def_queue); + + /* Set queue context for the default queue */ + defq_ctx = &pkt_shm->def_q_ctx; + ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx); + test_fatal_if(ret != EM_OK, + "Set Q-ctx for the default queue failed:%" PRI_STAT "\n" + "default-Q:%" PRI_QUEUE "", ret, def_queue); + + /* Set the pktout queues to use for the default queue, one per if */ + set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/); + + /* + * Create the atomic groups and the queues in each atomic group. + * Each queue is connected to an input UDP-flow + */ + create_atomic_group_queues(eo, eo_ctx); + + APPL_PRINT("EO %" PRI_EO " global start done.\n", eo); + + return EM_OK; +} + +/** + * Create the atomic groups and the queues in each atomic group. + * Each queue is connected to an input UDP-flow + * Called by EO-start + */ +static void +create_atomic_group_queues(const em_eo_t eo, eo_context_t *const eo_ctx) +{ + uint16_t port_offset = (uint16_t)-1; + uint32_t q_idx = 0, agrp_idx = 0; + queue_context_t *q_ctx; + em_queue_t queue; + em_atomic_group_t atomic_group = EM_ATOMIC_GROUP_UNDEF; + em_queue_prio_t ag_queue_priorities[] = {EM_QUEUE_PRIO_HIGHEST, + EM_QUEUE_PRIO_HIGH, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_PRIO_LOW, + /*end marker:*/ EM_QUEUE_PRIO_UNDEF}; + int ag_prio_idx = 0; /* index into ag_queue_priorities[] */ + em_queue_prio_t q_prio; + em_status_t ret; + int i, j; + + memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx)); + + for (i = 0; i < NUM_IP_ADDRS; i++) { + char ip_str[sizeof("255.255.255.255")]; + uint32_t ip_addr = IP_ADDR_BASE + i; + + ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); + + for (j = 0; j < NUM_PORTS_PER_IP; j++) { + uint16_t udp_port; + + if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ + port_offset++; + else /* Same UDP-ports per IP-interface */ + port_offset = j; + + udp_port = UDP_PORT_BASE + port_offset; + + /* + * Determine whether to create a new atomic group. + * One atomic group contains 'QUEUES_PER_ATOMIC_GROUP' + * number of queues. + */ + if (q_idx % QUEUES_PER_ATOMIC_GROUP == 0) { + atomic_group = + em_atomic_group_create("PktLB_AG", + EM_QUEUE_GROUP_DEFAULT); + test_fatal_if(atomic_group == + EM_ATOMIC_GROUP_UNDEF, + "Atomic group creation failed!"); + eo_ctx->agrps[agrp_idx++] = atomic_group; + test_fatal_if(agrp_idx > MAX_ATOMIC_GROUPS, + "Too many atomic groups!"); + } + /* Get prio for the next queue in the atomic group */ + q_prio = ag_queue_priorities[ag_prio_idx++]; + if (q_prio == EM_QUEUE_PRIO_UNDEF/*end marker*/) { + ag_prio_idx = 0; + q_prio = ag_queue_priorities[ag_prio_idx++]; + } + /* + * Create a queue into the atomic group. Each queue in + * the atomic group has different priority. 
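To make the priority cycling concrete: with QUEUES_PER_ATOMIC_GROUP = 4 and the ag_queue_priorities[] table above, flow-queues #0-#3 land in atomic group #0 with prios HIGHEST, HIGH, NORMAL and LOW respectively; queue #4 then triggers creation of atomic group #1 and the priority cycle restarts, continuing this way up to group #63.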
+ */ + queue = em_queue_create_ag("udp-flow", q_prio, + atomic_group, NULL); + test_fatal_if(queue == EM_QUEUE_UNDEF, + "AG queue-create failed:\n" + "UDP-port:%d AG:%" PRI_AGRP "", + udp_port, atomic_group); + + /* + * Store the id of the created queue into the + * application specific EO-context + */ + eo_ctx->queue[q_idx] = queue; + + /* Set the queue specific application (EO) context */ + q_ctx = &pkt_shm->eo_q_ctx[q_idx]; + /* Save flow params */ + q_ctx->flow_params.ipv4 = ip_addr; + q_ctx->flow_params.port = udp_port; + q_ctx->flow_params.proto = IPV4_PROTO_UDP; + q_ctx->queue = queue; + + ret = em_queue_set_context(queue, q_ctx); + test_fatal_if(ret != EM_OK, + "Set Q-ctx failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_idx, queue); + + /* Add the queue to the EO */ + ret = em_eo_add_queue_sync(eo, queue); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo, queue); + + /* + * Set the pktout queues to use for this input queue, + * one pktout queue per interface. + */ + set_pktout_queues(queue, eo_ctx, + q_ctx->pktout_queue/*out*/); + + /* Update the Queue Index */ + q_idx++; + test_fatal_if(q_idx > NUM_PKTIN_QUEUES, + "Too many queues!"); + } + } +} + +/** + * Helper func to store the packet output queues for a specific input queue + */ +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]) +{ + int if_count = eo_ctx->if_count; + int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; + int id, i; + + for (i = 0; i < if_count; i++) { + id = eo_ctx->if_ids[i]; + pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; + } +} + +/** + * EO Local start function (run once at startup on EACH core) + + * Not really needed in this application, but included + * to demonstrate usage. 
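A note on set_pktout_queues() above: the queue handle is reused as a cheap hash, pktout_idx = (uintptr_t)queue % pktout_queues_per_if, so with e.g. 4 pktout queues per interface an input queue whose handle value is 7 always maps to pktout index 3 on every used interface. Different input queues thus spread roughly evenly over the per-interface output queues, while each input queue keeps a stable output queue, which presumably limits contention on transmit.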
+ */ +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + + APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", + eo, eo_ctx->name, em_core_id()); + + return EM_OK; +} + +/** + * EO stop function + */ +static em_status_t +stop_eo(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + em_status_t ret; + em_atomic_group_t agrp; + em_queue_t pktout_queue; + int if_id; + int i, j; + + APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + for (i = 0; i < MAX_ATOMIC_GROUPS; i++) { + agrp = eo_ctx->agrps[i]; + if (agrp == EM_ATOMIC_GROUP_UNDEF) + break; + em_atomic_group_delete(agrp); + } + + /* Delete the packet output queues created for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktout_queue = eo_ctx->pktout_queue[if_id][j]; + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue undef:%d,%d", i, j); + ret = em_queue_delete(pktout_queue); + test_fatal_if(ret != EM_OK, + "Pktout queue delete failed:%d,%d", i, j); + } + } + + return EM_OK; +} + +/** + * EO event receive function + */ +static void +receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + eo_context_t *const eo_ctx = eo_context; + queue_context_t *const q_ctx = queue_context; + int in_port; + int out_port; + em_queue_t pktout_queue; + em_status_t status; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(queue == eo_ctx->default_queue)) + APPL_PRINT("queue default: %" PRI_QUEUE "\n", queue); + + in_port = pktio_input_port(event); + + if (X_CONNECT_PORTS) + out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; + else + out_port = in_port; + + pktout_queue = q_ctx->pktout_queue[out_port]; + + if (ENABLE_ERROR_CHECKS) { + if (rx_error_check(eo_ctx, event, queue, q_ctx) != 0) + return; + } + + /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: src<->dst */ + pktio_swap_addrs(event); + + if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */ + event = alloc_copy_free(event); + + /* + * Send the packet buffer back out via the pktout queue through + * the 'out_port' + */ + status = em_send(event, pktout_queue); + if (unlikely(status != EM_OK)) + em_free(event); +} + +static inline int +rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, + const em_queue_t queue, queue_context_t *const q_ctx) +{ + static ENV_LOCAL uint64_t drop_cnt = 1; + uint8_t proto; + uint32_t ipv4_dst; + uint16_t port_dst; + flow_params_t *fp; + + pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); + + /* + * Drop everything from the default queue + */ + if (unlikely(queue == eo_ctx->default_queue)) { + char ip_str[sizeof("255.255.255.255")]; + + ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); + + APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n", + ip_str, port_dst, em_core_id(), drop_cnt++); + + em_free(event); + return -1; + } + + /* + * Check IP address and port: compare packet against the stored values + * in the queue context + */ + fp = &q_ctx->flow_params; + + test_fatal_if(fp->ipv4 != ipv4_dst || + fp->port != port_dst || fp->proto != proto, + "Q:%" PRI_QUEUE " received illegal packet!\n" + "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" + "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" + "Abort!", queue, ipv4_dst, port_dst, proto, + fp->ipv4, fp->port, fp->proto); + + /* Everything OK, return zero */ + return 0; +} + +/** + * Alloc a new event, copy the contents & header into the new event + * and finally free the original event. Returns a pointer to the new event. + * + * Used for testing the performance impact of alloc-copy-free operations. + */ +static inline em_event_t +alloc_copy_free(em_event_t event) +{ + /* Copy the packet event */ + em_event_t new_event = pktio_copy_event(event); + + /* Free old event */ + em_free(event); + + return new_event; +} diff --git a/programs/packet_io/loopback_local.c b/programs/packet_io/loopback_local.c index 84d4360b..664903ee 100644 --- a/programs/packet_io/loopback_local.c +++ b/programs/packet_io/loopback_local.c @@ -1,819 +1,829 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Simple Load Balanced Packet-IO test application using local queues. - * - * An application (EO) that receives UDP datagrams and exchanges - * the src-dst addesses before sending the datagram back out. - */ - -#include <string.h> -#include <stdlib.h> -#include <stdio.h> - -#include <event_machine.h> -#include <event_machine/platform/env/environment.h> - -#include "cm_setup.h" -#include "cm_error_handler.h" -#include "cm_pktio.h" - -/* - * Test configuration - */ - -/** - * Set the used queue type for EM queues receiving packet data. - * - * Default: use EM_QUEUE_TYPE_LOCAL for max throughput by skipping - * load balancing and dynamic scheduling in favor of raw performance. - * - * Try also with EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL or - * EM_QUEUE_TYPE_PARALLEL_ORDERED. - * Alt. set QUEUE_TYPE_MIX to '1' to use all queue types simultaneously. - */ -#define QUEUE_TYPE EM_QUEUE_TYPE_LOCAL -/* #define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */ - -/** - * Test with all different queue types simultaneously: - * LOCAL, ATOMIC, PARALLELL, PARALLEL_ORDERED - */ -#define QUEUE_TYPE_MIX 0 /* 0=False or 1=True */ - -/** - * Create an EM queue per UDP/IP flow or use the default queue. - * - * If set to '0' then all traffic is routed through one 'default queue'(slow), - * if set to '1' each traffic flow is routed to its own EM-queue. - */ -#define QUEUE_PER_FLOW 1 /* 0=False or 1=True */ - -/** - * Select whether the UDP ports should be unique over all the IP-interfaces - * (set to 1) or reused per IP-interface (thus each UDP port is configured - * once for each IP-interface). Using '0' (not unique) makes it easier to - * copy traffic generator settings from one IF-port to another as only the - * dst-IP address has to be changed. - */ -#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ - -/** - * Select whether the input and output ports should be cross-connected.
- */ -#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ - -/** - * Enable per packet error checking - */ -#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ - -/** - * Test em_alloc and em_free per packet - * - * Alloc new event, copy event, free old event - */ -#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ - -/* Configure the IP addresses and UDP ports that this application will use */ -#define NUM_IP_ADDRS 4 -#define NUM_PORTS_PER_IP 64 - -#define IP_ADDR_A 192 -#define IP_ADDR_B 168 -#define IP_ADDR_C 1 -#define IP_ADDR_D 16 - -#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ - (IP_ADDR_C << 8) | (IP_ADDR_D)) -#define UDP_PORT_BASE 1024 -/* - * IANA Dynamic Ports (Private or Ephemeral Ports), - * from 49152 to 65535 (never assigned) - */ -/* #define UDP_PORT_BASE 0xC000 */ - -#define MAX_NUM_IF 4 /* max number of used interfaces */ -#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ - -#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) -#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES - -#define IS_ODD(x) (((x) & 0x1)) -#define IS_EVEN(x) (!IS_ODD(x)) - -/** - * EO context - */ -typedef struct { - em_eo_t eo; - char name[32]; - /** interface count as provided by appl_conf to test_start() */ - int if_count; - /** interface ids as provided via appl_conf_t to test_start() */ - int if_ids[MAX_NUM_IF]; - /** default queue: pkts/events not matching any other input criteria */ - em_queue_t default_queue; - /** all created input queues */ - em_queue_t queue[NUM_PKTIN_QUEUES]; - /** the number of packet output queues to use per interface */ - int pktout_queues_per_if; - /* pktout queues: accessed by if_id, thus empty middle slots possible */ - em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; -} eo_context_t; - -/** - * Save the dst IP, protocol and port in the queue-context. - * Verify (if error checking enabled) that the received packet matches the - * configuration for the queue. - */ -typedef struct flow_params_ { - uint32_t ipv4; - uint16_t port; - uint8_t proto; - uint8_t _pad; -} flow_params_t; - -/** - * Queue-Context, i.e. queue specific data, each queue has its own instance - */ -typedef struct { - /** a pktout queue for each interface, precalculated */ - em_queue_t pktout_queue[MAX_IF_ID + 1]; - /** saved flow params for the EM-queue */ - flow_params_t flow_params; - /** queue handle */ - em_queue_t queue; -} queue_context_t; - -/** - * Packet Loopback shared memory - */ -typedef struct { - /** EO (application) context */ - eo_context_t eo_ctx; - /** - * Array containing the contexts of all the queues handled by the EO. - * A queue context contains the flow/queue specific data for the - * application EO. 
- */ - queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; - - /** Queue context for the default queue */ - queue_context_t def_q_ctx; -} packet_loopback_shm_t; - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL packet_loopback_shm_t *pkt_shm; - -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static void -create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx); - -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]); - -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo); - -static void -receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context); - -static em_status_t -stop_eo(void *eo_context, em_eo_t eo); - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx); - -static inline em_event_t -alloc_copy_free(em_event_t event); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Packet Loopback test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - pkt_shm = env_shared_reserve("PktLoopShMem", - sizeof(packet_loopback_shm_t)); - em_register_error_handler(test_error_handler); - } else { - pkt_shm = env_shared_lookup("PktLoopShMem"); - } - - if (pkt_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Packet Loopback init failed on EM-core: %u", - em_core_id()); - else if (core == 0) - memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); -} - -/** - * Startup of the Packet Loopback test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - eo_context_t *eo_ctx; - em_status_t ret, start_fn_ret = EM_ERROR; - int if_id, i; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads); - - test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || - appl_conf->pktio.if_count <= 0, - "Invalid number of interfaces given:%d - need 1-%d(MAX)", - appl_conf->pktio.if_count, MAX_NUM_IF); - - /* - * Create one EO - */ - eo_ctx = &pkt_shm->eo_ctx; - /* Initialize EO context data to '0' */ - memset(eo_ctx, 0, sizeof(eo_context_t)); - - eo = em_eo_create(appl_conf->name, start_eo, start_eo_local, - stop_eo, NULL, receive_eo_packet, eo_ctx); - test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); - eo_ctx->eo = eo; - - /* Store the number of pktio interfaces used */ - eo_ctx->if_count = appl_conf->pktio.if_count; - /* Store the used interface ids */ - for (i = 0; i < appl_conf->pktio.if_count; i++) { - if_id = appl_conf->pktio.if_ids[i]; - test_fatal_if(if_id > MAX_IF_ID, - "Interface id out of range! %d > %d(MAX)", - if_id, MAX_IF_ID); - eo_ctx->if_ids[i] = if_id; - } - - /* Start the EO - queues etc. created in the EO start function */ - ret = em_eo_start_sync(eo, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - /* - * All input & output queues have been created and enabled in the - * EO start function, now direct pktio traffic to those queues. - */ - for (i = 0; i < NUM_PKTIN_QUEUES; i++) { - /* Direct ip_addr:udp_port into this queue */ - queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; - uint32_t ip_addr = q_ctx->flow_params.ipv4; - uint16_t port = q_ctx->flow_params.port; - uint8_t proto = q_ctx->flow_params.proto; - em_queue_t queue = q_ctx->queue; - em_queue_t tmp_q; - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - pktio_add_queue(proto, ip_addr, port, queue); - - /* Sanity checks (lookup what was configured) */ - tmp_q = pktio_lookup_sw(proto, ip_addr, port); - test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, - "Lookup fails IP:UDP %s:%d\n" - "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", - ip_str, port, queue, tmp_q); - /* Print first and last mapping */ - if (i == 0 || i == NUM_PKTIN_QUEUES - 1) - APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", - ip_str, port, tmp_q); - } - - /* - * Direct all non-lookup hit packets into this queue. 
- * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue - */ - pktio_default_queue(eo_ctx->default_queue); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; - em_eo_t eo = eo_ctx->eo; - em_status_t ret; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(pkt_shm); - em_unregister_error_handler(); - } -} - -/** - * EO start function (run once at startup on ONE core) - * - * The global start function creates the application specific queues and - * associates the queues with the EO and the packet flows it wants to process. - */ -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - em_queue_t def_queue, pktout_queue; - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; /* platform specific */ - pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ - em_queue_type_t queue_type; - em_queue_group_t queue_group; - em_status_t ret; - eo_context_t *const eo_ctx = eo_context; - queue_context_t *defq_ctx; - int if_id; - int i, j; - - (void)conf; - - /* Store the EO name in the EO-context data */ - em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); - - APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", - eo, eo_ctx->name, eo_ctx->if_count); - - /* - * Create packet output queues. - * - * Dimension the number of pktout queues to be equal to the number - * of EM cores per interface to minimize output resource contention. 
- */ - test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, - "No room to store pktout queues"); - eo_ctx->pktout_queues_per_if = em_core_count(); - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - - /* Output-queue callback function (em_output_func_t) */ - output_conf.output_fn = pktio_tx; - /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ - output_conf.output_fn_args = &pktio_tx_fn_args; - output_conf.args_len = sizeof(pktio_tx_fn_args_t); - /* Content of 'pktio_tx_fn_args' set in loop */ - - /* Create the packet output queues for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktio_tx_fn_args.if_id = if_id; - pktout_queue = - em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, &queue_conf); - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue create failed:%d,%d", i, j); - eo_ctx->pktout_queue[if_id][j] = pktout_queue; - } - } - - /* - * Default queue for all packets not mathing any - * specific input queue criteria - */ - queue_type = QUEUE_TYPE; - if (queue_type == EM_QUEUE_TYPE_LOCAL) - queue_group = EM_QUEUE_GROUP_UNDEF; - else - queue_group = EM_QUEUE_GROUP_DEFAULT; - def_queue = em_queue_create("default", queue_type, EM_QUEUE_PRIO_NORMAL, - queue_group, NULL); - test_fatal_if(def_queue == EM_QUEUE_UNDEF, - "Default Queue creation failed"); - - /* Store the default queue Id in the EO-context data */ - eo_ctx->default_queue = def_queue; - - /* Associate the queue with this EO */ - ret = em_eo_add_queue_sync(eo, def_queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo, def_queue); - - /* Set queue context for the default queue */ - defq_ctx = &pkt_shm->def_q_ctx; - ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx for the default queue failed:%" PRI_STAT "\n" - "default-Q:%" PRI_QUEUE "", ret, def_queue); - - /* Set the pktout queues to use for the default queue, one per if */ - set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/); - - if (QUEUE_PER_FLOW) - create_queue_per_flow(eo, eo_ctx); - - APPL_PRINT("EO %" PRI_EO " global start done.\n", eo); - - return EM_OK; -} - -/** - * Helper func for EO start() to create a queue per packet flow (if configured) - */ -static void -create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx) -{ - uint16_t port_offset = (uint16_t)-1; - uint32_t q_ctx_idx = 0; - queue_context_t *q_ctx; - em_queue_type_t qtype; - em_queue_group_t queue_group; - em_queue_t queue; - em_status_t ret; - int i, j; - - memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx)); - - for (i = 0; i < NUM_IP_ADDRS; i++) { - char ip_str[sizeof("255.255.255.255")]; - uint32_t ip_addr = IP_ADDR_BASE + i; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - for (j = 0; j < NUM_PORTS_PER_IP; j++) { - uint16_t udp_port; - - if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ - port_offset++; - else /* Same UDP-ports per IP-interface */ - port_offset = j; - - udp_port = UDP_PORT_BASE + port_offset; - - if (!QUEUE_TYPE_MIX) { - /* Use only queues of a single type */ - qtype = 
QUEUE_TYPE; - } else { - /* Spread out over the 4 diff queue-types */ - int nbr_q = ((i * NUM_PORTS_PER_IP) + j) % 4; - - if (nbr_q == 0) - qtype = EM_QUEUE_TYPE_LOCAL; - else if (nbr_q == 1) - qtype = EM_QUEUE_TYPE_ATOMIC; - else if (nbr_q == 2) - qtype = EM_QUEUE_TYPE_PARALLEL; - else - qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; - } - - /* Create a queue */ - if (qtype == EM_QUEUE_TYPE_LOCAL) - queue_group = EM_QUEUE_GROUP_UNDEF; - else - queue_group = EM_QUEUE_GROUP_DEFAULT; - queue = em_queue_create("udp-flow", qtype, - EM_QUEUE_PRIO_NORMAL, - queue_group, NULL); - test_fatal_if(queue == EM_QUEUE_UNDEF, - "Queue create failed: UDP-port %d", - udp_port); - /* - * Store the id of the created queue into the - * application specific EO-context - */ - eo_ctx->queue[q_ctx_idx] = queue; - - /* Set queue specific appl (EO) context */ - q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx]; - /* Save flow params */ - q_ctx->flow_params.ipv4 = ip_addr; - q_ctx->flow_params.port = udp_port; - q_ctx->flow_params.proto = IPV4_PROTO_UDP; - q_ctx->queue = queue; - - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue); - - /* Add the queue to the EO */ - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo, queue); - - /* - * Set the pktout queues to use for this input queue, - * one pktout queue per interface. - */ - set_pktout_queues(queue, eo_ctx, - q_ctx->pktout_queue/*out*/); - - /* Update the Queue Context Index */ - q_ctx_idx++; - test_fatal_if(q_ctx_idx > NUM_PKTIN_QUEUES, - "Too many queues!"); - } - } -} - -/** - * Helper func to store the packet output queues for a specific input queue - */ -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]) -{ - int if_count = eo_ctx->if_count; - int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; - int id, i; - - for (i = 0; i < if_count; i++) { - id = eo_ctx->if_ids[i]; - pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; - } -} - -/** - * EO Local start function (run once at startup on EACH core) - - * Not really needed in this application, but included - * to demonstrate usage. 
- */ -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - - APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", - eo, eo_ctx->name, em_core_id()); - - return EM_OK; -} - -/** - * EO stop function - */ -static em_status_t -stop_eo(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - em_status_t ret; - em_queue_t pktout_queue; - int if_id; - int i, j; - - APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - /* Delete the packet output queues created for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktout_queue = eo_ctx->pktout_queue[if_id][j]; - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue undef:%d,%d", i, j); - ret = em_queue_delete(pktout_queue); - test_fatal_if(ret != EM_OK, - "Pktout queue delete failed:%d,%d", i, j); - } - } - - return EM_OK; -} - -/** - * EO event receive function - */ -static void -receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - queue_context_t *const q_ctx = queue_context; - int in_port; - int out_port; - em_queue_t pktout_queue; - em_status_t status; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - in_port = pktio_input_port(event); - - if (X_CONNECT_PORTS) - out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; - else - out_port = in_port; - - pktout_queue = q_ctx->pktout_queue[out_port]; - - if (ENABLE_ERROR_CHECKS) { - eo_context_t *const eo_ctx = eo_context; - - if (rx_error_check(eo_ctx, event, queue, q_ctx) != 0) - return; - } - - /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: scr<->dst */ - pktio_swap_addrs(event); - - if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */ - event = alloc_copy_free(event); - - /* - * Send the packet buffer back out via the pktout queue through - * the 'out_port' - */ - status = em_send(event, pktout_queue); - if (unlikely(status != EM_OK)) - em_free(event); -} - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx) -{ - static ENV_LOCAL uint64_t drop_cnt = 1; - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - - if (QUEUE_PER_FLOW) { - flow_params_t *fp; - - /* Drop everything from the default queue */ - if (unlikely(queue == eo_ctx->default_queue)) { - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); - - APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n", - ip_str, port_dst, em_core_id(), drop_cnt++); - - em_free(event); - return -1; - } - - /* - * Check IP address and port: compare packet against the stored - * values in the queue context - */ - fp = &q_ctx->flow_params; - test_fatal_if(fp->ipv4 != ipv4_dst || - fp->port != port_dst || fp->proto != proto, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Abort!", queue, ipv4_dst, port_dst, proto, - fp->ipv4, fp->port, fp->proto); - } else { - if (unlikely(proto != IPV4_PROTO_UDP)) { - APPL_PRINT("Pkt: defQ, not UDP drop-%d-#%" PRIu64 "\n", - em_core_id(), drop_cnt++); - em_free(event); - return -1; - } - - test_fatal_if(ipv4_dst < (uint32_t)IP_ADDR_BASE || - ipv4_dst >= - (uint32_t)(IP_ADDR_BASE + NUM_IP_ADDRS) || - port_dst < UDP_PORT_BASE || - port_dst >= (UDP_PORT_BASE + NUM_PKTIN_QUEUES) || - proto != IPV4_PROTO_UDP, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Values not in the configurated range!\n" - "Abort!", - queue, ipv4_dst, port_dst, proto); - } - - /* Everything OK, return zero */ - return 0; -} - -/** - * Alloc a new event, copy the contents&header into the new event - * and finally free the original event. Returns a pointer to the new event. - * - * Used for testing the performance impact of alloc-copy-free operations. - */ -static inline em_event_t -alloc_copy_free(em_event_t event) -{ - /* Copy the packet event */ - em_event_t new_event = pktio_copy_event(event); - - /* Free old event */ - em_free(event); - - return new_event; -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Simple Load Balanced Packet-IO test application using local queues.
+ *
+ * An application (EO) that receives UDP datagrams and exchanges
+ * the src-dst addresses before sending the datagram back out.
+ */
+
+#include
+#include
+#include
+
+#include
+#include
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+#include "cm_pktio.h"
+
+/*
+ * Test configuration
+ */
+
+/**
+ * Set the used queue type for EM queues receiving packet data.
+ *
+ * Default: use EM_QUEUE_TYPE_LOCAL for max throughput by skipping
+ * load balancing and dynamic scheduling in favor of raw performance.
+ *
+ * Try also with EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL or
+ * EM_QUEUE_TYPE_PARALLEL_ORDERED.
+ * Alt. set QUEUE_TYPE_MIX to '1' to use all queue types simultaneously.
+ */
+#define QUEUE_TYPE EM_QUEUE_TYPE_LOCAL
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC */
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */
+
+/**
+ * Test with all different queue types simultaneously:
+ * LOCAL, ATOMIC, PARALLEL, PARALLEL_ORDERED
+ */
+#define QUEUE_TYPE_MIX 0 /* 0=False or 1=True */
+
+/**
+ * Create an EM queue per UDP/IP flow or use the default queue.
+ *
+ * If set to '0' then all traffic is routed through one 'default queue'(slow),
+ * if set to '1' each traffic flow is routed to its own EM-queue.
+ */
+#define QUEUE_PER_FLOW 1 /* 0=False or 1=True */
+
+/**
+ * Select whether the UDP ports should be unique over all the IP-interfaces
+ * (set to 1) or reused per IP-interface (thus each UDP port is configured
+ * once for each IP-interface). Using '0' (not unique) makes it easier to
+ * copy traffic generator settings from one IF-port to another as only the
+ * dst-IP address has to be changed.
+ */
+#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */
+
+/**
+ * Select whether the input and output ports should be cross-connected.
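
For illustration of the option defined here: when cross-connect is enabled, even and odd interface ports are paired so that traffic received on one is transmitted on its peer; otherwise packets loop back out on the ingress port. A minimal sketch (out_port_for() is a hypothetical helper; the receive function open-codes the same logic with the IS_EVEN() macro defined further below):

/* Sketch only, not part of this patch: map in-port to out-port */
static inline int out_port_for(int in_port, int x_connect)
{
	if (x_connect) /* pair even<->odd: 0<->1, 2<->3, ... */
		return (in_port & 0x1) ? in_port - 1 : in_port + 1;
	return in_port; /* plain loopback on the same port */
}
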
+ */ +#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ + +/** + * Enable per packet error checking + */ +#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ + +/** + * Test em_alloc and em_free per packet + * + * Alloc new event, copy event, free old event + */ +#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ + +/* Configure the IP addresses and UDP ports that this application will use */ +#define NUM_IP_ADDRS 4 +#define NUM_PORTS_PER_IP 64 + +#define IP_ADDR_A 192 +#define IP_ADDR_B 168 +#define IP_ADDR_C 1 +#define IP_ADDR_D 16 + +#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ + (IP_ADDR_C << 8) | (IP_ADDR_D)) +#define UDP_PORT_BASE 1024 +/* + * IANA Dynamic Ports (Private or Ephemeral Ports), + * from 49152 to 65535 (never assigned) + */ +/* #define UDP_PORT_BASE 0xC000 */ + +#define MAX_NUM_IF 4 /* max number of used interfaces */ +#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ + +#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) +#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES + +#define IS_ODD(x) (((x) & 0x1)) +#define IS_EVEN(x) (!IS_ODD(x)) + +/** + * EO context + */ +typedef struct { + em_eo_t eo; + char name[32]; + /** interface count as provided by appl_conf to test_start() */ + int if_count; + /** interface ids as provided via appl_conf_t to test_start() */ + int if_ids[MAX_NUM_IF]; + /** default queue: pkts/events not matching any other input criteria */ + em_queue_t default_queue; + /** all created input queues */ + em_queue_t queue[NUM_PKTIN_QUEUES]; + /** the number of packet output queues to use per interface */ + int pktout_queues_per_if; + /* pktout queues: accessed by if_id, thus empty middle slots possible */ + em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; +} eo_context_t; + +/** + * Save the dst IP, protocol and port in the queue-context. + * Verify (if error checking enabled) that the received packet matches the + * configuration for the queue. + */ +typedef struct flow_params_ { + uint32_t ipv4; + uint16_t port; + uint8_t proto; + uint8_t _pad; +} flow_params_t; + +/** + * Queue-Context, i.e. queue specific data, each queue has its own instance + */ +typedef struct { + /** a pktout queue for each interface, precalculated */ + em_queue_t pktout_queue[MAX_IF_ID + 1]; + /** saved flow params for the EM-queue */ + flow_params_t flow_params; + /** queue handle */ + em_queue_t queue; +} queue_context_t; + +/** + * Packet Loopback shared memory + */ +typedef struct { + /** EO (application) context */ + eo_context_t eo_ctx; + /** + * Array containing the contexts of all the queues handled by the EO. + * A queue context contains the flow/queue specific data for the + * application EO. 
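
For clarity on the address plan above: IP_ADDR_BASE packs 192.168.1.16 into a host-order uint32_t, so flow i uses dst-IP IP_ADDR_BASE + i and the UDP ports UDP_PORT_BASE..UDP_PORT_BASE + NUM_PORTS_PER_IP - 1. A minimal sketch of the byte packing (unpack_ipv4() is a hypothetical helper; the application itself uses ipaddr_tostr() from the common code):

/* Sketch only: recover the dotted-quad bytes from the packed address */
static inline void unpack_ipv4(uint32_t ip, uint8_t b[4])
{
	b[0] = (uint8_t)(ip >> 24); /* 192 (IP_ADDR_A) */
	b[1] = (uint8_t)(ip >> 16); /* 168 (IP_ADDR_B) */
	b[2] = (uint8_t)(ip >> 8);  /* 1   (IP_ADDR_C) */
	b[3] = (uint8_t)ip;         /* 16  (IP_ADDR_D) + flow index */
}
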
+ */ + queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; + + /** Queue context for the default queue */ + queue_context_t def_q_ctx; +} packet_loopback_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL packet_loopback_shm_t *pkt_shm; + +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static void +create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx); + +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]); + +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo); + +static void +receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context); + +static em_status_t +stop_eo(void *eo_context, em_eo_t eo); + +static inline int +rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, + const em_queue_t queue, queue_context_t *const q_ctx); + +static inline em_event_t +alloc_copy_free(em_event_t event); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Packet Loopback test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + pkt_shm = env_shared_reserve("PktLoopShMem", + sizeof(packet_loopback_shm_t)); + em_register_error_handler(test_error_handler); + } else { + pkt_shm = env_shared_lookup("PktLoopShMem"); + } + + if (pkt_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Packet Loopback init failed on EM-core: %u", + em_core_id()); + else if (core == 0) + memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); +} + +/** + * Startup of the Packet Loopback test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. 
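
test_init() above follows the usual EM shared-memory pattern: core 0 reserves a named block once, every other core looks the same block up by name, and each core keeps its own ENV_LOCAL pointer to it. A condensed sketch of that pattern under an assumed type my_shm_t (hypothetical, for illustration only):

/* Sketch only: the reserve-or-lookup init pattern used by test_init() */
typedef struct { uint64_t counters[EM_MAX_CORES]; } my_shm_t; /* assumed */

static ENV_LOCAL my_shm_t *my_shm;

static void shm_init(int core)
{
	if (core == 0) /* first core creates the named block */
		my_shm = env_shared_reserve("MyShMem", sizeof(my_shm_t));
	else /* all other cores attach to it by name */
		my_shm = env_shared_lookup("MyShMem");
}
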
+ */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo; + eo_context_t *eo_ctx; + em_status_t ret, start_fn_ret = EM_ERROR; + int if_id, i; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads); + + test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || + appl_conf->pktio.if_count <= 0, + "Invalid number of interfaces given:%d - need 1-%d(MAX)", + appl_conf->pktio.if_count, MAX_NUM_IF); + + pktin_mode_t pktin_mode = appl_conf->pktio.in_mode; + + test_fatal_if(!pktin_polled_mode(pktin_mode), + "Invalid pktin-mode: %s(%i).\n" + "Application:%s supports only polled pktin-modes: %s(%i), %s(%i)", + pktin_mode_str(pktin_mode), pktin_mode, + appl_conf->name, + pktin_mode_str(DIRECT_RECV), DIRECT_RECV, + pktin_mode_str(PLAIN_QUEUE), PLAIN_QUEUE); + + /* + * Create one EO + */ + eo_ctx = &pkt_shm->eo_ctx; + /* Initialize EO context data to '0' */ + memset(eo_ctx, 0, sizeof(eo_context_t)); + + eo = em_eo_create(appl_conf->name, start_eo, start_eo_local, + stop_eo, NULL, receive_eo_packet, eo_ctx); + test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); + eo_ctx->eo = eo; + + /* Store the number of pktio interfaces used */ + eo_ctx->if_count = appl_conf->pktio.if_count; + /* Store the used interface ids */ + for (i = 0; i < appl_conf->pktio.if_count; i++) { + if_id = appl_conf->pktio.if_ids[i]; + test_fatal_if(if_id > MAX_IF_ID, + "Interface id out of range! %d > %d(MAX)", + if_id, MAX_IF_ID); + eo_ctx->if_ids[i] = if_id; + } + + /* Start the EO - queues etc. created in the EO start function */ + ret = em_eo_start_sync(eo, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + /* + * All input & output queues have been created and enabled in the + * EO start function, now direct pktio traffic to those queues. + */ + for (i = 0; i < NUM_PKTIN_QUEUES; i++) { + /* Direct ip_addr:udp_port into this queue */ + queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; + uint32_t ip_addr = q_ctx->flow_params.ipv4; + uint16_t port = q_ctx->flow_params.port; + uint8_t proto = q_ctx->flow_params.proto; + em_queue_t queue = q_ctx->queue; + em_queue_t tmp_q; + char ip_str[sizeof("255.255.255.255")]; + + ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); + + pktio_add_queue(proto, ip_addr, port, queue); + + /* Sanity checks (lookup what was configured) */ + tmp_q = pktio_lookup_sw(proto, ip_addr, port); + test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, + "Lookup fails IP:UDP %s:%d\n" + "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", + ip_str, port, queue, tmp_q); + /* Print first and last mapping */ + if (i == 0 || i == NUM_PKTIN_QUEUES - 1) + APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", + ip_str, port, tmp_q); + } + + /* + * Direct all non-lookup hit packets into this queue. 
+ * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue + */ + pktio_default_queue(eo_ctx->default_queue); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; + em_eo_t eo = eo_ctx->eo; + em_status_t ret; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(pkt_shm); + em_unregister_error_handler(); + } +} + +/** + * EO start function (run once at startup on ONE core) + * + * The global start function creates the application specific queues and + * associates the queues with the EO and the packet flows it wants to process. + */ +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + em_queue_t def_queue, pktout_queue; + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; /* platform specific */ + pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ + em_queue_type_t queue_type; + em_queue_group_t queue_group; + em_status_t ret; + eo_context_t *const eo_ctx = eo_context; + queue_context_t *defq_ctx; + int if_id; + int i, j; + + (void)conf; + + /* Store the EO name in the EO-context data */ + em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); + + APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", + eo, eo_ctx->name, eo_ctx->if_count); + + /* + * Create packet output queues. + * + * Dimension the number of pktout queues to be equal to the number + * of EM cores per interface to minimize output resource contention. 
+ */
+	test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF,
+		      "No room to store pktout queues");
+	eo_ctx->pktout_queues_per_if = em_core_count();
+
+	memset(&queue_conf, 0, sizeof(queue_conf));
+	memset(&output_conf, 0, sizeof(output_conf));
+	queue_conf.flags = EM_QUEUE_FLAG_DEFAULT;
+	queue_conf.min_events = 0; /* system default */
+	queue_conf.conf_len = sizeof(output_conf);
+	queue_conf.conf = &output_conf;
+
+	/* Output-queue callback function (em_output_func_t) */
+	output_conf.output_fn = pktio_tx;
+	/* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */
+	output_conf.output_fn_args = &pktio_tx_fn_args;
+	output_conf.args_len = sizeof(pktio_tx_fn_args_t);
+	/* Content of 'pktio_tx_fn_args' set in loop */
+
+	/* Create the packet output queues for each interface */
+	for (i = 0; i < eo_ctx->if_count; i++) {
+		if_id = eo_ctx->if_ids[i];
+		for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) {
+			/* pktout queue tied to interface id 'if_id' */
+			pktio_tx_fn_args.if_id = if_id;
+			pktout_queue =
+			em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT,
+					EM_QUEUE_PRIO_UNDEF,
+					EM_QUEUE_GROUP_UNDEF, &queue_conf);
+			test_fatal_if(pktout_queue == EM_QUEUE_UNDEF,
+				      "Pktout queue create failed:%d,%d", i, j);
+			eo_ctx->pktout_queue[if_id][j] = pktout_queue;
+		}
+	}
+
+	/*
+	 * Default queue for all packets not matching any
+	 * specific input queue criteria
+	 */
+	queue_type = QUEUE_TYPE;
+	if (queue_type == EM_QUEUE_TYPE_LOCAL)
+		queue_group = EM_QUEUE_GROUP_UNDEF;
+	else
+		queue_group = EM_QUEUE_GROUP_DEFAULT;
+	def_queue = em_queue_create("default", queue_type, EM_QUEUE_PRIO_NORMAL,
+				    queue_group, NULL);
+	test_fatal_if(def_queue == EM_QUEUE_UNDEF,
+		      "Default Queue creation failed");
+
+	/* Store the default queue Id in the EO-context data */
+	eo_ctx->default_queue = def_queue;
+
+	/* Associate the queue with this EO */
+	ret = em_eo_add_queue_sync(eo, def_queue);
+	test_fatal_if(ret != EM_OK,
+		      "Add queue failed:%" PRI_STAT "\n"
+		      "EO:%" PRI_EO " Queue:%" PRI_QUEUE "",
+		      ret, eo, def_queue);
+
+	/* Set queue context for the default queue */
+	defq_ctx = &pkt_shm->def_q_ctx;
+	ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx);
+	test_fatal_if(ret != EM_OK,
+		      "Set Q-ctx for the default queue failed:%" PRI_STAT "\n"
+		      "default-Q:%" PRI_QUEUE "", ret, def_queue);
+
+	/* Set the pktout queues to use for the default queue, one per if */
+	set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/);
+
+	if (QUEUE_PER_FLOW)
+		create_queue_per_flow(eo, eo_ctx);
+
+	APPL_PRINT("EO %" PRI_EO " global start done.\n", eo);
+
+	return EM_OK;
+}
+
+/**
+ * Helper func for EO start() to create a queue per packet flow (if configured)
+ */
+static void
+create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx)
+{
+	uint16_t port_offset = (uint16_t)-1;
+	uint32_t q_ctx_idx = 0;
+	queue_context_t *q_ctx;
+	em_queue_type_t qtype;
+	em_queue_group_t queue_group;
+	em_queue_t queue;
+	em_status_t ret;
+	int i, j;
+
+	memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx));
+
+	for (i = 0; i < NUM_IP_ADDRS; i++) {
+		char ip_str[sizeof("255.255.255.255")];
+		uint32_t ip_addr = IP_ADDR_BASE + i;
+
+		ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str));
+
+		for (j = 0; j < NUM_PORTS_PER_IP; j++) {
+			uint16_t udp_port;
+
+			if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */
+				port_offset++;
+			else /* Same UDP-ports per IP-interface */
+				port_offset = j;
+
+			udp_port = UDP_PORT_BASE + port_offset;
+
+			if (!QUEUE_TYPE_MIX) {
+				/* Use only queues of a single type */
+				qtype =
QUEUE_TYPE; + } else { + /* Spread out over the 4 diff queue-types */ + int nbr_q = ((i * NUM_PORTS_PER_IP) + j) % 4; + + if (nbr_q == 0) + qtype = EM_QUEUE_TYPE_LOCAL; + else if (nbr_q == 1) + qtype = EM_QUEUE_TYPE_ATOMIC; + else if (nbr_q == 2) + qtype = EM_QUEUE_TYPE_PARALLEL; + else + qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; + } + + /* Create a queue */ + if (qtype == EM_QUEUE_TYPE_LOCAL) + queue_group = EM_QUEUE_GROUP_UNDEF; + else + queue_group = EM_QUEUE_GROUP_DEFAULT; + queue = em_queue_create("udp-flow", qtype, + EM_QUEUE_PRIO_NORMAL, + queue_group, NULL); + test_fatal_if(queue == EM_QUEUE_UNDEF, + "Queue create failed: UDP-port %d", + udp_port); + /* + * Store the id of the created queue into the + * application specific EO-context + */ + eo_ctx->queue[q_ctx_idx] = queue; + + /* Set queue specific appl (EO) context */ + q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx]; + /* Save flow params */ + q_ctx->flow_params.ipv4 = ip_addr; + q_ctx->flow_params.port = udp_port; + q_ctx->flow_params.proto = IPV4_PROTO_UDP; + q_ctx->queue = queue; + + ret = em_queue_set_context(queue, q_ctx); + test_fatal_if(ret != EM_OK, + "Set Q-ctx failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue); + + /* Add the queue to the EO */ + ret = em_eo_add_queue_sync(eo, queue); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo, queue); + + /* + * Set the pktout queues to use for this input queue, + * one pktout queue per interface. + */ + set_pktout_queues(queue, eo_ctx, + q_ctx->pktout_queue/*out*/); + + /* Update the Queue Context Index */ + q_ctx_idx++; + test_fatal_if(q_ctx_idx > NUM_PKTIN_QUEUES, + "Too many queues!"); + } + } +} + +/** + * Helper func to store the packet output queues for a specific input queue + */ +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]) +{ + int if_count = eo_ctx->if_count; + int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; + int id, i; + + for (i = 0; i < if_count; i++) { + id = eo_ctx->if_ids[i]; + pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; + } +} + +/** + * EO Local start function (run once at startup on EACH core) + + * Not really needed in this application, but included + * to demonstrate usage. 
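
With QUEUE_TYPE_MIX enabled, create_queue_per_flow() above spreads the flows round-robin over the four EM queue types via the flow index modulo 4. The same selection as a compact sketch (mixed_qtype() is a hypothetical equivalent of the if/else chain above):

/* Sketch only: flow-index -> queue-type mapping used in mix mode */
static em_queue_type_t mixed_qtype(int flow_idx)
{
	switch (flow_idx % 4) {
	case 0:  return EM_QUEUE_TYPE_LOCAL;
	case 1:  return EM_QUEUE_TYPE_ATOMIC;
	case 2:  return EM_QUEUE_TYPE_PARALLEL;
	default: return EM_QUEUE_TYPE_PARALLEL_ORDERED;
	}
}
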
+ */ +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + + APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", + eo, eo_ctx->name, em_core_id()); + + return EM_OK; +} + +/** + * EO stop function + */ +static em_status_t +stop_eo(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + em_status_t ret; + em_queue_t pktout_queue; + int if_id; + int i, j; + + APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + /* Delete the packet output queues created for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktout_queue = eo_ctx->pktout_queue[if_id][j]; + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue undef:%d,%d", i, j); + ret = em_queue_delete(pktout_queue); + test_fatal_if(ret != EM_OK, + "Pktout queue delete failed:%d,%d", i, j); + } + } + + return EM_OK; +} + +/** + * EO event receive function + */ +static void +receive_eo_packet(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + queue_context_t *const q_ctx = queue_context; + int in_port; + int out_port; + em_queue_t pktout_queue; + em_status_t status; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + in_port = pktio_input_port(event); + + if (X_CONNECT_PORTS) + out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; + else + out_port = in_port; + + pktout_queue = q_ctx->pktout_queue[out_port]; + + if (ENABLE_ERROR_CHECKS) { + eo_context_t *const eo_ctx = eo_context; + + if (rx_error_check(eo_ctx, event, queue, q_ctx) != 0) + return; + } + + /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: src<->dst */
+	pktio_swap_addrs(event);
+
+	if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */
+		event = alloc_copy_free(event);
+
+	/*
+	 * Send the packet buffer back out via the pktout queue through
+	 * the 'out_port'
+	 */
+	status = em_send(event, pktout_queue);
+	if (unlikely(status != EM_OK))
+		em_free(event);
+}
+
+static inline int
+rx_error_check(eo_context_t *const eo_ctx, const em_event_t event,
+	       const em_queue_t queue, queue_context_t *const q_ctx)
+{
+	static ENV_LOCAL uint64_t drop_cnt = 1;
+	uint8_t proto;
+	uint32_t ipv4_dst;
+	uint16_t port_dst;
+
+	pktio_get_dst(event, &proto, &ipv4_dst, &port_dst);
+
+	if (QUEUE_PER_FLOW) {
+		flow_params_t *fp;
+
+		/* Drop everything from the default queue */
+		if (unlikely(queue == eo_ctx->default_queue)) {
+			char ip_str[sizeof("255.255.255.255")];
+
+			ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str));
+
+			APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n",
+				   ip_str, port_dst, em_core_id(), drop_cnt++);
+
+			em_free(event);
+			return -1;
+		}
+
+		/*
+		 * Check IP address and port: compare packet against the stored
+		 * values in the queue context
+		 */
+		fp = &q_ctx->flow_params;
+		test_fatal_if(fp->ipv4 != ipv4_dst ||
+			      fp->port != port_dst || fp->proto != proto,
+			      "Q:%" PRI_QUEUE " received illegal packet!\n"
+			      "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+			      "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+			      "Abort!", queue, ipv4_dst, port_dst, proto,
+			      fp->ipv4, fp->port, fp->proto);
+	} else {
+		if (unlikely(proto != IPV4_PROTO_UDP)) {
+			APPL_PRINT("Pkt: defQ, not UDP drop-%d-#%" PRIu64 "\n",
+				   em_core_id(), drop_cnt++);
+			em_free(event);
+			return -1;
+		}
+
+		test_fatal_if(ipv4_dst < (uint32_t)IP_ADDR_BASE ||
+			      ipv4_dst >=
+			      (uint32_t)(IP_ADDR_BASE + NUM_IP_ADDRS) ||
+			      port_dst < UDP_PORT_BASE ||
+			      port_dst >= (UDP_PORT_BASE + NUM_PKTIN_QUEUES) ||
+			      proto != IPV4_PROTO_UDP,
+			      "Q:%" PRI_QUEUE " received illegal packet!\n"
+			      "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+			      "Values not in the configured range!\n"
+			      "Abort!",
+			      queue, ipv4_dst, port_dst, proto);
+	}
+
+	/* Everything OK, return zero */
+	return 0;
+}
+
+/**
+ * Alloc a new event, copy the contents & header into the new event
+ * and finally free the original event. Returns a pointer to the new event.
+ *
+ * Used for testing the performance impact of alloc-copy-free operations.
+ */
+static inline em_event_t
+alloc_copy_free(em_event_t event)
+{
+	/* Copy the packet event */
+	em_event_t new_event = pktio_copy_event(event);
+
+	/* Free old event */
+	em_free(event);
+
+	return new_event;
+}
diff --git a/programs/packet_io/loopback_local_multircv.c b/programs/packet_io/loopback_local_multircv.c
index 97ee404d..07ead802 100644
--- a/programs/packet_io/loopback_local_multircv.c
+++ b/programs/packet_io/loopback_local_multircv.c
@@ -1,835 +1,845 @@
-/*
- * Copyright (c) 2020, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Simple Load Balanced Packet-IO test application using local queues and - * capable of receiving multiple events at a time (the EO is created with a - * multi-event receive function). - * - * The application (EO) receives a batch of UDP datagrams and exchanges - * the src-dst addesses before sending the datagrams back out. - * - * Based on lopback_local.c - */ - -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" -#include "cm_pktio.h" - -/* - * Test configuration - */ - -/** - * Set the used queue type for EM queues receiving packet data. - * - * Default: use EM_QUEUE_TYPE_LOCAL for max throughput by skipping - * load balancing and dynamic scheduling in favor of raw performance. - * - * Try also with EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL or - * EM_QUEUE_TYPE_PARALLEL_ORDERED. - * Alt. set QUEUE_TYPE_MIX to '1' to use all queue types simultaneously. - */ -#define QUEUE_TYPE EM_QUEUE_TYPE_LOCAL -/* #define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */ - -/** - * Test with all different queue types simultaneously: - * LOCAL, ATOMIC, PARALLELL, PARALLEL_ORDERED - */ -#define QUEUE_TYPE_MIX 0 /* 0=False or 1=True */ - -/** - * Create an EM queue per UDP/IP flow or use the default queue. - * - * If set to '0' then all traffic is routed through one 'default queue'(slow), - * if set to '1' each traffic flow is routed to its own EM-queue. - */ -#define QUEUE_PER_FLOW 1 /* 0=False or 1=True */ - -/** - * Select whether the UDP ports should be unique over all the IP-interfaces - * (set to 1) or reused per IP-interface (thus each UDP port is configured - * once for each IP-interface). Using '0' (not unique) makes it easier to - * copy traffic generator settings from one IF-port to another as only the - * dst-IP address has to be changed. - */ -#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ - -/** - * Select whether the input and output ports should be cross-connected. 
- */ -#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ - -/** - * Enable per packet error checking - */ -#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ - -/** - * Test em_alloc and em_free per packet - * - * Alloc new event, copy event, free old event - */ -#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ - -/* Configure the IP addresses and UDP ports that this application will use */ -#define NUM_IP_ADDRS 4 -#define NUM_PORTS_PER_IP 64 - -#define IP_ADDR_A 192 -#define IP_ADDR_B 168 -#define IP_ADDR_C 1 -#define IP_ADDR_D 16 - -#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ - (IP_ADDR_C << 8) | (IP_ADDR_D)) -#define UDP_PORT_BASE 1024 -/* - * IANA Dynamic Ports (Private or Ephemeral Ports), - * from 49152 to 65535 (never assigned) - */ -/* #define UDP_PORT_BASE 0xC000 */ - -#define MAX_NUM_IF 4 /* max number of used interfaces */ -#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ - -#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) -#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES - -#define IS_ODD(x) (((x) & 0x1)) -#define IS_EVEN(x) (!IS_ODD(x)) - -#define MAX_RCV_FN_EVENTS 256 - -/** - * EO context - */ -typedef struct { - em_eo_t eo; - char name[32]; - /** interface count as provided by appl_conf to test_start() */ - int if_count; - /** interface ids as provided via appl_conf_t to test_start() */ - int if_ids[MAX_NUM_IF]; - /** default queue: pkts/events not matching any other input criteria */ - em_queue_t default_queue; - /** all created input queues */ - em_queue_t queue[NUM_PKTIN_QUEUES]; - /** the number of packet output queues to use per interface */ - int pktout_queues_per_if; - /* pktout queues: accessed by if_id, thus empty middle slots possible */ - em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; -} eo_context_t; - -/** - * Save the dst IP, protocol and port in the queue-context. - * Verify (if error checking enabled) that the received packet matches the - * configuration for the queue. - */ -typedef struct flow_params_ { - uint32_t ipv4; - uint16_t port; - uint8_t proto; - uint8_t _pad; -} flow_params_t; - -/** - * Queue-Context, i.e. queue specific data, each queue has its own instance - */ -typedef struct { - /** a pktout queue for each interface, precalculated */ - em_queue_t pktout_queue[MAX_IF_ID + 1]; - /** saved flow params for the EM-queue */ - flow_params_t flow_params; - /** queue handle */ - em_queue_t queue; -} queue_context_t; - -/** - * Packet Loopback shared memory - */ -typedef struct { - /** EO (application) context */ - eo_context_t eo_ctx; - /** - * Array containing the contexts of all the queues handled by the EO. - * A queue context contains the flow/queue specific data for the - * application EO. 
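
This variant registers a multi-event receive function, so the dispatcher can hand the EO up to MAX_RCV_FN_EVENTS events per call instead of one at a time. A condensed sketch of the batch shape, assuming the em_send_multi()/em_free_multi() event API and a single output interface (rcv_multi_sketch() is hypothetical; the real receive_eo_packet_multi() below also handles per-port output and error checks):

/* Sketch only: batch receive, swap addresses, send all, free leftovers */
static void rcv_multi_sketch(void *eo_ctx, em_event_t ev_tbl[], int num,
			     em_queue_t queue, void *q_context)
{
	queue_context_t *q_ctx = q_context;
	em_queue_t pktout = q_ctx->pktout_queue[0]; /* single if assumed */
	int sent;

	for (int i = 0; i < num; i++)
		pktio_swap_addrs(ev_tbl[i]);

	/* em_send_multi() returns the number of events actually sent */
	sent = em_send_multi(ev_tbl, num, pktout);
	if (unlikely(sent < num))
		em_free_multi(&ev_tbl[sent], num - sent);
	(void)eo_ctx;
	(void)queue;
}
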
- */ - queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; - - /** Queue context for the default queue */ - queue_context_t def_q_ctx; -} packet_loopback_shm_t; - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL packet_loopback_shm_t *pkt_shm; - -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static void -create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx); - -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]); - -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo); - -static void -receive_eo_packet_multi(void *eo_context, em_event_t event_tbl[], int num, - em_queue_t queue, void *queue_context); - -static em_status_t -stop_eo(void *eo_context, em_eo_t eo); - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx); - -static inline em_event_t -alloc_copy_free(em_event_t event); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Packet Loopback test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - pkt_shm = env_shared_reserve("PktLoopShMem", - sizeof(packet_loopback_shm_t)); - em_register_error_handler(test_error_handler); - } else { - pkt_shm = env_shared_lookup("PktLoopShMem"); - } - - if (pkt_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Packet Loopback init failed on EM-core: %u", - em_core_id()); - else if (core == 0) - memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); -} - -/** - * Startup of the Packet Loopback test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - em_eo_multircv_param_t eo_param; - eo_context_t *eo_ctx; - em_status_t ret, start_fn_ret = EM_ERROR; - int if_id, i; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads); - - test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || - appl_conf->pktio.if_count <= 0, - "Invalid number of interfaces given:%d - need 1-%d(MAX)", - appl_conf->pktio.if_count, MAX_NUM_IF); - - /* - * Create one EO - */ - eo_ctx = &pkt_shm->eo_ctx; - /* Initialize EO context data to '0' */ - memset(eo_ctx, 0, sizeof(eo_context_t)); - - /* Init EO params */ - em_eo_multircv_param_init(&eo_param); - /* Set EO params needed by this application */ - eo_param.start = start_eo; - eo_param.local_start = start_eo_local; - eo_param.stop = stop_eo; - eo_param.receive_multi = receive_eo_packet_multi; - eo_param.max_events = MAX_RCV_FN_EVENTS; - eo_param.eo_ctx = eo_ctx; - eo = em_eo_create_multircv(appl_conf->name, &eo_param); - test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); - eo_ctx->eo = eo; - - /* Store the number of pktio interfaces used */ - eo_ctx->if_count = appl_conf->pktio.if_count; - /* Store the used interface ids */ - for (i = 0; i < appl_conf->pktio.if_count; i++) { - if_id = appl_conf->pktio.if_ids[i]; - test_fatal_if(if_id > MAX_IF_ID, - "Interface id out of range! %d > %d(MAX)", - if_id, MAX_IF_ID); - eo_ctx->if_ids[i] = if_id; - } - - /* Start the EO - queues etc. created in the EO start function */ - ret = em_eo_start_sync(eo, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - /* - * All input & output queues have been created and enabled in the - * EO start function, now direct pktio traffic to those queues. - */ - for (i = 0; i < NUM_PKTIN_QUEUES; i++) { - /* Direct ip_addr:udp_port into this queue */ - queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; - uint32_t ip_addr = q_ctx->flow_params.ipv4; - uint16_t port = q_ctx->flow_params.port; - uint8_t proto = q_ctx->flow_params.proto; - em_queue_t queue = q_ctx->queue; - em_queue_t tmp_q; - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - pktio_add_queue(proto, ip_addr, port, queue); - - /* Sanity checks (lookup what was configured) */ - tmp_q = pktio_lookup_sw(proto, ip_addr, port); - test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, - "Lookup fails IP:UDP %s:%d\n" - "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", - ip_str, port, queue, tmp_q); - /* Print first and last mapping */ - if (i == 0 || i == NUM_PKTIN_QUEUES - 1) - APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", - ip_str, port, tmp_q); - } - - /* - * Direct all non-lookup hit packets into this queue. 
- * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue - */ - pktio_default_queue(eo_ctx->default_queue); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; - em_eo_t eo = eo_ctx->eo; - em_status_t ret; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(pkt_shm); - em_unregister_error_handler(); - } -} - -/** - * EO start function (run once at startup on ONE core) - * - * The global start function creates the application specific queues and - * associates the queues with the EO and the packet flows it wants to process. - */ -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - em_queue_t def_queue, pktout_queue; - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; /* platform specific */ - pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ - em_queue_type_t queue_type; - em_queue_group_t queue_group; - em_status_t ret; - eo_context_t *const eo_ctx = eo_context; - queue_context_t *defq_ctx; - int if_id; - int i, j; - - (void)conf; - - /* Store the EO name in the EO-context data */ - em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); - - APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", - eo, eo_ctx->name, eo_ctx->if_count); - - /* - * Create packet output queues. - * - * Dimension the number of pktout queues to be equal to the number - * of EM cores per interface to minimize output resource contention. 
- */ - test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, - "No room to store pktout queues"); - eo_ctx->pktout_queues_per_if = em_core_count(); - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - - /* Output-queue callback function (em_output_func_t) */ - output_conf.output_fn = pktio_tx; - /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ - output_conf.output_fn_args = &pktio_tx_fn_args; - output_conf.args_len = sizeof(pktio_tx_fn_args_t); - /* Content of 'pktio_tx_fn_args' set in loop */ - - /* Create the packet output queues for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktio_tx_fn_args.if_id = if_id; - pktout_queue = - em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, &queue_conf); - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue create failed:%d,%d", i, j); - eo_ctx->pktout_queue[if_id][j] = pktout_queue; - } - } - - /* - * Default queue for all packets not mathing any - * specific input queue criteria - */ - queue_type = QUEUE_TYPE; - if (queue_type == EM_QUEUE_TYPE_LOCAL) - queue_group = EM_QUEUE_GROUP_UNDEF; - else - queue_group = EM_QUEUE_GROUP_DEFAULT; - def_queue = em_queue_create("default", queue_type, EM_QUEUE_PRIO_NORMAL, - queue_group, NULL); - test_fatal_if(def_queue == EM_QUEUE_UNDEF, - "Default Queue creation failed"); - - /* Store the default queue Id in the EO-context data */ - eo_ctx->default_queue = def_queue; - - /* Associate the queue with this EO */ - ret = em_eo_add_queue_sync(eo, def_queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo, def_queue); - - /* Set queue context for the default queue */ - defq_ctx = &pkt_shm->def_q_ctx; - ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx for the default queue failed:%" PRI_STAT "\n" - "default-Q:%" PRI_QUEUE "", ret, def_queue); - - /* Set the pktout queues to use for the default queue, one per if */ - set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/); - - if (QUEUE_PER_FLOW) - create_queue_per_flow(eo, eo_ctx); - - APPL_PRINT("EO %" PRI_EO " global start done.\n", eo); - - return EM_OK; -} - -/** - * Helper func for EO start() to create a queue per packet flow (if configured) - */ -static void -create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx) -{ - uint16_t port_offset = (uint16_t)-1; - uint32_t q_ctx_idx = 0; - queue_context_t *q_ctx; - em_queue_type_t qtype; - em_queue_group_t queue_group; - em_queue_t queue; - em_status_t ret; - int i, j; - - memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx)); - - for (i = 0; i < NUM_IP_ADDRS; i++) { - char ip_str[sizeof("255.255.255.255")]; - uint32_t ip_addr = IP_ADDR_BASE + i; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - for (j = 0; j < NUM_PORTS_PER_IP; j++) { - uint16_t udp_port; - - if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ - port_offset++; - else /* Same UDP-ports per IP-interface */ - port_offset = j; - - udp_port = UDP_PORT_BASE + port_offset; - - if (!QUEUE_TYPE_MIX) { - /* Use only queues of a single type */ - qtype = 
QUEUE_TYPE; - } else { - /* Spread out over the 4 diff queue-types */ - int nbr_q = ((i * NUM_PORTS_PER_IP) + j) % 4; - - if (nbr_q == 0) - qtype = EM_QUEUE_TYPE_LOCAL; - else if (nbr_q == 1) - qtype = EM_QUEUE_TYPE_ATOMIC; - else if (nbr_q == 2) - qtype = EM_QUEUE_TYPE_PARALLEL; - else - qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; - } - - /* Create a queue */ - if (qtype == EM_QUEUE_TYPE_LOCAL) - queue_group = EM_QUEUE_GROUP_UNDEF; - else - queue_group = EM_QUEUE_GROUP_DEFAULT; - queue = em_queue_create("udp-flow", qtype, - EM_QUEUE_PRIO_NORMAL, - queue_group, NULL); - test_fatal_if(queue == EM_QUEUE_UNDEF, - "Queue create failed: UDP-port %d", - udp_port); - /* - * Store the id of the created queue into the - * application specific EO-context - */ - eo_ctx->queue[q_ctx_idx] = queue; - - /* Set queue specific appl (EO) context */ - q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx]; - /* Save flow params */ - q_ctx->flow_params.ipv4 = ip_addr; - q_ctx->flow_params.port = udp_port; - q_ctx->flow_params.proto = IPV4_PROTO_UDP; - q_ctx->queue = queue; - - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue); - - /* Add the queue to the EO */ - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo, queue); - - /* - * Set the pktout queues to use for this input queue, - * one pktout queue per interface. - */ - set_pktout_queues(queue, eo_ctx, - q_ctx->pktout_queue/*out*/); - - /* Update the Queue Context Index */ - q_ctx_idx++; - test_fatal_if(q_ctx_idx > NUM_PKTIN_QUEUES, - "Too many queues!"); - } - } -} - -/** - * Helper func to store the packet output queues for a specific input queue - */ -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]) -{ - int if_count = eo_ctx->if_count; - int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; - int id, i; - - for (i = 0; i < if_count; i++) { - id = eo_ctx->if_ids[i]; - pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; - } -} - -/** - * EO Local start function (run once at startup on EACH core) - - * Not really needed in this application, but included - * to demonstrate usage. 
- */ -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - - APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", - eo, eo_ctx->name, em_core_id()); - - return EM_OK; -} - -/** - * EO stop function - */ -static em_status_t -stop_eo(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - em_status_t ret; - em_queue_t pktout_queue; - int if_id; - int i, j; - - APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - /* Delete the packet output queues created for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktout_queue = eo_ctx->pktout_queue[if_id][j]; - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue undef:%d,%d", i, j); - ret = em_queue_delete(pktout_queue); - test_fatal_if(ret != EM_OK, - "Pktout queue delete failed:%d,%d", i, j); - } - } - - return EM_OK; -} - -/** - * EO event receive function - */ -static void -receive_eo_packet_multi(void *eo_context, em_event_t event_tbl[], int num, - em_queue_t queue, void *queue_context) -{ - queue_context_t *const q_ctx = queue_context; - int in_port; - int out_port; - em_queue_t pktout_queue; - int ret, i; - - if (unlikely(appl_shm->exit_flag)) { - em_free_multi(event_tbl, num); - return; - } - - in_port = pktio_input_port(event_tbl[0]); - - if (X_CONNECT_PORTS) - out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; - else - out_port = in_port; - - pktout_queue = q_ctx->pktout_queue[out_port]; - - if (ENABLE_ERROR_CHECKS) { - eo_context_t *const eo_ctx = eo_context; - - for (i = 0; i < num; i++) - if (rx_error_check(eo_ctx, event_tbl[i], - queue, q_ctx) != 0) - return; - } - - /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: scr<->dst */ - for (i = 0; i < num; i++) - pktio_swap_addrs(event_tbl[i]); - - if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */ - for (i = 0; i < num; i++) - event_tbl[i] = alloc_copy_free(event_tbl[i]); - - /* - * Send the packet buffer back out via the pktout queue through - * the 'out_port' - */ - ret = em_send_multi(event_tbl, num, pktout_queue); - if (unlikely(ret != num)) - em_free_multi(&event_tbl[ret], num - ret); -} - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx) -{ - static ENV_LOCAL uint64_t drop_cnt = 1; - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - - if (QUEUE_PER_FLOW) { - flow_params_t *fp; - - /* Drop everything from the default queue */ - if (unlikely(queue == eo_ctx->default_queue)) { - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); - - APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n", - ip_str, port_dst, em_core_id(), drop_cnt++); - - em_free(event); - return -1; - } - - /* - * Check IP address and port: compare packet against the stored - * values in the queue context - */ - fp = &q_ctx->flow_params; - test_fatal_if(fp->ipv4 != ipv4_dst || - fp->port != port_dst || fp->proto != proto, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Abort!", queue, ipv4_dst, port_dst, proto, - fp->ipv4, fp->port, fp->proto); - } else { - if (unlikely(proto != IPV4_PROTO_UDP)) { - APPL_PRINT("Pkt: defQ, not UDP drop-%d-#%" PRIu64 "\n", - em_core_id(), drop_cnt++); - em_free(event); - return -1; - } - - test_fatal_if(ipv4_dst < (uint32_t)IP_ADDR_BASE || - ipv4_dst >= - (uint32_t)(IP_ADDR_BASE + NUM_IP_ADDRS) || - port_dst < UDP_PORT_BASE || - port_dst >= (UDP_PORT_BASE + NUM_PKTIN_QUEUES) || - proto != IPV4_PROTO_UDP, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Values not in the configurated range!\n" - "Abort!", - queue, ipv4_dst, port_dst, proto); - } - - /* Everything OK, return zero */ - return 0; -} - -/** - * Alloc a new event, copy the contents&header into the new event - * and finally free the original event. Returns a pointer to the new event. - * - * Used for testing the performance impact of alloc-copy-free operations. - */ -static inline em_event_t -alloc_copy_free(em_event_t event) -{ - /* Copy the packet event */ - em_event_t new_event = pktio_copy_event(event); - - /* Free old event */ - em_free(event); - - return new_event; -} +/* + * Copyright (c) 2020, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Simple Load Balanced Packet-IO test application using local queues and
+ * capable of receiving multiple events at a time (the EO is created with a
+ * multi-event receive function).
+ *
+ * The application (EO) receives a batch of UDP datagrams and exchanges
+ * the src-dst addresses before sending the datagrams back out.
+ *
+ * Based on loopback_local.c
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+#include "cm_pktio.h"
+
+/*
+ * Test configuration
+ */
+
+/**
+ * Set the used queue type for EM queues receiving packet data.
+ *
+ * Default: use EM_QUEUE_TYPE_LOCAL for max throughput by skipping
+ * load balancing and dynamic scheduling in favor of raw performance.
+ *
+ * Try also with EM_QUEUE_TYPE_ATOMIC, EM_QUEUE_TYPE_PARALLEL or
+ * EM_QUEUE_TYPE_PARALLEL_ORDERED.
+ * Alt. set QUEUE_TYPE_MIX to '1' to use all queue types simultaneously.
+ */
+#define QUEUE_TYPE EM_QUEUE_TYPE_LOCAL
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC */
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */
+
+/**
+ * Test with all different queue types simultaneously:
+ * LOCAL, ATOMIC, PARALLEL, PARALLEL_ORDERED
+ */
+#define QUEUE_TYPE_MIX 0 /* 0=False or 1=True */
+
+/**
+ * Create an EM queue per UDP/IP flow or use the default queue.
+ *
+ * If set to '0' then all traffic is routed through one 'default queue'(slow),
+ * if set to '1' each traffic flow is routed to its own EM-queue.
+ */
+#define QUEUE_PER_FLOW 1 /* 0=False or 1=True */
+
+/**
+ * Select whether the UDP ports should be unique over all the IP-interfaces
+ * (set to 1) or reused per IP-interface (thus each UDP port is configured
+ * once for each IP-interface). Using '0' (not unique) makes it easier to
+ * copy traffic generator settings from one IF-port to another as only the
+ * dst-IP address has to be changed.
+ */
+#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */
+
+/**
+ * Select whether the input and output ports should be cross-connected.
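+ *
+ * When cross-connected, a packet received on an even input port is sent
+ * out via the next odd port and vice versa (ports are paired 0<->1,
+ * 2<->3); see the 'out_port' selection in receive_eo_packet_multi().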
+ */ +#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ + +/** + * Enable per packet error checking + */ +#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ + +/** + * Test em_alloc and em_free per packet + * + * Alloc new event, copy event, free old event + */ +#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ + +/* Configure the IP addresses and UDP ports that this application will use */ +#define NUM_IP_ADDRS 4 +#define NUM_PORTS_PER_IP 64 + +#define IP_ADDR_A 192 +#define IP_ADDR_B 168 +#define IP_ADDR_C 1 +#define IP_ADDR_D 16 + +#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ + (IP_ADDR_C << 8) | (IP_ADDR_D)) +#define UDP_PORT_BASE 1024 +/* + * IANA Dynamic Ports (Private or Ephemeral Ports), + * from 49152 to 65535 (never assigned) + */ +/* #define UDP_PORT_BASE 0xC000 */ + +#define MAX_NUM_IF 4 /* max number of used interfaces */ +#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ + +#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) +#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES + +#define IS_ODD(x) (((x) & 0x1)) +#define IS_EVEN(x) (!IS_ODD(x)) + +#define MAX_RCV_FN_EVENTS 256 + +/** + * EO context + */ +typedef struct { + em_eo_t eo; + char name[32]; + /** interface count as provided by appl_conf to test_start() */ + int if_count; + /** interface ids as provided via appl_conf_t to test_start() */ + int if_ids[MAX_NUM_IF]; + /** default queue: pkts/events not matching any other input criteria */ + em_queue_t default_queue; + /** all created input queues */ + em_queue_t queue[NUM_PKTIN_QUEUES]; + /** the number of packet output queues to use per interface */ + int pktout_queues_per_if; + /* pktout queues: accessed by if_id, thus empty middle slots possible */ + em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; +} eo_context_t; + +/** + * Save the dst IP, protocol and port in the queue-context. + * Verify (if error checking enabled) that the received packet matches the + * configuration for the queue. + */ +typedef struct flow_params_ { + uint32_t ipv4; + uint16_t port; + uint8_t proto; + uint8_t _pad; +} flow_params_t; + +/** + * Queue-Context, i.e. queue specific data, each queue has its own instance + */ +typedef struct { + /** a pktout queue for each interface, precalculated */ + em_queue_t pktout_queue[MAX_IF_ID + 1]; + /** saved flow params for the EM-queue */ + flow_params_t flow_params; + /** queue handle */ + em_queue_t queue; +} queue_context_t; + +/** + * Packet Loopback shared memory + */ +typedef struct { + /** EO (application) context */ + eo_context_t eo_ctx; + /** + * Array containing the contexts of all the queues handled by the EO. + * A queue context contains the flow/queue specific data for the + * application EO. 
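+ *
+ * The array is cache line aligned (see below) so that the per-queue
+ * contexts do not false-share a cache line with the EO context above.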
+ */ + queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; + + /** Queue context for the default queue */ + queue_context_t def_q_ctx; +} packet_loopback_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL packet_loopback_shm_t *pkt_shm; + +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static void +create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx); + +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]); + +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo); + +static void +receive_eo_packet_multi(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context); + +static em_status_t +stop_eo(void *eo_context, em_eo_t eo); + +static inline int +rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, + const em_queue_t queue, queue_context_t *const q_ctx); + +static inline em_event_t +alloc_copy_free(em_event_t event); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Packet Loopback test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + pkt_shm = env_shared_reserve("PktLoopShMem", + sizeof(packet_loopback_shm_t)); + em_register_error_handler(test_error_handler); + } else { + pkt_shm = env_shared_lookup("PktLoopShMem"); + } + + if (pkt_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Packet Loopback init failed on EM-core: %u", + em_core_id()); + else if (core == 0) + memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); +} + +/** + * Startup of the Packet Loopback test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. 
+ */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo; + em_eo_multircv_param_t eo_param; + eo_context_t *eo_ctx; + em_status_t ret, start_fn_ret = EM_ERROR; + int if_id, i; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads); + + test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || + appl_conf->pktio.if_count <= 0, + "Invalid number of interfaces given:%d - need 1-%d(MAX)", + appl_conf->pktio.if_count, MAX_NUM_IF); + + pktin_mode_t pktin_mode = appl_conf->pktio.in_mode; + + test_fatal_if(!pktin_polled_mode(pktin_mode), + "Invalid pktin-mode: %s(%i).\n" + "Application:%s supports only polled pktin-modes: %s(%i), %s(%i)", + pktin_mode_str(pktin_mode), pktin_mode, + appl_conf->name, + pktin_mode_str(DIRECT_RECV), DIRECT_RECV, + pktin_mode_str(PLAIN_QUEUE), PLAIN_QUEUE); + + /* + * Create one EO + */ + eo_ctx = &pkt_shm->eo_ctx; + /* Initialize EO context data to '0' */ + memset(eo_ctx, 0, sizeof(eo_context_t)); + + /* Init EO params */ + em_eo_multircv_param_init(&eo_param); + /* Set EO params needed by this application */ + eo_param.start = start_eo; + eo_param.local_start = start_eo_local; + eo_param.stop = stop_eo; + eo_param.receive_multi = receive_eo_packet_multi; + eo_param.max_events = MAX_RCV_FN_EVENTS; + eo_param.eo_ctx = eo_ctx; + eo = em_eo_create_multircv(appl_conf->name, &eo_param); + test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); + eo_ctx->eo = eo; + + /* Store the number of pktio interfaces used */ + eo_ctx->if_count = appl_conf->pktio.if_count; + /* Store the used interface ids */ + for (i = 0; i < appl_conf->pktio.if_count; i++) { + if_id = appl_conf->pktio.if_ids[i]; + test_fatal_if(if_id > MAX_IF_ID, + "Interface id out of range! %d > %d(MAX)", + if_id, MAX_IF_ID); + eo_ctx->if_ids[i] = if_id; + } + + /* Start the EO - queues etc. created in the EO start function */ + ret = em_eo_start_sync(eo, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + /* + * All input & output queues have been created and enabled in the + * EO start function, now direct pktio traffic to those queues. + */ + for (i = 0; i < NUM_PKTIN_QUEUES; i++) { + /* Direct ip_addr:udp_port into this queue */ + queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; + uint32_t ip_addr = q_ctx->flow_params.ipv4; + uint16_t port = q_ctx->flow_params.port; + uint8_t proto = q_ctx->flow_params.proto; + em_queue_t queue = q_ctx->queue; + em_queue_t tmp_q; + char ip_str[sizeof("255.255.255.255")]; + + ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); + + pktio_add_queue(proto, ip_addr, port, queue); + + /* Sanity checks (lookup what was configured) */ + tmp_q = pktio_lookup_sw(proto, ip_addr, port); + test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, + "Lookup fails IP:UDP %s:%d\n" + "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", + ip_str, port, queue, tmp_q); + /* Print first and last mapping */ + if (i == 0 || i == NUM_PKTIN_QUEUES - 1) + APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", + ip_str, port, tmp_q); + } + + /* + * Direct all non-lookup hit packets into this queue. 
+ * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue + */ + pktio_default_queue(eo_ctx->default_queue); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; + em_eo_t eo = eo_ctx->eo; + em_status_t ret; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(pkt_shm); + em_unregister_error_handler(); + } +} + +/** + * EO start function (run once at startup on ONE core) + * + * The global start function creates the application specific queues and + * associates the queues with the EO and the packet flows it wants to process. + */ +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + em_queue_t def_queue, pktout_queue; + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; /* platform specific */ + pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ + em_queue_type_t queue_type; + em_queue_group_t queue_group; + em_status_t ret; + eo_context_t *const eo_ctx = eo_context; + queue_context_t *defq_ctx; + int if_id; + int i, j; + + (void)conf; + + /* Store the EO name in the EO-context data */ + em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); + + APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", + eo, eo_ctx->name, eo_ctx->if_count); + + /* + * Create packet output queues. + * + * Dimension the number of pktout queues to be equal to the number + * of EM cores per interface to minimize output resource contention. 
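+ *
+ * Each input queue is later assigned one pktout queue per interface,
+ * chosen by queue handle modulo 'pktout_queues_per_if'
+ * (see set_pktout_queues()).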
+ */
+ test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF,
+ "No room to store pktout queues");
+ eo_ctx->pktout_queues_per_if = em_core_count();
+
+ memset(&queue_conf, 0, sizeof(queue_conf));
+ memset(&output_conf, 0, sizeof(output_conf));
+ queue_conf.flags = EM_QUEUE_FLAG_DEFAULT;
+ queue_conf.min_events = 0; /* system default */
+ queue_conf.conf_len = sizeof(output_conf);
+ queue_conf.conf = &output_conf;
+
+ /* Output-queue callback function (em_output_func_t) */
+ output_conf.output_fn = pktio_tx;
+ /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */
+ output_conf.output_fn_args = &pktio_tx_fn_args;
+ output_conf.args_len = sizeof(pktio_tx_fn_args_t);
+ /* Content of 'pktio_tx_fn_args' set in loop */
+
+ /* Create the packet output queues for each interface */
+ for (i = 0; i < eo_ctx->if_count; i++) {
+ if_id = eo_ctx->if_ids[i];
+ for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) {
+ /* pktout queue tied to interface id 'if_id' */
+ pktio_tx_fn_args.if_id = if_id;
+ pktout_queue =
+ em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT,
+ EM_QUEUE_PRIO_UNDEF,
+ EM_QUEUE_GROUP_UNDEF, &queue_conf);
+ test_fatal_if(pktout_queue == EM_QUEUE_UNDEF,
+ "Pktout queue create failed:%d,%d", i, j);
+ eo_ctx->pktout_queue[if_id][j] = pktout_queue;
+ }
+ }
+
+ /*
+ * Default queue for all packets not matching any
+ * specific input queue criteria
+ */
+ queue_type = QUEUE_TYPE;
+ if (queue_type == EM_QUEUE_TYPE_LOCAL)
+ queue_group = EM_QUEUE_GROUP_UNDEF;
+ else
+ queue_group = EM_QUEUE_GROUP_DEFAULT;
+ def_queue = em_queue_create("default", queue_type, EM_QUEUE_PRIO_NORMAL,
+ queue_group, NULL);
+ test_fatal_if(def_queue == EM_QUEUE_UNDEF,
+ "Default Queue creation failed");
+
+ /* Store the default queue Id in the EO-context data */
+ eo_ctx->default_queue = def_queue;
+
+ /* Associate the queue with this EO */
+ ret = em_eo_add_queue_sync(eo, def_queue);
+ test_fatal_if(ret != EM_OK,
+ "Add queue failed:%" PRI_STAT "\n"
+ "EO:%" PRI_EO " Queue:%" PRI_QUEUE "",
+ ret, eo, def_queue);
+
+ /* Set queue context for the default queue */
+ defq_ctx = &pkt_shm->def_q_ctx;
+ ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx);
+ test_fatal_if(ret != EM_OK,
+ "Set Q-ctx for the default queue failed:%" PRI_STAT "\n"
+ "default-Q:%" PRI_QUEUE "", ret, def_queue);
+
+ /* Set the pktout queues to use for the default queue, one per if */
+ set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/);
+
+ if (QUEUE_PER_FLOW)
+ create_queue_per_flow(eo, eo_ctx);
+
+ APPL_PRINT("EO %" PRI_EO " global start done.\n", eo);
+
+ return EM_OK;
+}
+
+/**
+ * Helper func for EO start() to create a queue per packet flow (if configured)
+ */
+static void
+create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx)
+{
+ uint16_t port_offset = (uint16_t)-1;
+ uint32_t q_ctx_idx = 0;
+ queue_context_t *q_ctx;
+ em_queue_type_t qtype;
+ em_queue_group_t queue_group;
+ em_queue_t queue;
+ em_status_t ret;
+ int i, j;
+
+ memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx));
+
+ for (i = 0; i < NUM_IP_ADDRS; i++) {
+ char ip_str[sizeof("255.255.255.255")];
+ uint32_t ip_addr = IP_ADDR_BASE + i;
+
+ ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str));
+
+ for (j = 0; j < NUM_PORTS_PER_IP; j++) {
+ uint16_t udp_port;
+
+ if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */
+ port_offset++;
+ else /* Same UDP-ports per IP-interface */
+ port_offset = j;
+
+ udp_port = UDP_PORT_BASE + port_offset;
+
+ if (!QUEUE_TYPE_MIX) {
+ /* Use only queues of a single type */
+ qtype =
QUEUE_TYPE; + } else { + /* Spread out over the 4 diff queue-types */ + int nbr_q = ((i * NUM_PORTS_PER_IP) + j) % 4; + + if (nbr_q == 0) + qtype = EM_QUEUE_TYPE_LOCAL; + else if (nbr_q == 1) + qtype = EM_QUEUE_TYPE_ATOMIC; + else if (nbr_q == 2) + qtype = EM_QUEUE_TYPE_PARALLEL; + else + qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; + } + + /* Create a queue */ + if (qtype == EM_QUEUE_TYPE_LOCAL) + queue_group = EM_QUEUE_GROUP_UNDEF; + else + queue_group = EM_QUEUE_GROUP_DEFAULT; + queue = em_queue_create("udp-flow", qtype, + EM_QUEUE_PRIO_NORMAL, + queue_group, NULL); + test_fatal_if(queue == EM_QUEUE_UNDEF, + "Queue create failed: UDP-port %d", + udp_port); + /* + * Store the id of the created queue into the + * application specific EO-context + */ + eo_ctx->queue[q_ctx_idx] = queue; + + /* Set queue specific appl (EO) context */ + q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx]; + /* Save flow params */ + q_ctx->flow_params.ipv4 = ip_addr; + q_ctx->flow_params.port = udp_port; + q_ctx->flow_params.proto = IPV4_PROTO_UDP; + q_ctx->queue = queue; + + ret = em_queue_set_context(queue, q_ctx); + test_fatal_if(ret != EM_OK, + "Set Q-ctx failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue); + + /* Add the queue to the EO */ + ret = em_eo_add_queue_sync(eo, queue); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo, queue); + + /* + * Set the pktout queues to use for this input queue, + * one pktout queue per interface. + */ + set_pktout_queues(queue, eo_ctx, + q_ctx->pktout_queue/*out*/); + + /* Update the Queue Context Index */ + q_ctx_idx++; + test_fatal_if(q_ctx_idx > NUM_PKTIN_QUEUES, + "Too many queues!"); + } + } +} + +/** + * Helper func to store the packet output queues for a specific input queue + */ +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]) +{ + int if_count = eo_ctx->if_count; + int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; + int id, i; + + for (i = 0; i < if_count; i++) { + id = eo_ctx->if_ids[i]; + pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; + } +} + +/** + * EO Local start function (run once at startup on EACH core) + + * Not really needed in this application, but included + * to demonstrate usage. 
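+ *
+ * A real application could initialize core-local data here, since this
+ * function is run once on every EM-core.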
+ */ +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + + APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", + eo, eo_ctx->name, em_core_id()); + + return EM_OK; +} + +/** + * EO stop function + */ +static em_status_t +stop_eo(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + em_status_t ret; + em_queue_t pktout_queue; + int if_id; + int i, j; + + APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + /* Delete the packet output queues created for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktout_queue = eo_ctx->pktout_queue[if_id][j]; + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue undef:%d,%d", i, j); + ret = em_queue_delete(pktout_queue); + test_fatal_if(ret != EM_OK, + "Pktout queue delete failed:%d,%d", i, j); + } + } + + return EM_OK; +} + +/** + * EO event receive function + */ +static void +receive_eo_packet_multi(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context) +{ + queue_context_t *const q_ctx = queue_context; + int in_port; + int out_port; + em_queue_t pktout_queue; + int ret, i; + + if (unlikely(appl_shm->exit_flag)) { + em_free_multi(event_tbl, num); + return; + } + + in_port = pktio_input_port(event_tbl[0]); + + if (X_CONNECT_PORTS) + out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; + else + out_port = in_port; + + pktout_queue = q_ctx->pktout_queue[out_port]; + + if (ENABLE_ERROR_CHECKS) { + eo_context_t *const eo_ctx = eo_context; + + for (i = 0; i < num; i++) + if (rx_error_check(eo_ctx, event_tbl[i], + queue, q_ctx) != 0) + return; + } + + /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: src<->dst */
+ for (i = 0; i < num; i++)
+ pktio_swap_addrs(event_tbl[i]);
+
+ if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */
+ for (i = 0; i < num; i++)
+ event_tbl[i] = alloc_copy_free(event_tbl[i]);
+
+ /*
+ * Send the packet buffer back out via the pktout queue through
+ * the 'out_port'
+ */
+ ret = em_send_multi(event_tbl, num, pktout_queue);
+ if (unlikely(ret != num))
+ em_free_multi(&event_tbl[ret], num - ret);
+}
+
+static inline int
+rx_error_check(eo_context_t *const eo_ctx, const em_event_t event,
+ const em_queue_t queue, queue_context_t *const q_ctx)
+{
+ static ENV_LOCAL uint64_t drop_cnt = 1;
+ uint8_t proto;
+ uint32_t ipv4_dst;
+ uint16_t port_dst;
+
+ pktio_get_dst(event, &proto, &ipv4_dst, &port_dst);
+
+ if (QUEUE_PER_FLOW) {
+ flow_params_t *fp;
+
+ /* Drop everything from the default queue */
+ if (unlikely(queue == eo_ctx->default_queue)) {
+ char ip_str[sizeof("255.255.255.255")];
+
+ ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str));
+
+ APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n",
+ ip_str, port_dst, em_core_id(), drop_cnt++);
+
+ em_free(event);
+ return -1;
+ }
+
+ /*
+ * Check IP address and port: compare packet against the stored
+ * values in the queue context
+ */
+ fp = &q_ctx->flow_params;
+ test_fatal_if(fp->ipv4 != ipv4_dst ||
+ fp->port != port_dst || fp->proto != proto,
+ "Q:%" PRI_QUEUE " received illegal packet!\n"
+ "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+ "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+ "Abort!", queue, ipv4_dst, port_dst, proto,
+ fp->ipv4, fp->port, fp->proto);
+ } else {
+ if (unlikely(proto != IPV4_PROTO_UDP)) {
+ APPL_PRINT("Pkt: defQ, not UDP drop-%d-#%" PRIu64 "\n",
+ em_core_id(), drop_cnt++);
+ em_free(event);
+ return -1;
+ }
+
+ test_fatal_if(ipv4_dst < (uint32_t)IP_ADDR_BASE ||
+ ipv4_dst >=
+ (uint32_t)(IP_ADDR_BASE + NUM_IP_ADDRS) ||
+ port_dst < UDP_PORT_BASE ||
+ port_dst >= (UDP_PORT_BASE + NUM_PKTIN_QUEUES) ||
+ proto != IPV4_PROTO_UDP,
+ "Q:%" PRI_QUEUE " received illegal packet!\n"
+ "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+ "Values not in the configured range!\n"
+ "Abort!",
+ queue, ipv4_dst, port_dst, proto);
+ }
+
+ /* Everything OK, return zero */
+ return 0;
+}
+
+/**
+ * Alloc a new event, copy the contents & header into the new event
+ * and finally free the original event. Returns a pointer to the new event.
+ *
+ * Used for testing the performance impact of alloc-copy-free operations.
+ */
+static inline em_event_t
+alloc_copy_free(em_event_t event)
+{
+ /* Copy the packet event */
+ em_event_t new_event = pktio_copy_event(event);
+
+ /* Free old event */
+ em_free(event);
+
+ return new_event;
+}
diff --git a/programs/packet_io/loopback_multircv.c b/programs/packet_io/loopback_multircv.c
index b53a5a4e..c779049d 100644
--- a/programs/packet_io/loopback_multircv.c
+++ b/programs/packet_io/loopback_multircv.c
@@ -1,815 +1,825 @@
-/*
- * Copyright (c) 2020, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Simple Load Balanced Packet-IO test application capable of receiving multiple - * events at a time (the EO is created with a multi-event receive function) - * - * The application (EO) receives a batch of UDP datagrams and exchanges - * the src-dst addesses before sending the datagrams back out. - * - * Based on the loopback.c application - */ - -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" -#include "cm_pktio.h" - -/* - * Test configuration - */ - -/** - * Set the used queue type for EM queues receiving packet data. - * - * Try also with EM_QUEUE_TYPE_PARALLEL or EM_QUEUE_TYPE_PARALLEL_ORDERED. - * Alt. set QUEUE_TYPE_MIX to '1' to use all queue types simultaneously. - */ -#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */ - -/** - * Test with all different queue types simultaneously: - * ATOMIC, PARALLELL, PARALLEL_ORDERED - */ -#define QUEUE_TYPE_MIX 0 /* 0=False or 1=True */ - -/** - * Create an EM queue per UDP/IP flow or use the default queue. - * - * If set to '0' then all traffic is routed through one 'default queue'(slow), - * if set to '1' each traffic flow is routed to its own EM-queue. - */ -#define QUEUE_PER_FLOW 1 /* 0=False or 1=True */ - -/** - * Select whether the UDP ports should be unique over all the IP-interfaces - * (set to 1) or reused per IP-interface (thus each UDP port is configured - * once for each IP-interface). Using '0' (not unique) makes it easier to - * copy traffic generator settings from one IF-port to another as only the - * dst-IP address has to be changed. - */ -#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ - -/** - * Select whether the input and output ports should be cross-connected. 
- */ -#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ - -/** - * Enable per packet error checking - */ -#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ - -/** - * Test em_alloc and em_free per packet - * - * Alloc new event, copy event, free old event - */ -#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ - -/* Configure the IP addresses and UDP ports that this application will use */ -#define NUM_IP_ADDRS 4 -#define NUM_PORTS_PER_IP 64 - -#define IP_ADDR_A 192 -#define IP_ADDR_B 168 -#define IP_ADDR_C 1 -#define IP_ADDR_D 16 - -#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ - (IP_ADDR_C << 8) | (IP_ADDR_D)) -#define UDP_PORT_BASE 1024 -/* - * IANA Dynamic Ports (Private or Ephemeral Ports), - * from 49152 to 65535 (never assigned) - */ -/* #define UDP_PORT_BASE 0xC000 */ - -#define MAX_NUM_IF 4 /* max number of used interfaces */ -#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ - -#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) -#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES - -#define IS_ODD(x) (((x) & 0x1)) -#define IS_EVEN(x) (!IS_ODD(x)) - -#define MAX_RCV_FN_EVENTS 256 - -/** - * EO context - */ -typedef struct { - em_eo_t eo; - char name[32]; - /** interface count as provided by appl_conf to test_start() */ - int if_count; - /** interface ids as provided via appl_conf_t to test_start() */ - int if_ids[MAX_NUM_IF]; - /** default queue: pkts/events not matching any other input criteria */ - em_queue_t default_queue; - /** all created input queues */ - em_queue_t queue[NUM_PKTIN_QUEUES]; - /** the number of packet output queues to use per interface */ - int pktout_queues_per_if; - /* pktout queues: accessed by if_id, thus empty middle slots possible */ - em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; -} eo_context_t; - -/** - * Save the dst IP, protocol and port in the queue-context. - * Verify (if error checking enabled) that the received packet matches the - * configuration for the queue. - */ -typedef struct flow_params_ { - uint32_t ipv4; - uint16_t port; - uint8_t proto; - uint8_t _pad; -} flow_params_t; - -/** - * Queue-Context, i.e. queue specific data, each queue has its own instance - */ -typedef struct { - /** a pktout queue for each interface, precalculated */ - em_queue_t pktout_queue[MAX_IF_ID + 1]; - /** saved flow params for the EM-queue */ - flow_params_t flow_params; - /** queue handle */ - em_queue_t queue; -} queue_context_t; - -/** - * Packet Loopback shared memory - */ -typedef struct { - /** EO (application) context */ - eo_context_t eo_ctx; - /** - * Array containing the contexts of all the queues handled by the EO. - * A queue context contains the flow/queue specific data for the - * application EO. 
- */ - queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; - - /** Queue context for the default queue */ - queue_context_t def_q_ctx; -} packet_loopback_shm_t; - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL packet_loopback_shm_t *pkt_shm; - -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static void -create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx); - -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]); - -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo); - -static void -receive_eo_packet_multi(void *eo_context, em_event_t event_tbl[], int num, - em_queue_t queue, void *queue_context); - -static em_status_t -stop_eo(void *eo_context, em_eo_t eo); - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx); - -static inline em_event_t -alloc_copy_free(em_event_t event); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Packet Loopback test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - pkt_shm = env_shared_reserve("PktLoopShMem", - sizeof(packet_loopback_shm_t)); - em_register_error_handler(test_error_handler); - } else { - pkt_shm = env_shared_lookup("PktLoopShMem"); - } - - if (pkt_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Packet Loopback init failed on EM-core: %u", - em_core_id()); - else if (core == 0) - memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); -} - -/** - * Startup of the Packet Loopback test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - em_eo_multircv_param_t eo_param; - eo_context_t *eo_ctx; - em_status_t ret, start_fn_ret = EM_ERROR; - int if_id, i; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads); - - test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || - appl_conf->pktio.if_count <= 0, - "Invalid number of interfaces given:%d - need 1-%d(MAX)", - appl_conf->pktio.if_count, MAX_NUM_IF); - - /* - * Create one EO - */ - eo_ctx = &pkt_shm->eo_ctx; - /* Initialize EO context data to '0' */ - memset(eo_ctx, 0, sizeof(eo_context_t)); - - /* Init EO params */ - em_eo_multircv_param_init(&eo_param); - /* Set EO params needed by this application */ - eo_param.start = start_eo; - eo_param.local_start = start_eo_local; - eo_param.stop = stop_eo; - eo_param.receive_multi = receive_eo_packet_multi; - eo_param.max_events = MAX_RCV_FN_EVENTS; - eo_param.eo_ctx = eo_ctx; - eo = em_eo_create_multircv(appl_conf->name, &eo_param); - test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create() failed"); - eo_ctx->eo = eo; - - /* Store the number of pktio interfaces used */ - eo_ctx->if_count = appl_conf->pktio.if_count; - /* Store the used interface ids */ - for (i = 0; i < appl_conf->pktio.if_count; i++) { - if_id = appl_conf->pktio.if_ids[i]; - test_fatal_if(if_id > MAX_IF_ID, - "Interface id out of range! %d > %d(MAX)", - if_id, MAX_IF_ID); - eo_ctx->if_ids[i] = if_id; - } - - /* Start the EO - queues etc. created in the EO start function */ - ret = em_eo_start_sync(eo, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - /* - * All input & output queues have been created and enabled in the - * EO start function, now direct pktio traffic to those queues. - */ - for (i = 0; i < NUM_PKTIN_QUEUES; i++) { - /* Direct ip_addr:udp_port into this queue */ - queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i]; - uint32_t ip_addr = q_ctx->flow_params.ipv4; - uint16_t port = q_ctx->flow_params.port; - uint8_t proto = q_ctx->flow_params.proto; - em_queue_t queue = q_ctx->queue; - em_queue_t tmp_q; - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - pktio_add_queue(proto, ip_addr, port, queue); - - /* Sanity checks (lookup what was configured) */ - tmp_q = pktio_lookup_sw(proto, ip_addr, port); - test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue, - "Lookup fails IP:UDP %s:%d\n" - "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", - ip_str, port, queue, tmp_q); - /* Print first and last mapping */ - if (i == 0 || i == NUM_PKTIN_QUEUES - 1) - APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", - ip_str, port, tmp_q); - } - - /* - * Direct all non-lookup hit packets into this queue. 
- * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue - */ - pktio_default_queue(eo_ctx->default_queue); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; - em_eo_t eo = eo_ctx->eo; - em_status_t ret; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(pkt_shm); - em_unregister_error_handler(); - } -} - -/** - * EO start function (run once at startup on ONE core) - * - * The global start function creates the application specific queues and - * associates the queues with the EO and the packet flows it wants to process. - */ -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - em_queue_t def_queue, pktout_queue; - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; /* platform specific */ - pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ - em_status_t ret; - eo_context_t *const eo_ctx = eo_context; - queue_context_t *defq_ctx; - int if_id; - int i, j; - - (void)conf; - - /* Store the EO name in the EO-context data */ - em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); - - APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", - eo, eo_ctx->name, eo_ctx->if_count); - - /* - * Create packet output queues. - * - * Dimension the number of pktout queues to be equal to the number - * of EM cores per interface to minimize output resource contention. 
- */ - test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, - "No room to store pktout queues"); - eo_ctx->pktout_queues_per_if = em_core_count(); - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - - /* Output-queue callback function (em_output_func_t) */ - output_conf.output_fn = pktio_tx; - /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ - output_conf.output_fn_args = &pktio_tx_fn_args; - output_conf.args_len = sizeof(pktio_tx_fn_args_t); - /* Content of 'pktio_tx_fn_args' set in loop */ - - /* Create the packet output queues for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktio_tx_fn_args.if_id = if_id; - pktout_queue = - em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, &queue_conf); - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue create failed:%d,%d", i, j); - eo_ctx->pktout_queue[if_id][j] = pktout_queue; - } - } - - /* - * Default queue for all packets not mathing any - * specific input queue criteria - */ - def_queue = em_queue_create("default", QUEUE_TYPE, EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(def_queue == EM_QUEUE_UNDEF, - "Default Queue creation failed"); - - /* Store the default queue Id in the EO-context data */ - eo_ctx->default_queue = def_queue; - - /* Associate the queue with this EO */ - ret = em_eo_add_queue_sync(eo, def_queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " queue:%" PRI_QUEUE "", - ret, eo, def_queue); - - /* Set queue context for the default queue */ - defq_ctx = &pkt_shm->def_q_ctx; - ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx for the default queue failed:%" PRI_STAT "\n" - "default-Q:%" PRI_QUEUE "", ret, def_queue); - - /* Set the pktout queues to use for the default queue, one per if */ - set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/); - - if (QUEUE_PER_FLOW) - create_queue_per_flow(eo, eo_ctx); - - APPL_PRINT("EO %" PRI_EO " global start done.\n", eo); - - return EM_OK; -} - -/** - * Helper func for EO start() to create a queue per packet flow (if configured) - */ -static void -create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx) -{ - uint16_t port_offset = (uint16_t)-1; - uint32_t q_ctx_idx = 0; - queue_context_t *q_ctx; - em_queue_type_t qtype; - em_queue_t queue; - em_status_t ret; - int i, j; - - memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx)); - - for (i = 0; i < NUM_IP_ADDRS; i++) { - char ip_str[sizeof("255.255.255.255")]; - uint32_t ip_addr = IP_ADDR_BASE + i; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - for (j = 0; j < NUM_PORTS_PER_IP; j++) { - uint16_t udp_port; - - if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ - port_offset++; - else /* Same UDP-ports per IP-interface */ - port_offset = j; - - udp_port = UDP_PORT_BASE + port_offset; - - if (!QUEUE_TYPE_MIX) { - /* Use only queues of a single type */ - qtype = QUEUE_TYPE; - } else { - /* Spread out over the 3 diff queue-types */ - int nbr_q = ((i * NUM_PORTS_PER_IP) + j) % 3; - - if (nbr_q == 0) - qtype = EM_QUEUE_TYPE_ATOMIC; - else 
if (nbr_q == 1) - qtype = EM_QUEUE_TYPE_PARALLEL; - else - qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; - } - - /* Create a queue */ - queue = em_queue_create("udp-flow", qtype, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(queue == EM_QUEUE_UNDEF, - "Queue create failed: UDP-port %d", - udp_port); - /* - * Store the id of the created queue into the - * application specific EO-context - */ - eo_ctx->queue[q_ctx_idx] = queue; - - /* Set queue specific appl (EO) context */ - q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx]; - /* Save flow params */ - q_ctx->flow_params.ipv4 = ip_addr; - q_ctx->flow_params.port = udp_port; - q_ctx->flow_params.proto = IPV4_PROTO_UDP; - q_ctx->queue = queue; - - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "Set Q-ctx failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue); - - /* Add the queue to the EO */ - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo, queue); - - /* - * Set the pktout queues to use for this input queue, - * one pktout queue per interface. - */ - set_pktout_queues(queue, eo_ctx, - q_ctx->pktout_queue/*out*/); - - /* Update the Queue Context Index */ - q_ctx_idx++; - test_fatal_if(q_ctx_idx > NUM_PKTIN_QUEUES, - "Too many queues!"); - } - } -} - -/** - * Helper func to store the packet output queues for a specific input queue - */ -static void -set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, - em_queue_t pktout_queue[/*out*/]) -{ - int if_count = eo_ctx->if_count; - int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; - int id, i; - - for (i = 0; i < if_count; i++) { - id = eo_ctx->if_ids[i]; - pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; - } -} - -/** - * EO Local start function (run once at startup on EACH core) - - * Not really needed in this application, but included - * to demonstrate usage. 
- */ -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - - APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", - eo, eo_ctx->name, em_core_id()); - - return EM_OK; -} - -/** - * EO stop function - */ -static em_status_t -stop_eo(void *eo_context, em_eo_t eo) -{ - eo_context_t *eo_ctx = eo_context; - em_status_t ret; - em_queue_t pktout_queue; - int if_id; - int i, j; - - APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - /* Delete the packet output queues created for each interface */ - for (i = 0; i < eo_ctx->if_count; i++) { - if_id = eo_ctx->if_ids[i]; - for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktout_queue = eo_ctx->pktout_queue[if_id][j]; - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue undef:%d,%d", i, j); - ret = em_queue_delete(pktout_queue); - test_fatal_if(ret != EM_OK, - "Pktout queue delete failed:%d,%d", i, j); - } - } - - return EM_OK; -} - -/** - * EO event receive function - */ -static void -receive_eo_packet_multi(void *eo_context, em_event_t event_tbl[], int num, - em_queue_t queue, void *queue_context) -{ - queue_context_t *const q_ctx = queue_context; - int in_port; - int out_port; - em_queue_t pktout_queue; - int ret, i; - - if (unlikely(appl_shm->exit_flag)) { - em_free_multi(event_tbl, num); - return; - } - - in_port = pktio_input_port(event_tbl[0]); - - if (X_CONNECT_PORTS) - out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; - else - out_port = in_port; - - pktout_queue = q_ctx->pktout_queue[out_port]; - - if (ENABLE_ERROR_CHECKS) { - eo_context_t *const eo_ctx = eo_context; - - for (i = 0; i < num; i++) - if (rx_error_check(eo_ctx, event_tbl[i], - queue, q_ctx) != 0) - return; - } - - /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: scr<->dst */ - for (i = 0; i < num; i++) - pktio_swap_addrs(event_tbl[i]); - - if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */ - for (i = 0; i < num; i++) - event_tbl[i] = alloc_copy_free(event_tbl[i]); - - /* - * Send the packet buffer back out via the pktout queue through - * the 'out_port' - */ - ret = em_send_multi(event_tbl, num, pktout_queue); - if (unlikely(ret != num)) - em_free_multi(&event_tbl[ret], num - ret); -} - -static inline int -rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, - const em_queue_t queue, queue_context_t *const q_ctx) -{ - static ENV_LOCAL uint64_t drop_cnt = 1; - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - - if (QUEUE_PER_FLOW) { - flow_params_t *fp; - - /* Drop everything from the default queue */ - if (unlikely(queue == eo_ctx->default_queue)) { - char ip_str[sizeof("255.255.255.255")]; - - ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); - - APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n", - ip_str, port_dst, em_core_id(), drop_cnt++); - - em_free(event); - return -1; - } - - /* - * Check IP address and port: compare packet against the stored - * values in the queue context - */ - fp = &q_ctx->flow_params; - test_fatal_if(fp->ipv4 != ipv4_dst || - fp->port != port_dst || fp->proto != proto, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Abort!", queue, ipv4_dst, port_dst, proto, - fp->ipv4, fp->port, fp->proto); - } else { - if (unlikely(proto != IPV4_PROTO_UDP)) { - APPL_PRINT("Pkt: defQ, not UDP drop-%d-#%" PRIu64 "\n", - em_core_id(), drop_cnt++); - em_free(event); - return -1; - } - - test_fatal_if(ipv4_dst < (uint32_t)IP_ADDR_BASE || - ipv4_dst >= - (uint32_t)(IP_ADDR_BASE + NUM_IP_ADDRS) || - port_dst < UDP_PORT_BASE || - port_dst >= (UDP_PORT_BASE + NUM_PKTIN_QUEUES) || - proto != IPV4_PROTO_UDP, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Values not in the configurated range!\n" - "Abort!", - queue, ipv4_dst, port_dst, proto); - } - - /* Everything OK, return zero */ - return 0; -} - -/** - * Alloc a new event, copy the contents&header into the new event - * and finally free the original event. Returns a pointer to the new event. - * - * Used for testing the performance impact of alloc-copy-free operations. - */ -static inline em_event_t -alloc_copy_free(em_event_t event) -{ - /* Copy the packet event */ - em_event_t new_event = pktio_copy_event(event); - - /* Free old event */ - em_free(event); - - return new_event; -} +/* + * Copyright (c) 2020, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Simple Load Balanced Packet-IO test application capable of receiving
+ * multiple events at a time (the EO is created with a multi-event receive
+ * function).
+ *
+ * The application (EO) receives a batch of UDP datagrams and exchanges
+ * the src-dst addresses before sending the datagrams back out.
+ *
+ * Based on the loopback.c application
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+#include "cm_pktio.h"
+
+/*
+ * Test configuration
+ */
+
+/**
+ * Set the used queue type for EM queues receiving packet data.
+ *
+ * Try also with EM_QUEUE_TYPE_PARALLEL or EM_QUEUE_TYPE_PARALLEL_ORDERED.
+ * Alt. set QUEUE_TYPE_MIX to '1' to use all queue types simultaneously.
+ */
+#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */
+
+/**
+ * Test with all different queue types simultaneously:
+ * ATOMIC, PARALLEL, PARALLEL_ORDERED
+ */
+#define QUEUE_TYPE_MIX 0 /* 0=False or 1=True */
+
+/**
+ * Create an EM queue per UDP/IP flow or use the default queue.
+ *
+ * If set to '0' then all traffic is routed through one 'default queue' (slow),
+ * if set to '1' each traffic flow is routed to its own EM-queue.
+ */
+#define QUEUE_PER_FLOW 1 /* 0=False or 1=True */
+
+/**
+ * Select whether the UDP ports should be unique over all the IP-interfaces
+ * (set to 1) or reused per IP-interface (thus each UDP port is configured
+ * once for each IP-interface). Using '0' (not unique) makes it easier to
+ * copy traffic generator settings from one IF-port to another as only the
+ * dst-IP address has to be changed.
+ */
+#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */
+
+/**
+ * Select whether the input and output ports should be cross-connected.
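+ *
+ * With '1' an even input port is paired with the next odd port and vice
+ * versa; with '0' packets are echoed back out on the port they arrived
+ * on. The receive function below computes the mapping as:
+ * @code
+ *	if (X_CONNECT_PORTS)
+ *		out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1;
+ *	else
+ *		out_port = in_port;
+ * @endcode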
+ */ +#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ + +/** + * Enable per packet error checking + */ +#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ + +/** + * Test em_alloc and em_free per packet + * + * Alloc new event, copy event, free old event + */ +#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ + +/* Configure the IP addresses and UDP ports that this application will use */ +#define NUM_IP_ADDRS 4 +#define NUM_PORTS_PER_IP 64 + +#define IP_ADDR_A 192 +#define IP_ADDR_B 168 +#define IP_ADDR_C 1 +#define IP_ADDR_D 16 + +#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ + (IP_ADDR_C << 8) | (IP_ADDR_D)) +#define UDP_PORT_BASE 1024 +/* + * IANA Dynamic Ports (Private or Ephemeral Ports), + * from 49152 to 65535 (never assigned) + */ +/* #define UDP_PORT_BASE 0xC000 */ + +#define MAX_NUM_IF 4 /* max number of used interfaces */ +#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ + +#define NUM_PKTIN_QUEUES (NUM_IP_ADDRS * NUM_PORTS_PER_IP) +#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES + +#define IS_ODD(x) (((x) & 0x1)) +#define IS_EVEN(x) (!IS_ODD(x)) + +#define MAX_RCV_FN_EVENTS 256 + +/** + * EO context + */ +typedef struct { + em_eo_t eo; + char name[32]; + /** interface count as provided by appl_conf to test_start() */ + int if_count; + /** interface ids as provided via appl_conf_t to test_start() */ + int if_ids[MAX_NUM_IF]; + /** default queue: pkts/events not matching any other input criteria */ + em_queue_t default_queue; + /** all created input queues */ + em_queue_t queue[NUM_PKTIN_QUEUES]; + /** the number of packet output queues to use per interface */ + int pktout_queues_per_if; + /* pktout queues: accessed by if_id, thus empty middle slots possible */ + em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; +} eo_context_t; + +/** + * Save the dst IP, protocol and port in the queue-context. + * Verify (if error checking enabled) that the received packet matches the + * configuration for the queue. + */ +typedef struct flow_params_ { + uint32_t ipv4; + uint16_t port; + uint8_t proto; + uint8_t _pad; +} flow_params_t; + +/** + * Queue-Context, i.e. queue specific data, each queue has its own instance + */ +typedef struct { + /** a pktout queue for each interface, precalculated */ + em_queue_t pktout_queue[MAX_IF_ID + 1]; + /** saved flow params for the EM-queue */ + flow_params_t flow_params; + /** queue handle */ + em_queue_t queue; +} queue_context_t; + +/** + * Packet Loopback shared memory + */ +typedef struct { + /** EO (application) context */ + eo_context_t eo_ctx; + /** + * Array containing the contexts of all the queues handled by the EO. + * A queue context contains the flow/queue specific data for the + * application EO. 
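+	 *
+	 * Each pktin queue is bound to one entry at startup and EM passes
+	 * the pointer back as the 'queue_context' argument of the receive
+	 * function, i.e. (see create_queue_per_flow() below):
+	 * @code
+	 *	q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx];
+	 *	ret = em_queue_set_context(queue, q_ctx);
+	 * @endcode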
+ */ + queue_context_t eo_q_ctx[NUM_PKTIN_QUEUES] ENV_CACHE_LINE_ALIGNED; + + /** Queue context for the default queue */ + queue_context_t def_q_ctx; +} packet_loopback_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL packet_loopback_shm_t *pkt_shm; + +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static void +create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx); + +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]); + +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo); + +static void +receive_eo_packet_multi(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context); + +static em_status_t +stop_eo(void *eo_context, em_eo_t eo); + +static inline int +rx_error_check(eo_context_t *const eo_ctx, const em_event_t event, + const em_queue_t queue, queue_context_t *const q_ctx); + +static inline em_event_t +alloc_copy_free(em_event_t event); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Packet Loopback test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + pkt_shm = env_shared_reserve("PktLoopShMem", + sizeof(packet_loopback_shm_t)); + em_register_error_handler(test_error_handler); + } else { + pkt_shm = env_shared_lookup("PktLoopShMem"); + } + + if (pkt_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Packet Loopback init failed on EM-core: %u", + em_core_id()); + else if (core == 0) + memset(pkt_shm, 0, sizeof(packet_loopback_shm_t)); +} + +/** + * Startup of the Packet Loopback test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. 
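+ *
+ * A sketch of the start-up sequence driven by cm_setup(), as described
+ * for main() above:
+ * @code
+ *	main() -> cm_setup(argc, argv)
+ *	  -> test_init()  (on every EM-core: shmem reserve/lookup)
+ *	  -> test_start() (on EM-core 0 only: EO/queue/flow setup)
+ *	  -> EM dispatch loop (on every EM-core)
+ * @endcode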
+ */
+void
+test_start(appl_conf_t *const appl_conf)
+{
+	em_eo_t eo;
+	em_eo_multircv_param_t eo_param;
+	eo_context_t *eo_ctx;
+	em_status_t ret, start_fn_ret = EM_ERROR;
+	int if_id, i;
+
+	APPL_PRINT("\n"
+		   "***********************************************************\n"
+		   "EM APPLICATION: '%s' initializing:\n"
+		   " %s: %s() - EM-core:%i\n"
+		   " Application running on %d EM-cores (procs:%d, threads:%d)\n"
+		   "***********************************************************\n"
+		   "\n",
+		   appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(),
+		   em_core_count(),
+		   appl_conf->num_procs, appl_conf->num_threads);
+
+	test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF ||
+		      appl_conf->pktio.if_count <= 0,
+		      "Invalid number of interfaces given:%d - need 1-%d(MAX)",
+		      appl_conf->pktio.if_count, MAX_NUM_IF);
+
+	pktin_mode_t pktin_mode = appl_conf->pktio.in_mode;
+
+	test_fatal_if(!pktin_polled_mode(pktin_mode),
+		      "Invalid pktin-mode: %s(%i).\n"
+		      "Application:%s supports only polled pktin-modes: %s(%i), %s(%i)",
+		      pktin_mode_str(pktin_mode), pktin_mode,
+		      appl_conf->name,
+		      pktin_mode_str(DIRECT_RECV), DIRECT_RECV,
+		      pktin_mode_str(PLAIN_QUEUE), PLAIN_QUEUE);
+
+	/*
+	 * Create one EO
+	 */
+	eo_ctx = &pkt_shm->eo_ctx;
+	/* Initialize EO context data to '0' */
+	memset(eo_ctx, 0, sizeof(eo_context_t));
+
+	/* Init EO params */
+	em_eo_multircv_param_init(&eo_param);
+	/* Set EO params needed by this application */
+	eo_param.start = start_eo;
+	eo_param.local_start = start_eo_local;
+	eo_param.stop = stop_eo;
+	eo_param.receive_multi = receive_eo_packet_multi;
+	eo_param.max_events = MAX_RCV_FN_EVENTS;
+	eo_param.eo_ctx = eo_ctx;
+	eo = em_eo_create_multircv(appl_conf->name, &eo_param);
+	test_fatal_if(eo == EM_EO_UNDEF, "em_eo_create_multircv() failed");
+	eo_ctx->eo = eo;
+
+	/* Store the number of pktio interfaces used */
+	eo_ctx->if_count = appl_conf->pktio.if_count;
+	/* Store the used interface ids */
+	for (i = 0; i < appl_conf->pktio.if_count; i++) {
+		if_id = appl_conf->pktio.if_ids[i];
+		test_fatal_if(if_id > MAX_IF_ID,
+			      "Interface id out of range! %d > %d(MAX)",
+			      if_id, MAX_IF_ID);
+		eo_ctx->if_ids[i] = if_id;
+	}
+
+	/* Start the EO - queues etc. created in the EO start function */
+	ret = em_eo_start_sync(eo, &start_fn_ret, NULL);
+	test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK,
+		      "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "",
+		      ret, start_fn_ret);
+
+	/*
+	 * All input & output queues have been created and enabled in the
+	 * EO start function, now direct pktio traffic to those queues.
+	 */
+	for (i = 0; i < NUM_PKTIN_QUEUES; i++) {
+		/* Direct ip_addr:udp_port into this queue */
+		queue_context_t *q_ctx = &pkt_shm->eo_q_ctx[i];
+		uint32_t ip_addr = q_ctx->flow_params.ipv4;
+		uint16_t port = q_ctx->flow_params.port;
+		uint8_t proto = q_ctx->flow_params.proto;
+		em_queue_t queue = q_ctx->queue;
+		em_queue_t tmp_q;
+		char ip_str[sizeof("255.255.255.255")];
+
+		ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str));
+
+		pktio_add_queue(proto, ip_addr, port, queue);
+
+		/* Sanity checks (lookup what was configured) */
+		tmp_q = pktio_lookup_sw(proto, ip_addr, port);
+		test_fatal_if(tmp_q == EM_QUEUE_UNDEF || tmp_q != queue,
+			      "Lookup fails IP:UDP %s:%d\n"
+			      "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "",
+			      ip_str, port, queue, tmp_q);
+		/* Print first and last mapping */
+		if (i == 0 || i == NUM_PKTIN_QUEUES - 1)
+			APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n",
+				   ip_str, port, tmp_q);
+	}
+
+	/*
+	 * Direct all non-lookup hit packets into this queue.
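+	 * A packet lands here when the flow lookup configured above
+	 * misses, i.e. when (sketch, using the same helper as the sanity
+	 * checks above): pktio_lookup_sw(proto, ip, port) == EM_QUEUE_UNDEF.
+	 * rx_error_check() simply drops such packets when QUEUE_PER_FLOW
+	 * is '1'.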
+ * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue + */ + pktio_default_queue(eo_ctx->default_queue); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + eo_context_t *const eo_ctx = &pkt_shm->eo_ctx; + em_eo_t eo = eo_ctx->eo; + em_status_t ret; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(pkt_shm); + em_unregister_error_handler(); + } +} + +/** + * EO start function (run once at startup on ONE core) + * + * The global start function creates the application specific queues and + * associates the queues with the EO and the packet flows it wants to process. + */ +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + em_queue_t def_queue, pktout_queue; + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; /* platform specific */ + pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ + em_status_t ret; + eo_context_t *const eo_ctx = eo_context; + queue_context_t *defq_ctx; + int if_id; + int i, j; + + (void)conf; + + /* Store the EO name in the EO-context data */ + em_eo_get_name(eo, eo_ctx->name, sizeof(eo_ctx->name)); + + APPL_PRINT("EO %" PRI_EO ":'%s' global start, if-count:%d\n", + eo, eo_ctx->name, eo_ctx->if_count); + + /* + * Create packet output queues. + * + * Dimension the number of pktout queues to be equal to the number + * of EM cores per interface to minimize output resource contention. 
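+	 *
+	 * E.g. running on 4 EM-cores with 2 interfaces creates 2 * 4 = 8
+	 * pktout queues (4 per interface), spreading the output load over
+	 * as many queues as there are cores.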
+	 */
+	test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF,
+		      "No room to store pktout queues");
+	eo_ctx->pktout_queues_per_if = em_core_count();
+
+	memset(&queue_conf, 0, sizeof(queue_conf));
+	memset(&output_conf, 0, sizeof(output_conf));
+	queue_conf.flags = EM_QUEUE_FLAG_DEFAULT;
+	queue_conf.min_events = 0; /* system default */
+	queue_conf.conf_len = sizeof(output_conf);
+	queue_conf.conf = &output_conf;
+
+	/* Output-queue callback function (em_output_func_t) */
+	output_conf.output_fn = pktio_tx;
+	/* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */
+	output_conf.output_fn_args = &pktio_tx_fn_args;
+	output_conf.args_len = sizeof(pktio_tx_fn_args_t);
+	/* Content of 'pktio_tx_fn_args' set in loop */
+
+	/* Create the packet output queues for each interface */
+	for (i = 0; i < eo_ctx->if_count; i++) {
+		if_id = eo_ctx->if_ids[i];
+		for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) {
+			/* pktout queue tied to interface id 'if_id' */
+			pktio_tx_fn_args.if_id = if_id;
+			pktout_queue =
+			em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT,
+					EM_QUEUE_PRIO_UNDEF,
+					EM_QUEUE_GROUP_UNDEF, &queue_conf);
+			test_fatal_if(pktout_queue == EM_QUEUE_UNDEF,
+				      "Pktout queue create failed:%d,%d", i, j);
+			eo_ctx->pktout_queue[if_id][j] = pktout_queue;
+		}
+	}
+
+	/*
+	 * Default queue for all packets not matching any
+	 * specific input queue criteria
+	 */
+	def_queue = em_queue_create("default", QUEUE_TYPE, EM_QUEUE_PRIO_NORMAL,
+				    EM_QUEUE_GROUP_DEFAULT, NULL);
+	test_fatal_if(def_queue == EM_QUEUE_UNDEF,
+		      "Default Queue creation failed");
+
+	/* Store the default queue Id in the EO-context data */
+	eo_ctx->default_queue = def_queue;
+
+	/* Associate the queue with this EO */
+	ret = em_eo_add_queue_sync(eo, def_queue);
+	test_fatal_if(ret != EM_OK,
+		      "Add queue failed:%" PRI_STAT "\n"
+		      "EO:%" PRI_EO " queue:%" PRI_QUEUE "",
+		      ret, eo, def_queue);
+
+	/* Set queue context for the default queue */
+	defq_ctx = &pkt_shm->def_q_ctx;
+	ret = em_queue_set_context(eo_ctx->default_queue, defq_ctx);
+	test_fatal_if(ret != EM_OK,
+		      "Set Q-ctx for the default queue failed:%" PRI_STAT "\n"
+		      "default-Q:%" PRI_QUEUE "", ret, def_queue);
+
+	/* Set the pktout queues to use for the default queue, one per if */
+	set_pktout_queues(def_queue, eo_ctx, defq_ctx->pktout_queue/*out*/);
+
+	if (QUEUE_PER_FLOW)
+		create_queue_per_flow(eo, eo_ctx);
+
+	APPL_PRINT("EO %" PRI_EO " global start done.\n", eo);
+
+	return EM_OK;
+}
+
+/**
+ * Helper func for EO start() to create a queue per packet flow (if configured)
+ */
+static void
+create_queue_per_flow(const em_eo_t eo, eo_context_t *const eo_ctx)
+{
+	uint16_t port_offset = (uint16_t)-1;
+	uint32_t q_ctx_idx = 0;
+	queue_context_t *q_ctx;
+	em_queue_type_t qtype;
+	em_queue_t queue;
+	em_status_t ret;
+	int i, j;
+
+	memset(pkt_shm->eo_q_ctx, 0, sizeof(pkt_shm->eo_q_ctx));
+
+	for (i = 0; i < NUM_IP_ADDRS; i++) {
+		char ip_str[sizeof("255.255.255.255")];
+		uint32_t ip_addr = IP_ADDR_BASE + i;
+
+		ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str));
+
+		for (j = 0; j < NUM_PORTS_PER_IP; j++) {
+			uint16_t udp_port;
+
+			if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */
+				port_offset++;
+			else /* Same UDP-ports per IP-interface */
+				port_offset = j;
+
+			udp_port = UDP_PORT_BASE + port_offset;
+
+			if (!QUEUE_TYPE_MIX) {
+				/* Use only queues of a single type */
+				qtype = QUEUE_TYPE;
+			} else {
+				/* Spread out over the 3 diff queue-types */
+				int nbr_q = ((i * NUM_PORTS_PER_IP) + j) % 3;
+
+				if (nbr_q == 0)
+					qtype = EM_QUEUE_TYPE_ATOMIC;
+				else
if (nbr_q == 1) + qtype = EM_QUEUE_TYPE_PARALLEL; + else + qtype = EM_QUEUE_TYPE_PARALLEL_ORDERED; + } + + /* Create a queue */ + queue = em_queue_create("udp-flow", qtype, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + test_fatal_if(queue == EM_QUEUE_UNDEF, + "Queue create failed: UDP-port %d", + udp_port); + /* + * Store the id of the created queue into the + * application specific EO-context + */ + eo_ctx->queue[q_ctx_idx] = queue; + + /* Set queue specific appl (EO) context */ + q_ctx = &pkt_shm->eo_q_ctx[q_ctx_idx]; + /* Save flow params */ + q_ctx->flow_params.ipv4 = ip_addr; + q_ctx->flow_params.port = udp_port; + q_ctx->flow_params.proto = IPV4_PROTO_UDP; + q_ctx->queue = queue; + + ret = em_queue_set_context(queue, q_ctx); + test_fatal_if(ret != EM_OK, + "Set Q-ctx failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue); + + /* Add the queue to the EO */ + ret = em_eo_add_queue_sync(eo, queue); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo, queue); + + /* + * Set the pktout queues to use for this input queue, + * one pktout queue per interface. + */ + set_pktout_queues(queue, eo_ctx, + q_ctx->pktout_queue/*out*/); + + /* Update the Queue Context Index */ + q_ctx_idx++; + test_fatal_if(q_ctx_idx > NUM_PKTIN_QUEUES, + "Too many queues!"); + } + } +} + +/** + * Helper func to store the packet output queues for a specific input queue + */ +static void +set_pktout_queues(em_queue_t queue, eo_context_t *const eo_ctx, + em_queue_t pktout_queue[/*out*/]) +{ + int if_count = eo_ctx->if_count; + int pktout_idx = (uintptr_t)queue % eo_ctx->pktout_queues_per_if; + int id, i; + + for (i = 0; i < if_count; i++) { + id = eo_ctx->if_ids[i]; + pktout_queue[id] = eo_ctx->pktout_queue[id][pktout_idx]; + } +} + +/** + * EO Local start function (run once at startup on EACH core) + + * Not really needed in this application, but included + * to demonstrate usage. 
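+ *
+ * A typical use for a local start would be initializing core-local
+ * data, e.g. a hypothetical per-core counter:
+ * @code
+ *	static ENV_LOCAL uint64_t pkt_cnt; // hypothetical example
+ *	pkt_cnt = 0;
+ * @endcode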
+ */ +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + + APPL_PRINT("EO %" PRI_EO ":%s local start on EM-core%u\n", + eo, eo_ctx->name, em_core_id()); + + return EM_OK; +} + +/** + * EO stop function + */ +static em_status_t +stop_eo(void *eo_context, em_eo_t eo) +{ + eo_context_t *eo_ctx = eo_context; + em_status_t ret; + em_queue_t pktout_queue; + int if_id; + int i, j; + + APPL_PRINT("EO %" PRI_EO ":%s stopping\n", eo, eo_ctx->name); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + /* Delete the packet output queues created for each interface */ + for (i = 0; i < eo_ctx->if_count; i++) { + if_id = eo_ctx->if_ids[i]; + for (j = 0; j < eo_ctx->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktout_queue = eo_ctx->pktout_queue[if_id][j]; + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue undef:%d,%d", i, j); + ret = em_queue_delete(pktout_queue); + test_fatal_if(ret != EM_OK, + "Pktout queue delete failed:%d,%d", i, j); + } + } + + return EM_OK; +} + +/** + * EO event receive function + */ +static void +receive_eo_packet_multi(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context) +{ + queue_context_t *const q_ctx = queue_context; + int in_port; + int out_port; + em_queue_t pktout_queue; + int ret, i; + + if (unlikely(appl_shm->exit_flag)) { + em_free_multi(event_tbl, num); + return; + } + + in_port = pktio_input_port(event_tbl[0]); + + if (X_CONNECT_PORTS) + out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; + else + out_port = in_port; + + pktout_queue = q_ctx->pktout_queue[out_port]; + + if (ENABLE_ERROR_CHECKS) { + eo_context_t *const eo_ctx = eo_context; + + for (i = 0; i < num; i++) + if (rx_error_check(eo_ctx, event_tbl[i], + queue, q_ctx) != 0) + return; + } + + /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: src<->dst */
+	for (i = 0; i < num; i++)
+		pktio_swap_addrs(event_tbl[i]);
+
+	if (ALLOC_COPY_FREE) /* alloc event, copy contents & free original */
+		for (i = 0; i < num; i++)
+			event_tbl[i] = alloc_copy_free(event_tbl[i]);
+
+	/*
+	 * Send the packet buffer back out via the pktout queue through
+	 * the 'out_port'
+	 */
+	ret = em_send_multi(event_tbl, num, pktout_queue);
+	/* em_send_multi() returns the number of events sent: free the rest */
+	if (unlikely(ret != num))
+		em_free_multi(&event_tbl[ret], num - ret);
+}
+
+static inline int
+rx_error_check(eo_context_t *const eo_ctx, const em_event_t event,
+	       const em_queue_t queue, queue_context_t *const q_ctx)
+{
+	static ENV_LOCAL uint64_t drop_cnt = 1;
+	uint8_t proto;
+	uint32_t ipv4_dst;
+	uint16_t port_dst;
+
+	pktio_get_dst(event, &proto, &ipv4_dst, &port_dst);
+
+	if (QUEUE_PER_FLOW) {
+		flow_params_t *fp;
+
+		/* Drop everything from the default queue */
+		if (unlikely(queue == eo_ctx->default_queue)) {
+			char ip_str[sizeof("255.255.255.255")];
+
+			ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str));
+
+			APPL_PRINT("Pkt %s:%" PRIu16 " defQ drop-%d-#%" PRIu64 "\n",
+				   ip_str, port_dst, em_core_id(), drop_cnt++);
+
+			em_free(event);
+			return -1;
+		}
+
+		/*
+		 * Check IP address and port: compare packet against the stored
+		 * values in the queue context
+		 */
+		fp = &q_ctx->flow_params;
+		test_fatal_if(fp->ipv4 != ipv4_dst ||
+			      fp->port != port_dst || fp->proto != proto,
+			      "Q:%" PRI_QUEUE " received illegal packet!\n"
+			      "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+			      "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+			      "Abort!", queue, ipv4_dst, port_dst, proto,
+			      fp->ipv4, fp->port, fp->proto);
+	} else {
+		if (unlikely(proto != IPV4_PROTO_UDP)) {
+			APPL_PRINT("Pkt: defQ, not UDP drop-%d-#%" PRIu64 "\n",
+				   em_core_id(), drop_cnt++);
+			em_free(event);
+			return -1;
+		}
+
+		test_fatal_if(ipv4_dst < (uint32_t)IP_ADDR_BASE ||
+			      ipv4_dst >=
+			      (uint32_t)(IP_ADDR_BASE + NUM_IP_ADDRS) ||
+			      port_dst < UDP_PORT_BASE ||
+			      port_dst >= (UDP_PORT_BASE + NUM_PKTIN_QUEUES) ||
+			      proto != IPV4_PROTO_UDP,
+			      "Q:%" PRI_QUEUE " received illegal packet!\n"
+			      "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+			      "Values not in the configured range!\n"
+			      "Abort!",
+			      queue, ipv4_dst, port_dst, proto);
+	}
+
+	/* Everything OK, return zero */
+	return 0;
+}
+
+/**
+ * Alloc a new event, copy the contents & header into the new event
+ * and finally free the original event. Returns a pointer to the new event.
+ *
+ * Used for testing the performance impact of alloc-copy-free operations.
+ */
+static inline em_event_t
+alloc_copy_free(em_event_t event)
+{
+	/* Copy the packet event */
+	em_event_t new_event = pktio_copy_event(event);
+
+	/* Free old event */
+	em_free(event);
+
+	return new_event;
+}
diff --git a/programs/packet_io/multi_stage.c b/programs/packet_io/multi_stage.c
index 1b9d7fb1..19646f0f 100644
--- a/programs/packet_io/multi_stage.c
+++ b/programs/packet_io/multi_stage.c
@@ -1,979 +1,989 @@
-/*
- * Copyright (c) 2012, Nokia Siemens Networks
- * Copyright (c) 2014, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Load Balanced, multi-staged packet-IO test application. - * - * The created UDP flows are received and processed by three (3) chained EOs - * before sending the datagrams back out. Uses EM queues of different priority - * and type. - */ - -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" -#include "cm_pktio.h" - -/* - * Test configuration - */ -#define NUM_IP_ADDRS 4 -#define NUM_PORTS_PER_IP 32 -#define NUM_FLOWS (NUM_IP_ADDRS * NUM_PORTS_PER_IP) -#define MAX_NUM_IF 4 /* max number of used interfaces */ -#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */ -#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES - -#define IP_ADDR_A 192 -#define IP_ADDR_B 168 -#define IP_ADDR_C 1 -#define IP_ADDR_D 16 - -#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \ - (IP_ADDR_C << 8) | (IP_ADDR_D)) -#define UDP_PORT_BASE 1024 - -/** - * The number of different EM queue priority levels to use - fixed. - */ -#define Q_PRIO_LEVELS 1 - -/** - * The number of processing stages for a flow, i.e. the number of EO's a - * packet will go through before being sent back out - fixed. - */ -#define PROCESSING_STAGES 3 - -/** - * Test with different scheduled queue types if set to '1': - * ATOMIC, PARALLELL, PARALLEL_ORDERED - */ -#define QUEUE_TYPE_MIX 1 /* 0=False or 1=True(default) */ - -/** - * Set the used Queue-type for the benchmarking cases when using only - * one queue type, i.e. valid only when QUEUE_TYPE_MIX is '0' - */ -#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */ - -/** - * The number of Queue Type permutations: - * Three (3) queue types (ATOMIC, PARALLELL, PARALLEL-ORDERED) in - * three (3) stages gives 3*3*3 = 27 permutations. - * Only used if QUEUE_TYPE_MIX is '1' - */ -#define QUEUE_TYPE_PERMUTATIONS (3 * 3 * 3) - -/** - * Select whether the UDP ports should be unique over all IP-interfaces - * (set to 1) or reused per IP-interface (thus each UDP port is configured once - * for each IP-interface). Using '0' (not unique) makes it easier to copy - * traffic generator settings from one IF-port to another as only the dst-IP - * address has to be changed. 
- */ -#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ - -/** Select whether the input and output ports should be cross-connected. */ -#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ - -/** Enable per packet error checking */ -#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ - -/** - * Test em_alloc and em_free per packet - * - * Alloc new event, copy event, free old event - */ -#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ - -#define IS_ODD(x) (((x) & 0x1)) -#define IS_EVEN(x) (!IS_ODD(x)) - -/** - * EO context, use common struct for all three EOs - */ -typedef struct { - em_eo_t eo; - em_queue_t default_queue; /* Only used by the first EO handling pktin */ -} eo_context_t; - -/** - * Save the dst IP, protocol and port in the queue-context. - * Verify (if error checking enabled) that the received packet matches the - * configuration for the queue. - */ -typedef struct flow_params_ { - uint32_t ipv4; - uint16_t port; - uint8_t proto; - uint8_t _pad; -} flow_params_t; - -/** - * Queue context, i.e. queue specific data - */ -typedef struct { - /** saved flow params for the EM-queue */ - flow_params_t flow_params; - /** The destination queue of the next stage in the pipeline */ - em_queue_t dst_queue; -} queue_context_1st_t; - -/** - * Queue context, i.e. queue specific data - */ -typedef struct { - em_queue_t dst_queue; -} queue_context_2nd_t; - -/** - * Queue context, i.e. queue specific data - */ -typedef struct { - /** a pktout queue for each interface, precalculated */ - em_queue_t pktout_queue[MAX_IF_ID + 1]; -} queue_context_3rd_t; - -/** - * Queue types used by the three chained EOs processing a flow - */ -typedef struct { - em_queue_type_t queue_type_1st; - em_queue_type_t queue_type_2nd; - em_queue_type_t queue_type_3rd; - /* Note: 'queue_type_4th' is always 'EM_QUEUE_TYPE_PKTOUT' */ -} queue_type_tuple_t; - -/** - * Packet Multi-Stage shared memory - * Read-only after start-up, no cache-line separation needed. - */ -typedef struct { - /** EO (application) contexts */ - eo_context_t eo_ctx[PROCESSING_STAGES]; - /** - * Arrays containing the contexts of all the queues handled by the EOs. - * A queue context contains the flow/queue specific data for the - * application EO. 
- */ - queue_context_1st_t eo_q_ctx_1st[NUM_FLOWS]; - queue_context_2nd_t eo_q_ctx_2nd[NUM_FLOWS]; - queue_context_3rd_t eo_q_ctx_3rd[NUM_FLOWS]; - /* pktout queues: accessed by if_id, thus empty middle slots possible */ - em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; - /** the number of packet output queues to use per interface */ - int pktout_queues_per_if; - /** interface count as provided by appl_conf to test_start() */ - int if_count; - /** interface ids as provided via appl_conf_t to test_start() */ - int if_ids[MAX_NUM_IF]; - /* All possible permutations of the used queue types */ - queue_type_tuple_t q_type_permutations[QUEUE_TYPE_PERMUTATIONS]; -} packet_multi_stage_shm_t; - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL packet_multi_stage_shm_t *pkt_shm; - -static em_status_t -mstage_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args); -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo); - -static void -receive_packet_eo_1st(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -receive_packet_eo_2nd(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -receive_packet_eo_3rd(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static em_status_t -stop_eo(void *eo_context, em_eo_t eo); - -static em_status_t -stop_eo_local(void *eo_context, em_eo_t eo); - -/* - * Helpers: - */ -static queue_type_tuple_t* -get_queue_type_tuple(int cnt); - -static void -fill_q_type_permutations(void); - -static em_queue_type_t -queue_types(int cnt); - -static void -set_pktout_queues(int q_idx, em_queue_t pktout_queue[/*out*/]); - -static inline em_event_t -alloc_copy_free(em_event_t event); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Packet Multi-stage test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - pkt_shm = env_shared_reserve("PktMStageShMem", - sizeof(packet_multi_stage_shm_t)); - em_register_error_handler(mstage_error_handler); - } else { - pkt_shm = env_shared_lookup("PktMStageShMem"); - } - - if (pkt_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Packet Multi-Stage init failed on EM-core: %u", - em_core_id()); - else if (core == 0) - memset(pkt_shm, 0, sizeof(packet_multi_stage_shm_t)); -} - -/** - * Startup of the Packet Multi-stage test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo_1st, eo_2nd, eo_3rd; - em_queue_t default_queue, pktout_queue; - em_queue_t queue_1st, queue_2nd, queue_3rd; - em_queue_t tmp_q; - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; /* platform specific */ - pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ - queue_context_1st_t *q_ctx_1st; - queue_context_2nd_t *q_ctx_2nd; - queue_context_3rd_t *q_ctx_3rd; - queue_type_tuple_t *q_type_tuple; - em_status_t ret, start_fn_ret = EM_ERROR; - uint16_t port_offset = (uint16_t)-1; - int q_ctx_idx = 0; - int if_id, i, j; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads); - - test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || - appl_conf->pktio.if_count <= 0, - "Invalid number of interfaces given:%d - need 1-%d(MAX)", - appl_conf->pktio.if_count, MAX_NUM_IF); - - /* Store the number of pktio interfaces used */ - pkt_shm->if_count = appl_conf->pktio.if_count; - /* Store the used interface ids */ - for (i = 0; i < appl_conf->pktio.if_count; i++) { - if_id = appl_conf->pktio.if_ids[i]; - test_fatal_if(if_id > MAX_IF_ID, - "Interface id out of range! %d > %d(MAX)", - if_id, MAX_IF_ID); - pkt_shm->if_ids[i] = if_id; - } - - /* Use different prios for the queues */ - const em_queue_prio_t q_prio[Q_PRIO_LEVELS] = {EM_QUEUE_PRIO_NORMAL}; - - /* Initialize the Queue-type permutations array */ - fill_q_type_permutations(); - - /* - * Create packet output queues. - * - * Dimension the number of pktout queues to be equal to the number - * of EM cores per interface to minimize output resource contention. 
- */ - test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, - "No room to store pktout queues"); - pkt_shm->pktout_queues_per_if = em_core_count(); - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - - /* Output-queue callback function (em_output_func_t) */ - output_conf.output_fn = pktio_tx; - /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ - output_conf.output_fn_args = &pktio_tx_fn_args; - output_conf.args_len = sizeof(pktio_tx_fn_args_t); - /* Content of 'pktio_tx_fn_args' set in loop */ - - /* Create the packet output queues for each interface */ - for (i = 0; i < pkt_shm->if_count; i++) { - if_id = pkt_shm->if_ids[i]; - for (j = 0; j < pkt_shm->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktio_tx_fn_args.if_id = if_id; - pktout_queue = - em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, &queue_conf); - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue create failed:%d,%d", i, j); - pkt_shm->pktout_queue[if_id][j] = pktout_queue; - } - } - - /* Create EOs, 3 stages of processing for each flow */ - memset(pkt_shm->eo_ctx, 0, sizeof(pkt_shm->eo_ctx)); - eo_1st = em_eo_create("packet_mstage_1st", start_eo, start_eo_local, - stop_eo, stop_eo_local, receive_packet_eo_1st, - &pkt_shm->eo_ctx[0]); - eo_2nd = em_eo_create("packet_mstage_2nd", start_eo, start_eo_local, - stop_eo, stop_eo_local, receive_packet_eo_2nd, - &pkt_shm->eo_ctx[1]); - eo_3rd = em_eo_create("packet_mstage_3rd", start_eo, start_eo_local, - stop_eo, stop_eo_local, receive_packet_eo_3rd, - &pkt_shm->eo_ctx[2]); - - /* Start the EOs */ - ret = em_eo_start_sync(eo_3rd, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - ret = em_eo_start_sync(eo_2nd, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - ret = em_eo_start_sync(eo_1st, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - /* - * Default queue for all packets, handled by EO 1, receives all - * unwanted packets (EO 1 drops them) - * Note: The queue type is EM_QUEUE_TYPE_PARALLEL ! 
- */ - default_queue = em_queue_create("default", EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_LOWEST, - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(default_queue == EM_QUEUE_UNDEF, - "Default Queue creation failed!"); - - /* Store the default queue Id on the EO-context data */ - pkt_shm->eo_ctx[0].default_queue = default_queue; - - /* Associate the queue with EO 1 */ - ret = em_eo_add_queue_sync(eo_1st, default_queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo_1st, default_queue); - - /* Zero the Queue context arrays */ - memset(pkt_shm->eo_q_ctx_1st, 0, sizeof(pkt_shm->eo_q_ctx_1st)); - memset(pkt_shm->eo_q_ctx_2nd, 0, sizeof(pkt_shm->eo_q_ctx_2nd)); - memset(pkt_shm->eo_q_ctx_3rd, 0, sizeof(pkt_shm->eo_q_ctx_3rd)); - - /* Create queues for the input packet flows */ - for (i = 0; i < NUM_IP_ADDRS; i++) { - char ip_str[sizeof("255.255.255.255")]; - uint32_t ip_addr = IP_ADDR_BASE + i; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - for (j = 0; j < NUM_PORTS_PER_IP; j++) { - uint16_t udp_port; - em_queue_prio_t prio; - em_queue_type_t queue_type; - em_queue_group_t queue_group = EM_QUEUE_GROUP_DEFAULT; - - if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ - port_offset++; - else /* Same UDP-ports per IP-interface */ - port_offset = j; - - udp_port = UDP_PORT_BASE + port_offset; - /* Get the queue types for this 3-tuple */ - q_type_tuple = get_queue_type_tuple(q_ctx_idx); - /* Get the queue priority for this 3-tuple */ - prio = q_prio[q_ctx_idx % Q_PRIO_LEVELS]; - - /* - * Create the packet-IO (input/Rx) queue - * for 'eo_1st' for this flow - */ - queue_type = q_type_tuple->queue_type_1st; - queue_1st = em_queue_create("udp_port", queue_type, - prio, queue_group, NULL); - test_fatal_if(queue_1st == EM_QUEUE_UNDEF, - "1.Queue create fail: UDP-port %d", - udp_port); - - q_ctx_1st = &pkt_shm->eo_q_ctx_1st[q_ctx_idx]; - ret = em_queue_set_context(queue_1st, q_ctx_1st); - test_fatal_if(ret != EM_OK, - "Queue-ctx set failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue_1st); - - ret = em_eo_add_queue_sync(eo_1st, queue_1st); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo_1st, queue_1st); - - /* - * Create the middle queue for 'eo_2nd' for this flow - */ - queue_type = q_type_tuple->queue_type_2nd; - queue_2nd = em_queue_create("udp_port", queue_type, - prio, queue_group, NULL); - test_fatal_if(queue_2nd == EM_QUEUE_UNDEF, - "2.Queue create fail: UDP-port %d", - udp_port); - - q_ctx_2nd = &pkt_shm->eo_q_ctx_2nd[q_ctx_idx]; - ret = em_queue_set_context(queue_2nd, q_ctx_2nd); - test_fatal_if(ret != EM_OK, - "Q-ctx set failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue_2nd); - - ret = em_eo_add_queue_sync(eo_2nd, queue_2nd); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo_2nd, queue_2nd); - - /* Save stage1 dst queue */ - q_ctx_1st->dst_queue = queue_2nd; - - /* - * Create the last queue for 'eo_3rd' for this flow, - * eo-3rd sends the event/packet out to where it - * originally came from - */ - queue_type = q_type_tuple->queue_type_3rd; - queue_3rd = em_queue_create("udp_port", queue_type, - prio, queue_group, NULL); - test_fatal_if(queue_3rd == EM_QUEUE_UNDEF, - "3.Queue create fail: UDP-port %d", - udp_port); - - q_ctx_3rd = &pkt_shm->eo_q_ctx_3rd[q_ctx_idx]; - ret = em_queue_set_context(queue_3rd, q_ctx_3rd); - 
test_fatal_if(ret != EM_OK, - "Q-ctx set failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue_3rd); - - ret = em_eo_add_queue_sync(eo_3rd, queue_3rd); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo_3rd, queue_3rd); - - /* Save stage2 dst queue */ - q_ctx_2nd->dst_queue = queue_3rd; - - /* - * Set the pktout queues to use for this queue, - * one pktout queue per interface. - */ - set_pktout_queues(q_ctx_idx, - q_ctx_3rd->pktout_queue/*out*/); - - /* - * Direct this ip_addr:udp_port into the first queue - */ - pktio_add_queue(IPV4_PROTO_UDP, ip_addr, udp_port, - queue_1st); - - /* Save the flow params for debug checks in Rx */ - q_ctx_1st->flow_params.ipv4 = ip_addr; - q_ctx_1st->flow_params.port = udp_port; - q_ctx_1st->flow_params.proto = IPV4_PROTO_UDP; - - /* Sanity checks (lookup what was configured above) */ - tmp_q = pktio_lookup_sw(IPV4_PROTO_UDP, - ip_addr, udp_port); - test_fatal_if(tmp_q == EM_QUEUE_UNDEF || - tmp_q != queue_1st, - "Lookup fails IP:UDP %s:%d\n" - "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", - ip_str, udp_port, queue_1st, - tmp_q); - /* Print first and last mapping */ - if (q_ctx_idx == 0 || - q_ctx_idx == (NUM_IP_ADDRS * NUM_PORTS_PER_IP - 1)) - APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", - ip_str, udp_port, tmp_q); - - /* Update the Queue Context Index for the next round*/ - q_ctx_idx++; - } - } - - /* - * Direct all non-lookup hit packets into this queue. - * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue - */ - pktio_default_queue(default_queue); - - env_sync_mem(); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_status_t ret; - em_queue_t pktout_queue; - int if_id; - int i, j; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - for (i = 0; i < PROCESSING_STAGES; i++) { - em_eo_t eo = pkt_shm->eo_ctx[i].eo; - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - } - - for (i = 0; i < pkt_shm->if_count; i++) { - if_id = pkt_shm->if_ids[i]; - for (j = 0; j < pkt_shm->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktout_queue = pkt_shm->pktout_queue[if_id][j]; - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue undef:%d,%d", i, j); - ret = em_queue_delete(pktout_queue); - test_fatal_if(ret != EM_OK, - "Pktout queue delete failed:%d,%d", i, j); - } - } -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(pkt_shm); - em_unregister_error_handler(); - } -} - -/** - * EO start function (run once at startup on ONE core) - */ -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - eo_ctx->eo = eo; - /* eo_ctx->default_queue = Stored earlier in packet_multi_stage_start*/ - - env_sync_mem(); - - return EM_OK; -} - -/** - * EO Local start function (run once at startup on EACH core) - */ -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - - APPL_PRINT("Core%i: EO %" PRI_EO " local start.\n", em_core_id(), eo); - - return EM_OK; -} - -/** - * EO stop function - */ -static em_status_t -stop_eo(void *eo_context, 
em_eo_t eo) -{ - em_status_t ret; - - (void)eo_context; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - return EM_OK; -} - -/** - * EO local stop function - */ -static em_status_t -stop_eo_local(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - - APPL_PRINT("Core%i: EO %" PRI_EO " local stop.\n", em_core_id(), eo); - - return EM_OK; -} - -/** - * EO_1st receive function - */ -static void -receive_packet_eo_1st(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - eo_context_t *const eo_ctx = eo_context; - queue_context_1st_t *const q_ctx = queue_context; - em_status_t status; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - /* Drop everything from the default queue */ - if (unlikely(queue == eo_ctx->default_queue)) { - static ENV_LOCAL uint64_t drop_cnt = 1; - - /* - * Print notice about pkt drop for the first pkt only to avoid - * flooding the terminal with prints. - */ - if (drop_cnt == 1) { - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - char ip_str[sizeof("255.255.255.255")]; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); - APPL_PRINT("Drop: pkt received from %s:%u, core%d\n", - ip_str, port_dst, em_core_id()); - } - em_free(event); - drop_cnt++; - return; - } - - if (ENABLE_ERROR_CHECKS) { /* Check IP address and port */ - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - flow_params_t *const fp = &q_ctx->flow_params; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - - test_fatal_if(fp->ipv4 != ipv4_dst || - fp->port != port_dst || fp->proto != proto, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Abort!", queue, ipv4_dst, port_dst, proto, - fp->ipv4, fp->port, fp->proto); - } - - /* Send to the next stage for further processing. */ - status = em_send(event, q_ctx->dst_queue); - - if (unlikely(status != EM_OK)) - em_free(event); -} - -/** - * EO_2nd receive function - */ -static void -receive_packet_eo_2nd(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - queue_context_2nd_t *const q_ctx = queue_context; - em_status_t status; - - (void)type; - (void)eo_context; - (void)queue; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - /* Send to the next stage for further processing. */ - status = em_send(event, q_ctx->dst_queue); - - if (unlikely(status != EM_OK)) - em_free(event); -} - -/** - * EO_3rd receive function - */ -static void -receive_packet_eo_3rd(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - queue_context_3rd_t *const q_ctx = queue_context; - int in_port; - int out_port; - em_queue_t pktout_queue; - em_status_t status; - - (void)type; - (void)eo_context; - (void)queue; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - in_port = pktio_input_port(event); - - if (X_CONNECT_PORTS) - out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; - else - out_port = in_port; - - pktout_queue = q_ctx->pktout_queue[out_port]; - - /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: scr<->dst */ - pktio_swap_addrs(event); - - if (ALLOC_COPY_FREE) - event = alloc_copy_free(event); - - /* - * Send the packet buffer back out via the pktout queue through - * the 'out_port' - */ - status = em_send(event, pktout_queue); - if (unlikely(status != EM_OK)) - em_free(event); -} - -/** - * Alloc a new event, copy the contents&header into the new event - * and finally free the original event. Returns a pointer to the new event. - * - * Used for testing the performance impact of alloc-copy-free operations. - */ -static inline em_event_t -alloc_copy_free(em_event_t event) -{ - /* Copy the packet event */ - em_event_t new_event = pktio_copy_event(event); - - /* Free old event */ - em_free(event); - - return new_event; -} - -/** - * Helper func to determine queue types at startup - */ -static queue_type_tuple_t * -get_queue_type_tuple(int cnt) -{ - if (!QUEUE_TYPE_MIX) /* Always return the same kind of Queue types */ - return &pkt_shm->q_type_permutations[0]; - - /* Spread out over the different queue-types */ - const int idx = cnt % QUEUE_TYPE_PERMUTATIONS; - - return &pkt_shm->q_type_permutations[idx]; -} - -/** - * Helper func to initialize the Queue Type permutations array - * - * 3 queue types gives 3*3*3=27 permutations - store these. - */ -static void -fill_q_type_permutations(void) -{ - queue_type_tuple_t *tuple; - - if (!QUEUE_TYPE_MIX) { - tuple = &pkt_shm->q_type_permutations[0]; - /* Use the same type of queues everywhere. */ - tuple->queue_type_1st = QUEUE_TYPE; - tuple->queue_type_2nd = QUEUE_TYPE; - tuple->queue_type_3rd = QUEUE_TYPE; - return; - } - - int i, j, k; - em_queue_type_t queue_type_1st, queue_type_2nd, queue_type_3rd; - int nbr_q = 0; - - for (i = 0; i < 3; i++) { - for (j = 0; j < 3; j++) { - for (k = 0; k < 3; k++, nbr_q++) { - queue_type_1st = queue_types(i); - queue_type_2nd = queue_types(j); - queue_type_3rd = queue_types(k); - - tuple = &pkt_shm->q_type_permutations[nbr_q]; - tuple->queue_type_1st = queue_type_1st; - tuple->queue_type_2nd = queue_type_2nd; - tuple->queue_type_3rd = queue_type_3rd; - } - } - } -} - -/** - * Helper func, returns a Queue Type based on the input count. - */ -static em_queue_type_t -queue_types(int cnt) -{ - switch (cnt % 3) { - case 0: - return EM_QUEUE_TYPE_ATOMIC; - case 1: - return EM_QUEUE_TYPE_PARALLEL; - default: - return EM_QUEUE_TYPE_PARALLEL_ORDERED; - } -} - -/** - * Helper func to store the packet output queues for a specific input queue - */ -static void -set_pktout_queues(int q_idx, em_queue_t pktout_queue[/*out*/]) -{ - int if_count = pkt_shm->if_count; - int pktout_idx = q_idx % pkt_shm->pktout_queues_per_if; - int id, i; - - for (i = 0; i < if_count; i++) { - id = pkt_shm->if_ids[i]; - pktout_queue[id] = pkt_shm->pktout_queue[id][pktout_idx]; - } -} - -static em_status_t -mstage_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args) -{ - /* - * Don't report/log/print em_send() errors, instead return the error - * code and let the application free the event that failed to be sent. - * This avoids a print/log storm in an overloaded situation, i.e. when - * sending input packets at a higher rate that can be sustained. - */ - if (!EM_ERROR_IS_FATAL(error) && - (escope == EM_ESCOPE_SEND || escope == EM_ESCOPE_SEND_MULTI)) - return error; - - return test_error_handler(eo, error, escope, args); -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Load Balanced, multi-staged packet-IO test application.
+ *
+ * The created UDP flows are received and processed by three (3) chained EOs
+ * before sending the datagrams back out. Uses EM queues of different priority
+ * and type.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+#include "cm_pktio.h"
+
+/*
+ * Test configuration
+ */
+#define NUM_IP_ADDRS 4
+#define NUM_PORTS_PER_IP 32
+#define NUM_FLOWS (NUM_IP_ADDRS * NUM_PORTS_PER_IP)
+#define MAX_NUM_IF 4 /* max number of used interfaces */
+#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */
+#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES
+
+#define IP_ADDR_A 192
+#define IP_ADDR_B 168
+#define IP_ADDR_C 1
+#define IP_ADDR_D 16
+
+#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \
+		      (IP_ADDR_C << 8) | (IP_ADDR_D))
+#define UDP_PORT_BASE 1024
+
+/**
+ * The number of different EM queue priority levels to use - fixed.
+ */
+#define Q_PRIO_LEVELS 1
+
+/**
+ * The number of processing stages for a flow, i.e. the number of EOs a
+ * packet will go through before being sent back out - fixed.
+ */
+#define PROCESSING_STAGES 3
+
+/**
+ * Test with different scheduled queue types if set to '1':
+ * ATOMIC, PARALLEL, PARALLEL_ORDERED
+ */
+#define QUEUE_TYPE_MIX 1 /* 0=False or 1=True(default) */
+
+/**
+ * Set the used Queue-type for the benchmarking cases when using only
+ * one queue type, i.e. valid only when QUEUE_TYPE_MIX is '0'
+ */
+#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */
+
+/**
+ * The number of Queue Type permutations:
+ * Three (3) queue types (ATOMIC, PARALLEL, PARALLEL-ORDERED) in
+ * three (3) stages gives 3*3*3 = 27 permutations.
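+ * The table is filled at startup by fill_q_type_permutations(); a flow
+ * with running counter 'cnt' then picks its 3-stage tuple as in
+ * get_queue_type_tuple():
+ * @code
+ *	tuple = &pkt_shm->q_type_permutations[cnt % QUEUE_TYPE_PERMUTATIONS];
+ * @endcode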
+ * Only used if QUEUE_TYPE_MIX is '1' + */ +#define QUEUE_TYPE_PERMUTATIONS (3 * 3 * 3) + +/** + * Select whether the UDP ports should be unique over all IP-interfaces + * (set to 1) or reused per IP-interface (thus each UDP port is configured once + * for each IP-interface). Using '0' (not unique) makes it easier to copy + * traffic generator settings from one IF-port to another as only the dst-IP + * address has to be changed. + */ +#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ + +/** Select whether the input and output ports should be cross-connected. */ +#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ + +/** Enable per packet error checking */ +#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ + +/** + * Test em_alloc and em_free per packet + * + * Alloc new event, copy event, free old event + */ +#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ + +#define IS_ODD(x) (((x) & 0x1)) +#define IS_EVEN(x) (!IS_ODD(x)) + +/** + * EO context, use common struct for all three EOs + */ +typedef struct { + em_eo_t eo; + em_queue_t default_queue; /* Only used by the first EO handling pktin */ +} eo_context_t; + +/** + * Save the dst IP, protocol and port in the queue-context. + * Verify (if error checking enabled) that the received packet matches the + * configuration for the queue. + */ +typedef struct flow_params_ { + uint32_t ipv4; + uint16_t port; + uint8_t proto; + uint8_t _pad; +} flow_params_t; + +/** + * Queue context, i.e. queue specific data + */ +typedef struct { + /** saved flow params for the EM-queue */ + flow_params_t flow_params; + /** The destination queue of the next stage in the pipeline */ + em_queue_t dst_queue; +} queue_context_1st_t; + +/** + * Queue context, i.e. queue specific data + */ +typedef struct { + em_queue_t dst_queue; +} queue_context_2nd_t; + +/** + * Queue context, i.e. queue specific data + */ +typedef struct { + /** a pktout queue for each interface, precalculated */ + em_queue_t pktout_queue[MAX_IF_ID + 1]; +} queue_context_3rd_t; + +/** + * Queue types used by the three chained EOs processing a flow + */ +typedef struct { + em_queue_type_t queue_type_1st; + em_queue_type_t queue_type_2nd; + em_queue_type_t queue_type_3rd; + /* Note: 'queue_type_4th' is always 'EM_QUEUE_TYPE_PKTOUT' */ +} queue_type_tuple_t; + +/** + * Packet Multi-Stage shared memory + * Read-only after start-up, no cache-line separation needed. + */ +typedef struct { + /** EO (application) contexts */ + eo_context_t eo_ctx[PROCESSING_STAGES]; + /** + * Arrays containing the contexts of all the queues handled by the EOs. + * A queue context contains the flow/queue specific data for the + * application EO. 
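+	 *
+	 * Together the contexts chain the per-flow pipeline:
+	 * @code
+	 *	pktin -> Q1 (eo_q_ctx_1st[n].dst_queue -> Q2)
+	 *	      -> Q2 (eo_q_ctx_2nd[n].dst_queue -> Q3)
+	 *	      -> Q3 (eo_q_ctx_3rd[n].pktout_queue[if_id] -> pktout)
+	 * @endcode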
+ */ + queue_context_1st_t eo_q_ctx_1st[NUM_FLOWS]; + queue_context_2nd_t eo_q_ctx_2nd[NUM_FLOWS]; + queue_context_3rd_t eo_q_ctx_3rd[NUM_FLOWS]; + /* pktout queues: accessed by if_id, thus empty middle slots possible */ + em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; + /** the number of packet output queues to use per interface */ + int pktout_queues_per_if; + /** interface count as provided by appl_conf to test_start() */ + int if_count; + /** interface ids as provided via appl_conf_t to test_start() */ + int if_ids[MAX_NUM_IF]; + /* All possible permutations of the used queue types */ + queue_type_tuple_t q_type_permutations[QUEUE_TYPE_PERMUTATIONS]; +} packet_multi_stage_shm_t; + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL packet_multi_stage_shm_t *pkt_shm; + +static em_status_t +mstage_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, + va_list args); +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo); + +static void +receive_packet_eo_1st(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static void +receive_packet_eo_2nd(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static void +receive_packet_eo_3rd(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static em_status_t +stop_eo(void *eo_context, em_eo_t eo); + +static em_status_t +stop_eo_local(void *eo_context, em_eo_t eo); + +/* + * Helpers: + */ +static queue_type_tuple_t* +get_queue_type_tuple(int cnt); + +static void +fill_q_type_permutations(void); + +static em_queue_type_t +queue_types(int cnt); + +static void +set_pktout_queues(int q_idx, em_queue_t pktout_queue[/*out*/]); + +static inline em_event_t +alloc_copy_free(em_event_t event); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Packet Multi-stage test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + pkt_shm = env_shared_reserve("PktMStageShMem", + sizeof(packet_multi_stage_shm_t)); + em_register_error_handler(mstage_error_handler); + } else { + pkt_shm = env_shared_lookup("PktMStageShMem"); + } + + if (pkt_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Packet Multi-Stage init failed on EM-core: %u", + em_core_id()); + else if (core == 0) + memset(pkt_shm, 0, sizeof(packet_multi_stage_shm_t)); +} + +/** + * Startup of the Packet Multi-stage test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. 
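+ *
+ * Note: the EOs are started in reverse pipeline order (3rd, 2nd, 1st),
+ * presumably so that each downstream stage is up before the stage
+ * feeding it is started.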
+ */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo_1st, eo_2nd, eo_3rd; + em_queue_t default_queue, pktout_queue; + em_queue_t queue_1st, queue_2nd, queue_3rd; + em_queue_t tmp_q; + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; /* platform specific */ + pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ + queue_context_1st_t *q_ctx_1st; + queue_context_2nd_t *q_ctx_2nd; + queue_context_3rd_t *q_ctx_3rd; + queue_type_tuple_t *q_type_tuple; + em_status_t ret, start_fn_ret = EM_ERROR; + uint16_t port_offset = (uint16_t)-1; + int q_ctx_idx = 0; + int if_id, i, j; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads); + + test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || + appl_conf->pktio.if_count <= 0, + "Invalid number of interfaces given:%d - need 1-%d(MAX)", + appl_conf->pktio.if_count, MAX_NUM_IF); + + pktin_mode_t pktin_mode = appl_conf->pktio.in_mode; + + test_fatal_if(!pktin_polled_mode(pktin_mode), + "Invalid pktin-mode: %s(%i).\n" + "Application:%s supports only polled pktin-modes: %s(%i), %s(%i)", + pktin_mode_str(pktin_mode), pktin_mode, + appl_conf->name, + pktin_mode_str(DIRECT_RECV), DIRECT_RECV, + pktin_mode_str(PLAIN_QUEUE), PLAIN_QUEUE); + + /* Store the number of pktio interfaces used */ + pkt_shm->if_count = appl_conf->pktio.if_count; + /* Store the used interface ids */ + for (i = 0; i < appl_conf->pktio.if_count; i++) { + if_id = appl_conf->pktio.if_ids[i]; + test_fatal_if(if_id > MAX_IF_ID, + "Interface id out of range! %d > %d(MAX)", + if_id, MAX_IF_ID); + pkt_shm->if_ids[i] = if_id; + } + + /* Use different prios for the queues */ + const em_queue_prio_t q_prio[Q_PRIO_LEVELS] = {EM_QUEUE_PRIO_NORMAL}; + + /* Initialize the Queue-type permutations array */ + fill_q_type_permutations(); + + /* + * Create packet output queues. + * + * Dimension the number of pktout queues to be equal to the number + * of EM cores per interface to minimize output resource contention. 
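+	 *
+	 * E.g. running on 4 EM-cores with 2 interfaces creates 4 pktout
+	 * queues per interface, 8 in total (pktout_queues_per_if is set
+	 * to em_core_count() below).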
+ */ + test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, + "No room to store pktout queues"); + pkt_shm->pktout_queues_per_if = em_core_count(); + + memset(&queue_conf, 0, sizeof(queue_conf)); + memset(&output_conf, 0, sizeof(output_conf)); + queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; + queue_conf.min_events = 0; /* system default */ + queue_conf.conf_len = sizeof(output_conf); + queue_conf.conf = &output_conf; + + /* Output-queue callback function (em_output_func_t) */ + output_conf.output_fn = pktio_tx; + /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ + output_conf.output_fn_args = &pktio_tx_fn_args; + output_conf.args_len = sizeof(pktio_tx_fn_args_t); + /* Content of 'pktio_tx_fn_args' set in loop */ + + /* Create the packet output queues for each interface */ + for (i = 0; i < pkt_shm->if_count; i++) { + if_id = pkt_shm->if_ids[i]; + for (j = 0; j < pkt_shm->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktio_tx_fn_args.if_id = if_id; + pktout_queue = + em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, + EM_QUEUE_PRIO_UNDEF, + EM_QUEUE_GROUP_UNDEF, &queue_conf); + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue create failed:%d,%d", i, j); + pkt_shm->pktout_queue[if_id][j] = pktout_queue; + } + } + + /* Create EOs, 3 stages of processing for each flow */ + memset(pkt_shm->eo_ctx, 0, sizeof(pkt_shm->eo_ctx)); + eo_1st = em_eo_create("packet_mstage_1st", start_eo, start_eo_local, + stop_eo, stop_eo_local, receive_packet_eo_1st, + &pkt_shm->eo_ctx[0]); + eo_2nd = em_eo_create("packet_mstage_2nd", start_eo, start_eo_local, + stop_eo, stop_eo_local, receive_packet_eo_2nd, + &pkt_shm->eo_ctx[1]); + eo_3rd = em_eo_create("packet_mstage_3rd", start_eo, start_eo_local, + stop_eo, stop_eo_local, receive_packet_eo_3rd, + &pkt_shm->eo_ctx[2]); + + /* Start the EOs */ + ret = em_eo_start_sync(eo_3rd, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + ret = em_eo_start_sync(eo_2nd, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + ret = em_eo_start_sync(eo_1st, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + /* + * Default queue for all packets, handled by EO 1, receives all + * unwanted packets (EO 1 drops them) + * Note: The queue type is EM_QUEUE_TYPE_PARALLEL ! 
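+	 * (parallel is presumably sufficient here since dropped packets
+	 * need neither atomic processing nor ordering)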
+ */ + default_queue = em_queue_create("default", EM_QUEUE_TYPE_PARALLEL, + EM_QUEUE_PRIO_LOWEST, + EM_QUEUE_GROUP_DEFAULT, NULL); + test_fatal_if(default_queue == EM_QUEUE_UNDEF, + "Default Queue creation failed!"); + + /* Store the default queue Id on the EO-context data */ + pkt_shm->eo_ctx[0].default_queue = default_queue; + + /* Associate the queue with EO 1 */ + ret = em_eo_add_queue_sync(eo_1st, default_queue); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eo_1st, default_queue); + + /* Zero the Queue context arrays */ + memset(pkt_shm->eo_q_ctx_1st, 0, sizeof(pkt_shm->eo_q_ctx_1st)); + memset(pkt_shm->eo_q_ctx_2nd, 0, sizeof(pkt_shm->eo_q_ctx_2nd)); + memset(pkt_shm->eo_q_ctx_3rd, 0, sizeof(pkt_shm->eo_q_ctx_3rd)); + + /* Create queues for the input packet flows */ + for (i = 0; i < NUM_IP_ADDRS; i++) { + char ip_str[sizeof("255.255.255.255")]; + uint32_t ip_addr = IP_ADDR_BASE + i; + + ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); + + for (j = 0; j < NUM_PORTS_PER_IP; j++) { + uint16_t udp_port; + em_queue_prio_t prio; + em_queue_type_t queue_type; + em_queue_group_t queue_group = EM_QUEUE_GROUP_DEFAULT; + + if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ + port_offset++; + else /* Same UDP-ports per IP-interface */ + port_offset = j; + + udp_port = UDP_PORT_BASE + port_offset; + /* Get the queue types for this 3-tuple */ + q_type_tuple = get_queue_type_tuple(q_ctx_idx); + /* Get the queue priority for this 3-tuple */ + prio = q_prio[q_ctx_idx % Q_PRIO_LEVELS]; + + /* + * Create the packet-IO (input/Rx) queue + * for 'eo_1st' for this flow + */ + queue_type = q_type_tuple->queue_type_1st; + queue_1st = em_queue_create("udp_port", queue_type, + prio, queue_group, NULL); + test_fatal_if(queue_1st == EM_QUEUE_UNDEF, + "1.Queue create fail: UDP-port %d", + udp_port); + + q_ctx_1st = &pkt_shm->eo_q_ctx_1st[q_ctx_idx]; + ret = em_queue_set_context(queue_1st, q_ctx_1st); + test_fatal_if(ret != EM_OK, + "Queue-ctx set failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue_1st); + + ret = em_eo_add_queue_sync(eo_1st, queue_1st); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo_1st, queue_1st); + + /* + * Create the middle queue for 'eo_2nd' for this flow + */ + queue_type = q_type_tuple->queue_type_2nd; + queue_2nd = em_queue_create("udp_port", queue_type, + prio, queue_group, NULL); + test_fatal_if(queue_2nd == EM_QUEUE_UNDEF, + "2.Queue create fail: UDP-port %d", + udp_port); + + q_ctx_2nd = &pkt_shm->eo_q_ctx_2nd[q_ctx_idx]; + ret = em_queue_set_context(queue_2nd, q_ctx_2nd); + test_fatal_if(ret != EM_OK, + "Q-ctx set failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue_2nd); + + ret = em_eo_add_queue_sync(eo_2nd, queue_2nd); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo_2nd, queue_2nd); + + /* Save stage1 dst queue */ + q_ctx_1st->dst_queue = queue_2nd; + + /* + * Create the last queue for 'eo_3rd' for this flow, + * eo-3rd sends the event/packet out to where it + * originally came from + */ + queue_type = q_type_tuple->queue_type_3rd; + queue_3rd = em_queue_create("udp_port", queue_type, + prio, queue_group, NULL); + test_fatal_if(queue_3rd == EM_QUEUE_UNDEF, + "3.Queue create fail: UDP-port %d", + udp_port); + + q_ctx_3rd = &pkt_shm->eo_q_ctx_3rd[q_ctx_idx]; + ret = em_queue_set_context(queue_3rd, q_ctx_3rd); + 
test_fatal_if(ret != EM_OK,
+				      "Q-ctx set failed:%" PRI_STAT "\n"
+				      "EO-q-ctx:%d Q:%" PRI_QUEUE "",
+				      ret, q_ctx_idx, queue_3rd);
+
+			ret = em_eo_add_queue_sync(eo_3rd, queue_3rd);
+			test_fatal_if(ret != EM_OK,
+				      "Add queue failed:%" PRI_STAT "\n"
+				      "EO:%" PRI_EO " Q:%" PRI_QUEUE "",
+				      ret, eo_3rd, queue_3rd);
+
+			/* Save stage2 dst queue */
+			q_ctx_2nd->dst_queue = queue_3rd;
+
+			/*
+			 * Set the pktout queues to use for this queue,
+			 * one pktout queue per interface.
+			 */
+			set_pktout_queues(q_ctx_idx,
+					  q_ctx_3rd->pktout_queue/*out*/);
+
+			/*
+			 * Direct this ip_addr:udp_port into the first queue
+			 */
+			pktio_add_queue(IPV4_PROTO_UDP, ip_addr, udp_port,
+					queue_1st);
+
+			/* Save the flow params for debug checks in Rx */
+			q_ctx_1st->flow_params.ipv4 = ip_addr;
+			q_ctx_1st->flow_params.port = udp_port;
+			q_ctx_1st->flow_params.proto = IPV4_PROTO_UDP;
+
+			/* Sanity checks (lookup what was configured above) */
+			tmp_q = pktio_lookup_sw(IPV4_PROTO_UDP,
+						ip_addr, udp_port);
+			test_fatal_if(tmp_q == EM_QUEUE_UNDEF ||
+				      tmp_q != queue_1st,
+				      "Lookup fails IP:UDP %s:%d\n"
+				      "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "",
+				      ip_str, udp_port, queue_1st,
+				      tmp_q);
+			/* Print first and last mapping */
+			if (q_ctx_idx == 0 ||
+			    q_ctx_idx == (NUM_IP_ADDRS * NUM_PORTS_PER_IP - 1))
+				APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n",
+					   ip_str, udp_port, tmp_q);
+
+			/* Update the Queue Context Index for the next round */
+			q_ctx_idx++;
+		}
+	}
+
+	/*
+	 * Direct all non-lookup-hit packets into the default queue
+	 * (EO 1 will drop them).
+	 */
+	pktio_default_queue(default_queue);
+
+	env_sync_mem();
+}
+
+void
+test_stop(appl_conf_t *const appl_conf)
+{
+	const int core = em_core_id();
+	em_status_t ret;
+	em_queue_t pktout_queue;
+	int if_id;
+	int i, j;
+
+	(void)appl_conf;
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	for (i = 0; i < PROCESSING_STAGES; i++) {
+		em_eo_t eo = pkt_shm->eo_ctx[i].eo;
+
+		ret = em_eo_stop_sync(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret);
+
+		ret = em_eo_delete(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret);
+	}
+
+	for (i = 0; i < pkt_shm->if_count; i++) {
+		if_id = pkt_shm->if_ids[i];
+		for (j = 0; j < pkt_shm->pktout_queues_per_if; j++) {
+			/* pktout queue tied to interface id 'if_id' */
+			pktout_queue = pkt_shm->pktout_queue[if_id][j];
+			test_fatal_if(pktout_queue == EM_QUEUE_UNDEF,
+				      "Pktout queue undef:%d,%d", i, j);
+			ret = em_queue_delete(pktout_queue);
+			test_fatal_if(ret != EM_OK,
+				      "Pktout queue delete failed:%d,%d", i, j);
+		}
+	}
+}
+
+void
+test_term(void)
+{
+	int core = em_core_id();
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	if (core == 0) {
+		env_shared_free(pkt_shm);
+		em_unregister_error_handler();
+	}
+}
+
+/**
+ * EO start function (run once at startup on ONE core)
+ */
+static em_status_t
+start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	eo_context_t *eo_ctx = eo_context;
+
+	(void)conf;
+
+	APPL_PRINT("EO %" PRI_EO " starting.\n", eo);
+
+	eo_ctx->eo = eo;
+	/* eo_ctx->default_queue = stored earlier in test_start() */
+
+	env_sync_mem();
+
+	return EM_OK;
+}
+
+/**
+ * EO Local start function (run once at startup on EACH core)
+ */
+static em_status_t
+start_eo_local(void *eo_context, em_eo_t eo)
+{
+	(void)eo_context;
+
+	APPL_PRINT("Core%i: EO %" PRI_EO " local start.\n", em_core_id(), eo);
+
+	return EM_OK;
+}
+
+/**
+ * EO stop function
+ */
+static em_status_t
+stop_eo(void *eo_context,
em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + return EM_OK; +} + +/** + * EO local stop function + */ +static em_status_t +stop_eo_local(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + + APPL_PRINT("Core%i: EO %" PRI_EO " local stop.\n", em_core_id(), eo); + + return EM_OK; +} + +/** + * EO_1st receive function + */ +static void +receive_packet_eo_1st(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + eo_context_t *const eo_ctx = eo_context; + queue_context_1st_t *const q_ctx = queue_context; + em_status_t status; + + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + /* Drop everything from the default queue */ + if (unlikely(queue == eo_ctx->default_queue)) { + static ENV_LOCAL uint64_t drop_cnt = 1; + + /* + * Print notice about pkt drop for the first pkt only to avoid + * flooding the terminal with prints. + */ + if (drop_cnt == 1) { + uint8_t proto; + uint32_t ipv4_dst; + uint16_t port_dst; + char ip_str[sizeof("255.255.255.255")]; + + pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); + ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); + APPL_PRINT("Drop: pkt received from %s:%u, core%d\n", + ip_str, port_dst, em_core_id()); + } + em_free(event); + drop_cnt++; + return; + } + + if (ENABLE_ERROR_CHECKS) { /* Check IP address and port */ + uint8_t proto; + uint32_t ipv4_dst; + uint16_t port_dst; + flow_params_t *const fp = &q_ctx->flow_params; + + pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); + + test_fatal_if(fp->ipv4 != ipv4_dst || + fp->port != port_dst || fp->proto != proto, + "Q:%" PRI_QUEUE " received illegal packet!\n" + "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" + "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" + "Abort!", queue, ipv4_dst, port_dst, proto, + fp->ipv4, fp->port, fp->proto); + } + + /* Send to the next stage for further processing. */ + status = em_send(event, q_ctx->dst_queue); + + if (unlikely(status != EM_OK)) + em_free(event); +} + +/** + * EO_2nd receive function + */ +static void +receive_packet_eo_2nd(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + queue_context_2nd_t *const q_ctx = queue_context; + em_status_t status; + + (void)type; + (void)eo_context; + (void)queue; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + /* Send to the next stage for further processing. */ + status = em_send(event, q_ctx->dst_queue); + + if (unlikely(status != EM_OK)) + em_free(event); +} + +/** + * EO_3rd receive function + */ +static void +receive_packet_eo_3rd(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + queue_context_3rd_t *const q_ctx = queue_context; + int in_port; + int out_port; + em_queue_t pktout_queue; + em_status_t status; + + (void)type; + (void)eo_context; + (void)queue; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + in_port = pktio_input_port(event); + + if (X_CONNECT_PORTS) + out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; + else + out_port = in_port; + + pktout_queue = q_ctx->pktout_queue[out_port]; + + /* Touch packet. 
Swap MAC, IP-addrs and UDP-ports: src<->dst */
+	pktio_swap_addrs(event);
+
+	if (ALLOC_COPY_FREE)
+		event = alloc_copy_free(event);
+
+	/*
+	 * Send the packet buffer back out via the pktout queue through
+	 * the 'out_port'
+	 */
+	status = em_send(event, pktout_queue);
+	if (unlikely(status != EM_OK))
+		em_free(event);
+}
+
+/**
+ * Alloc a new event, copy the contents & header into the new event
+ * and finally free the original event. Returns a pointer to the new event.
+ *
+ * Used for testing the performance impact of alloc-copy-free operations.
+ */
+static inline em_event_t
+alloc_copy_free(em_event_t event)
+{
+	/* Copy the packet event */
+	em_event_t new_event = pktio_copy_event(event);
+
+	/* Free old event */
+	em_free(event);
+
+	return new_event;
+}
+
+/**
+ * Helper func to determine queue types at startup
+ */
+static queue_type_tuple_t *
+get_queue_type_tuple(int cnt)
+{
+	if (!QUEUE_TYPE_MIX) /* Always return the same kind of Queue types */
+		return &pkt_shm->q_type_permutations[0];
+
+	/* Spread out over the different queue-types */
+	const int idx = cnt % QUEUE_TYPE_PERMUTATIONS;
+
+	return &pkt_shm->q_type_permutations[idx];
+}
+
+/**
+ * Helper func to initialize the Queue Type permutations array
+ *
+ * 3 queue types gives 3*3*3=27 permutations - store these.
+ */
+static void
+fill_q_type_permutations(void)
+{
+	queue_type_tuple_t *tuple;
+
+	if (!QUEUE_TYPE_MIX) {
+		tuple = &pkt_shm->q_type_permutations[0];
+		/* Use the same type of queues everywhere. */
+		tuple->queue_type_1st = QUEUE_TYPE;
+		tuple->queue_type_2nd = QUEUE_TYPE;
+		tuple->queue_type_3rd = QUEUE_TYPE;
+		return;
+	}
+
+	int i, j, k;
+	em_queue_type_t queue_type_1st, queue_type_2nd, queue_type_3rd;
+	int nbr_q = 0;
+
+	for (i = 0; i < 3; i++) {
+		for (j = 0; j < 3; j++) {
+			for (k = 0; k < 3; k++, nbr_q++) {
+				queue_type_1st = queue_types(i);
+				queue_type_2nd = queue_types(j);
+				queue_type_3rd = queue_types(k);
+
+				tuple = &pkt_shm->q_type_permutations[nbr_q];
+				tuple->queue_type_1st = queue_type_1st;
+				tuple->queue_type_2nd = queue_type_2nd;
+				tuple->queue_type_3rd = queue_type_3rd;
+			}
+		}
+	}
+}
+
+/**
+ * Helper func, returns a Queue Type based on the input count.
+ */
+static em_queue_type_t
+queue_types(int cnt)
+{
+	switch (cnt % 3) {
+	case 0:
+		return EM_QUEUE_TYPE_ATOMIC;
+	case 1:
+		return EM_QUEUE_TYPE_PARALLEL;
+	default:
+		return EM_QUEUE_TYPE_PARALLEL_ORDERED;
+	}
+}
+
+/**
+ * Helper func to store the packet output queues for a specific input queue
+ */
+static void
+set_pktout_queues(int q_idx, em_queue_t pktout_queue[/*out*/])
+{
+	int if_count = pkt_shm->if_count;
+	int pktout_idx = q_idx % pkt_shm->pktout_queues_per_if;
+	int id, i;
+
+	for (i = 0; i < if_count; i++) {
+		id = pkt_shm->if_ids[i];
+		pktout_queue[id] = pkt_shm->pktout_queue[id][pktout_idx];
+	}
+}
+
+static em_status_t
+mstage_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope,
+		     va_list args)
+{
+	/*
+	 * Don't report/log/print em_send() errors, instead return the error
+	 * code and let the application free the event that failed to be sent.
+	 * This avoids a print/log storm in an overloaded situation, i.e. when
+	 * sending input packets at a higher rate than can be sustained.
+	 */
+	if (!EM_ERROR_IS_FATAL(error) &&
+	    (escope == EM_ESCOPE_SEND || escope == EM_ESCOPE_SEND_MULTI))
+		return error;
+
+	return test_error_handler(eo, error, escope, args);
+}
diff --git a/programs/packet_io/multi_stage_local.c b/programs/packet_io/multi_stage_local.c
index 1aeeda18..089927af 100644
--- a/programs/packet_io/multi_stage_local.c
+++ b/programs/packet_io/multi_stage_local.c
@@ -1,997 +1,1007 @@
-/*
- * Copyright (c) 2012, Nokia Siemens Networks
- * Copyright (c) 2014, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Multi-staged packet-IO test application using also EM local queues
- * (in addition to EM scheduled queues) between the EO pipeline stages.
- * Using local queues bypasses the scheduling for a possible performance gain
- * while still maintaining the separate EO pipeline stages.
- *
- * The created UDP flows are received and processed by three (3) chained EOs
- * before sending the datagrams back out. Uses EM queues of different priority
- * and type.
- */
-
-#include <string.h>
-#include <stdio.h>
-#include <inttypes.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-#include "cm_pktio.h"
-
-/*
- * Test configuration
- */
-#define NUM_IP_ADDRS 4
-#define NUM_PORTS_PER_IP 32
-#define NUM_FLOWS (NUM_IP_ADDRS * NUM_PORTS_PER_IP)
-#define MAX_NUM_IF 4 /* max number of used interfaces */
-#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */
-#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES
-
-#define IP_ADDR_A 192
-#define IP_ADDR_B 168
-#define IP_ADDR_C 1
-#define IP_ADDR_D 16
-
-#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \
-		      (IP_ADDR_C << 8) | (IP_ADDR_D))
-#define UDP_PORT_BASE 1024
-
-/**
- * The number of different EM queue priority levels to use - fixed.
- */
-#define Q_PRIO_LEVELS 1
-
-/**
- * The number of processing stages for a flow, i.e. the number of EO's a
- * packet will go through before being sent back out - fixed.
- */ -#define PROCESSING_STAGES 3 - -/** - * Test with different queue types if set to '1': - * ATOMIC, PARALLELL, PARALLEL_ORDERED and LOCAL - */ -#define QUEUE_TYPE_MIX 1 /* 0=False or 1=True(default) */ - -/** - * Set the used Queue-type for the benchmarking cases when using only - * one queue type, i.e. valid only when QUEUE_TYPE_MIX is '0' - */ -#define QUEUE_TYPE EM_QUEUE_TYPE_LOCAL -/* #define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */ -/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */ - -/** - * The number of Queue Type permutations: - * Four (4) queue types (LOCAL, ATOMIC, PARALLELL, PARALLEL-ORDERED) in - * three (3) stages gives 4*4*4 = 64 permutations. - * Only used if QUEUE_TYPE_MIX is '1' - */ -#define QUEUE_TYPE_PERMUTATIONS (4 * 4 * 4) - -/** - * Select whether the UDP ports should be unique over all IP-interfaces - * (set to 1) or reused per IP-interface (thus each UDP port is configured once - * for each IP-interface). Using '0' (not unique) makes it easier to copy - * traffic generator settings from one IF-port to another as only the dst-IP - * address has to be changed. - */ -#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ - -/** Select whether the input and output ports should be cross-connected. */ -#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ - -/** Enable per packet error checking */ -#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ - -/** - * Test em_alloc and em_free per packet - * - * Alloc new event, copy event, free old event - */ -#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ - -#define IS_ODD(x) (((x) & 0x1)) -#define IS_EVEN(x) (!IS_ODD(x)) - -/** - * EO context, use common struct for all three EOs - */ -typedef struct { - em_eo_t eo; - em_queue_t default_queue; /* Only used by the first EO handling pktin */ -} eo_context_t; - -/** - * Save the dst IP, protocol and port in the queue-context. - * Verify (if error checking enabled) that the received packet matches the - * configuration for the queue. - */ -typedef struct flow_params_ { - uint32_t ipv4; - uint16_t port; - uint8_t proto; - uint8_t _pad; -} flow_params_t; - -/** - * Queue context, i.e. queue specific data - */ -typedef struct { - /** saved flow params for the EM-queue */ - flow_params_t flow_params; - /** The destination queue of the next stage in the pipeline */ - em_queue_t dst_queue; -} queue_context_1st_t; - -/** - * Queue context, i.e. queue specific data - */ -typedef struct { - em_queue_t dst_queue; -} queue_context_2nd_t; - -/** - * Queue context, i.e. queue specific data - */ -typedef struct { - /** a pktout queue for each interface, precalculated */ - em_queue_t pktout_queue[MAX_IF_ID + 1]; -} queue_context_3rd_t; - -/** - * Queue types used by the three chained EOs processing a flow - */ -typedef struct { - em_queue_type_t queue_type_1st; - em_queue_type_t queue_type_2nd; - em_queue_type_t queue_type_3rd; - /* Note: 'queue_type_4th' is always 'EM_QUEUE_TYPE_PKTOUT' */ -} queue_type_tuple_t; - -/** - * Packet Multi-Stage shared memory - * Read-only after start-up, no cache-line separation needed. - */ -typedef struct { - /** EO (application) contexts */ - eo_context_t eo_ctx[PROCESSING_STAGES]; - /** - * Arrays containing the contexts of all the queues handled by the EOs. - * A queue context contains the flow/queue specific data for the - * application EO. 
- */ - queue_context_1st_t eo_q_ctx_1st[NUM_FLOWS]; - queue_context_2nd_t eo_q_ctx_2nd[NUM_FLOWS]; - queue_context_3rd_t eo_q_ctx_3rd[NUM_FLOWS]; - /* pktout queues: accessed by if_id, thus empty middle slots possible */ - em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; - /** the number of packet output queues to use per interface */ - int pktout_queues_per_if; - /** interface count as provided by appl_conf to test_start() */ - int if_count; - /** interface ids as provided via appl_conf_t to test_start() */ - int if_ids[MAX_NUM_IF]; - /* All possible permutations of the used queue types */ - queue_type_tuple_t q_type_permutations[QUEUE_TYPE_PERMUTATIONS]; -} packet_multi_stage_shm_t; - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL packet_multi_stage_shm_t *pkt_shm; - -static em_status_t -mstage_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args); -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo); - -static void -receive_packet_eo_1st(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -receive_packet_eo_2nd(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -receive_packet_eo_3rd(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static em_status_t -stop_eo(void *eo_context, em_eo_t eo); - -static em_status_t -stop_eo_local(void *eo_context, em_eo_t eo); - -/* - * Helpers: - */ -static queue_type_tuple_t* -get_queue_type_tuple(int cnt); - -static void -fill_q_type_permutations(void); - -static em_queue_type_t -queue_types(int cnt); - -static void -set_pktout_queues(int q_idx, em_queue_t pktout_queue[/*out*/]); - -static inline em_event_t -alloc_copy_free(em_event_t event); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Packet Multi-stage test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - pkt_shm = env_shared_reserve("PktMStageShMem", - sizeof(packet_multi_stage_shm_t)); - em_register_error_handler(mstage_error_handler); - } else { - pkt_shm = env_shared_lookup("PktMStageShMem"); - } - - if (pkt_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Packet Multi-Stage init failed on EM-core: %u", - em_core_id()); - else if (core == 0) - memset(pkt_shm, 0, sizeof(packet_multi_stage_shm_t)); -} - -/** - * Startup of the Packet Multi-stage test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. 
- */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo_1st, eo_2nd, eo_3rd; - em_queue_t default_queue, pktout_queue; - em_queue_t queue_1st, queue_2nd, queue_3rd; - em_queue_t tmp_q; - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; /* platform specific */ - pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ - queue_context_1st_t *q_ctx_1st; - queue_context_2nd_t *q_ctx_2nd; - queue_context_3rd_t *q_ctx_3rd; - queue_type_tuple_t *q_type_tuple; - em_status_t ret, start_fn_ret = EM_ERROR; - uint16_t port_offset = (uint16_t)-1; - int q_ctx_idx = 0; - int if_id, i, j; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads); - - test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || - appl_conf->pktio.if_count <= 0, - "Invalid number of interfaces given:%d - need 1-%d(MAX)", - appl_conf->pktio.if_count, MAX_NUM_IF); - - /* Store the number of pktio interfaces used */ - pkt_shm->if_count = appl_conf->pktio.if_count; - /* Store the used interface ids */ - for (i = 0; i < appl_conf->pktio.if_count; i++) { - if_id = appl_conf->pktio.if_ids[i]; - test_fatal_if(if_id > MAX_IF_ID, - "Interface id out of range! %d > %d(MAX)", - if_id, MAX_IF_ID); - pkt_shm->if_ids[i] = if_id; - } - - /* Use different prios for the queues */ - const em_queue_prio_t q_prio[Q_PRIO_LEVELS] = {EM_QUEUE_PRIO_NORMAL}; - - /* Initialize the Queue-type permutations array */ - fill_q_type_permutations(); - - /* - * Create packet output queues. - * - * Dimension the number of pktout queues to be equal to the number - * of EM cores per interface to minimize output resource contention. 
- */ - test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, - "No room to store pktout queues"); - pkt_shm->pktout_queues_per_if = em_core_count(); - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - - /* Output-queue callback function (em_output_func_t) */ - output_conf.output_fn = pktio_tx; - /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ - output_conf.output_fn_args = &pktio_tx_fn_args; - output_conf.args_len = sizeof(pktio_tx_fn_args_t); - /* Content of 'pktio_tx_fn_args' set in loop */ - - /* Create the packet output queues for each interface */ - for (i = 0; i < pkt_shm->if_count; i++) { - if_id = pkt_shm->if_ids[i]; - for (j = 0; j < pkt_shm->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktio_tx_fn_args.if_id = if_id; - pktout_queue = - em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, &queue_conf); - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue create failed:%d,%d", i, j); - pkt_shm->pktout_queue[if_id][j] = pktout_queue; - } - } - - /* Create EOs, 3 stages of processing for each flow */ - memset(pkt_shm->eo_ctx, 0, sizeof(pkt_shm->eo_ctx)); - eo_1st = em_eo_create("packet_mstage_1st", start_eo, start_eo_local, - stop_eo, stop_eo_local, receive_packet_eo_1st, - &pkt_shm->eo_ctx[0]); - eo_2nd = em_eo_create("packet_mstage_2nd", start_eo, start_eo_local, - stop_eo, stop_eo_local, receive_packet_eo_2nd, - &pkt_shm->eo_ctx[1]); - eo_3rd = em_eo_create("packet_mstage_3rd", start_eo, start_eo_local, - stop_eo, stop_eo_local, receive_packet_eo_3rd, - &pkt_shm->eo_ctx[2]); - - /* Start the EOs */ - ret = em_eo_start_sync(eo_3rd, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - ret = em_eo_start_sync(eo_2nd, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - ret = em_eo_start_sync(eo_1st, &start_fn_ret, NULL); - test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, - "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", - ret, start_fn_ret); - - /* - * Default queue for all packets, handled by EO 1, receives all - * unwanted packets (EO 1 drops them) - * Note: The queue type is EM_QUEUE_TYPE_LOCAL for fast drop! 
- */ - default_queue = em_queue_create("default", EM_QUEUE_TYPE_LOCAL, - EM_QUEUE_PRIO_LOWEST, - EM_QUEUE_GROUP_UNDEF, NULL); - test_fatal_if(default_queue == EM_QUEUE_UNDEF, - "Default Queue creation failed!"); - - /* Store the default queue Id on the EO-context data */ - pkt_shm->eo_ctx[0].default_queue = default_queue; - - /* Associate the queue with EO 1 */ - ret = em_eo_add_queue_sync(eo_1st, default_queue); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo_1st, default_queue); - - /* Zero the Queue context arrays */ - memset(pkt_shm->eo_q_ctx_1st, 0, sizeof(pkt_shm->eo_q_ctx_1st)); - memset(pkt_shm->eo_q_ctx_2nd, 0, sizeof(pkt_shm->eo_q_ctx_2nd)); - memset(pkt_shm->eo_q_ctx_3rd, 0, sizeof(pkt_shm->eo_q_ctx_3rd)); - - /* Create queues for the input packet flows */ - for (i = 0; i < NUM_IP_ADDRS; i++) { - char ip_str[sizeof("255.255.255.255")]; - uint32_t ip_addr = IP_ADDR_BASE + i; - - ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); - - for (j = 0; j < NUM_PORTS_PER_IP; j++) { - uint16_t udp_port; - em_queue_prio_t prio; - em_queue_type_t queue_type; - em_queue_group_t queue_group = EM_QUEUE_GROUP_DEFAULT; - - if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ - port_offset++; - else /* Same UDP-ports per IP-interface */ - port_offset = j; - - udp_port = UDP_PORT_BASE + port_offset; - /* Get the queue types for this 3-tuple */ - q_type_tuple = get_queue_type_tuple(q_ctx_idx); - /* Get the queue priority for this 3-tuple */ - prio = q_prio[q_ctx_idx % Q_PRIO_LEVELS]; - - /* - * Create the packet-IO (input/Rx) queue - * for 'eo_1st' for this flow - */ - queue_type = q_type_tuple->queue_type_1st; - if (queue_type == EM_QUEUE_TYPE_LOCAL) - queue_group = EM_QUEUE_GROUP_UNDEF; - else - queue_group = EM_QUEUE_GROUP_DEFAULT; - queue_1st = em_queue_create("udp_port", queue_type, - prio, queue_group, NULL); - test_fatal_if(queue_1st == EM_QUEUE_UNDEF, - "1.Queue create fail: UDP-port %d", - udp_port); - - q_ctx_1st = &pkt_shm->eo_q_ctx_1st[q_ctx_idx]; - ret = em_queue_set_context(queue_1st, q_ctx_1st); - test_fatal_if(ret != EM_OK, - "Queue-ctx set failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue_1st); - - ret = em_eo_add_queue_sync(eo_1st, queue_1st); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo_1st, queue_1st); - - /* - * Create the middle queue for 'eo_2nd' for this flow - */ - queue_type = q_type_tuple->queue_type_2nd; - if (queue_type == EM_QUEUE_TYPE_LOCAL) - queue_group = EM_QUEUE_GROUP_UNDEF; - else - queue_group = EM_QUEUE_GROUP_DEFAULT; - queue_2nd = em_queue_create("udp_port", queue_type, - prio, queue_group, NULL); - test_fatal_if(queue_2nd == EM_QUEUE_UNDEF, - "2.Queue create fail: UDP-port %d", - udp_port); - - q_ctx_2nd = &pkt_shm->eo_q_ctx_2nd[q_ctx_idx]; - ret = em_queue_set_context(queue_2nd, q_ctx_2nd); - test_fatal_if(ret != EM_OK, - "Q-ctx set failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue_2nd); - - ret = em_eo_add_queue_sync(eo_2nd, queue_2nd); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo_2nd, queue_2nd); - - /* Save stage1 dst queue */ - q_ctx_1st->dst_queue = queue_2nd; - - /* - * Create the last queue for 'eo_3rd' for this flow, - * eo-3rd sends the event/packet out to where it - * originally came from - */ - queue_type = q_type_tuple->queue_type_3rd; - if (queue_type == EM_QUEUE_TYPE_LOCAL) - 
queue_group = EM_QUEUE_GROUP_UNDEF; - else - queue_group = EM_QUEUE_GROUP_DEFAULT; - queue_3rd = em_queue_create("udp_port", queue_type, - prio, queue_group, NULL); - test_fatal_if(queue_3rd == EM_QUEUE_UNDEF, - "3.Queue create fail: UDP-port %d", - udp_port); - - q_ctx_3rd = &pkt_shm->eo_q_ctx_3rd[q_ctx_idx]; - ret = em_queue_set_context(queue_3rd, q_ctx_3rd); - test_fatal_if(ret != EM_OK, - "Q-ctx set failed:%" PRI_STAT "\n" - "EO-q-ctx:%d Q:%" PRI_QUEUE "", - ret, q_ctx_idx, queue_3rd); - - ret = em_eo_add_queue_sync(eo_3rd, queue_3rd); - test_fatal_if(ret != EM_OK, - "Add queue failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, eo_3rd, queue_3rd); - - /* Save stage2 dst queue */ - q_ctx_2nd->dst_queue = queue_3rd; - - /* - * Set the pktout queues to use for this queue, - * one pktout queue per interface. - */ - set_pktout_queues(q_ctx_idx, - q_ctx_3rd->pktout_queue/*out*/); - - /* - * Direct this ip_addr:udp_port into the first queue - */ - pktio_add_queue(IPV4_PROTO_UDP, ip_addr, udp_port, - queue_1st); - - /* Save the flow params for debug checks in Rx */ - q_ctx_1st->flow_params.ipv4 = ip_addr; - q_ctx_1st->flow_params.port = udp_port; - q_ctx_1st->flow_params.proto = IPV4_PROTO_UDP; - - /* Sanity checks (lookup what was configured above) */ - tmp_q = pktio_lookup_sw(IPV4_PROTO_UDP, - ip_addr, udp_port); - test_fatal_if(tmp_q == EM_QUEUE_UNDEF || - tmp_q != queue_1st, - "Lookup fails IP:UDP %s:%d\n" - "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", - ip_str, udp_port, queue_1st, - tmp_q); - /* Print first and last mapping */ - if (q_ctx_idx == 0 || - q_ctx_idx == (NUM_IP_ADDRS * NUM_PORTS_PER_IP - 1)) - APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", - ip_str, udp_port, tmp_q); - - /* Update the Queue Context Index for the next round*/ - q_ctx_idx++; - } - } - - /* - * Direct all non-lookup hit packets into this queue. 
- * Note: if QUEUE_PER_FLOW is '0' then ALL packets end up in this queue - */ - pktio_default_queue(default_queue); - - env_sync_mem(); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_status_t ret; - em_queue_t pktout_queue; - int if_id; - int i, j; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - for (i = 0; i < PROCESSING_STAGES; i++) { - em_eo_t eo = pkt_shm->eo_ctx[i].eo; - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - } - - for (i = 0; i < pkt_shm->if_count; i++) { - if_id = pkt_shm->if_ids[i]; - for (j = 0; j < pkt_shm->pktout_queues_per_if; j++) { - /* pktout queue tied to interface id 'if_id' */ - pktout_queue = pkt_shm->pktout_queue[if_id][j]; - test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, - "Pktout queue undef:%d,%d", i, j); - ret = em_queue_delete(pktout_queue); - test_fatal_if(ret != EM_OK, - "Pktout queue delete failed:%d,%d", i, j); - } - } -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(pkt_shm); - em_unregister_error_handler(); - } -} - -/** - * EO start function (run once at startup on ONE core) - */ -static em_status_t -start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - eo_ctx->eo = eo; - /* eo_ctx->default_queue = Stored earlier in packet_multi_stage_start*/ - - env_sync_mem(); - - return EM_OK; -} - -/** - * EO Local start function (run once at startup on EACH core) - */ -static em_status_t -start_eo_local(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - - APPL_PRINT("Core%i: EO %" PRI_EO " local start.\n", em_core_id(), eo); - - return EM_OK; -} - -/** - * EO stop function - */ -static em_status_t -stop_eo(void *eo_context, em_eo_t eo) -{ - em_status_t ret; - - (void)eo_context; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - return EM_OK; -} - -/** - * EO local stop function - */ -static em_status_t -stop_eo_local(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - - APPL_PRINT("Core%i: EO %" PRI_EO " local stop.\n", em_core_id(), eo); - - return EM_OK; -} - -/** - * EO_1st receive function - */ -static void -receive_packet_eo_1st(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - eo_context_t *const eo_ctx = eo_context; - queue_context_1st_t *const q_ctx = queue_context; - em_status_t status; - - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - /* Drop everything from the default queue */ - if (unlikely(queue == eo_ctx->default_queue)) { - static ENV_LOCAL uint64_t drop_cnt = 1; - - /* - * Print notice about pkt drop for the first pkt only to avoid - * flooding the terminal with prints. 
- */ - if (drop_cnt == 1) { - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - char ip_str[sizeof("255.255.255.255")]; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str)); - APPL_PRINT("Drop: pkt received from %s:%u, core%d\n", - ip_str, port_dst, em_core_id()); - } - em_free(event); - drop_cnt++; - return; - } - - if (ENABLE_ERROR_CHECKS) { /* Check IP address and port */ - uint8_t proto; - uint32_t ipv4_dst; - uint16_t port_dst; - flow_params_t *const fp = &q_ctx->flow_params; - - pktio_get_dst(event, &proto, &ipv4_dst, &port_dst); - - test_fatal_if(fp->ipv4 != ipv4_dst || - fp->port != port_dst || fp->proto != proto, - "Q:%" PRI_QUEUE " received illegal packet!\n" - "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n" - "Abort!", queue, ipv4_dst, port_dst, proto, - fp->ipv4, fp->port, fp->proto); - } - - /* Send to the next stage for further processing. */ - status = em_send(event, q_ctx->dst_queue); - - if (unlikely(status != EM_OK)) - em_free(event); -} - -/** - * EO_2nd receive function - */ -static void -receive_packet_eo_2nd(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - queue_context_2nd_t *const q_ctx = queue_context; - em_status_t status; - - (void)type; - (void)eo_context; - (void)queue; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - /* Send to the next stage for further processing. */ - status = em_send(event, q_ctx->dst_queue); - - if (unlikely(status != EM_OK)) - em_free(event); -} - -/** - * EO_3rd receive function - */ -static void -receive_packet_eo_3rd(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - queue_context_3rd_t *const q_ctx = queue_context; - int in_port; - int out_port; - em_queue_t pktout_queue; - em_status_t status; - - (void)type; - (void)eo_context; - (void)queue; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - in_port = pktio_input_port(event); - - if (X_CONNECT_PORTS) - out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1; - else - out_port = in_port; - - pktout_queue = q_ctx->pktout_queue[out_port]; - - /* Touch packet. Swap MAC, IP-addrs and UDP-ports: scr<->dst */ - pktio_swap_addrs(event); - - if (ALLOC_COPY_FREE) - event = alloc_copy_free(event); - - /* - * Send the packet buffer back out via the pktout queue through - * the 'out_port' - */ - status = em_send(event, pktout_queue); - if (unlikely(status != EM_OK)) - em_free(event); -} - -/** - * Alloc a new event, copy the contents&header into the new event - * and finally free the original event. Returns a pointer to the new event. - * - * Used for testing the performance impact of alloc-copy-free operations. 
- */ -static inline em_event_t -alloc_copy_free(em_event_t event) -{ - /* Copy the packet event */ - em_event_t new_event = pktio_copy_event(event); - - /* Free old event */ - em_free(event); - - return new_event; -} - -/** - * Helper func to determine queue types at startup - */ -static queue_type_tuple_t * -get_queue_type_tuple(int cnt) -{ - if (!QUEUE_TYPE_MIX) /* Always return the same kind of Queue types */ - return &pkt_shm->q_type_permutations[0]; - - /* Spread out over the different queue-types */ - const int idx = cnt % QUEUE_TYPE_PERMUTATIONS; - - return &pkt_shm->q_type_permutations[idx]; -} - -/** - * Helper func to initialize the Queue Type permutations array - * - * 4 queue types gives 4*4*4=64 permutations - store these. - */ -static void -fill_q_type_permutations(void) -{ - queue_type_tuple_t *tuple; - - if (!QUEUE_TYPE_MIX) { - tuple = &pkt_shm->q_type_permutations[0]; - /* Use the same type of queues everywhere. */ - tuple->queue_type_1st = QUEUE_TYPE; - tuple->queue_type_2nd = QUEUE_TYPE; - tuple->queue_type_3rd = QUEUE_TYPE; - return; - } - - int i, j, k; - em_queue_type_t queue_type_1st, queue_type_2nd, queue_type_3rd; - int nbr_q = 0; - - for (i = 0; i < 4; i++) { - for (j = 0; j < 4; j++) { - for (k = 0; k < 4; k++, nbr_q++) { - queue_type_1st = queue_types(i); - queue_type_2nd = queue_types(j); - queue_type_3rd = queue_types(k); - - tuple = &pkt_shm->q_type_permutations[nbr_q]; - tuple->queue_type_1st = queue_type_1st; - tuple->queue_type_2nd = queue_type_2nd; - tuple->queue_type_3rd = queue_type_3rd; - } - } - } -} - -/** - * Helper func, returns a Queue Type based on the input count. - */ -static em_queue_type_t -queue_types(int cnt) -{ - switch (cnt % 3) { - case 0: - return EM_QUEUE_TYPE_LOCAL; - case 1: - return EM_QUEUE_TYPE_ATOMIC; - case 2: - return EM_QUEUE_TYPE_PARALLEL; - default: - return EM_QUEUE_TYPE_PARALLEL_ORDERED; - } -} - -/** - * Helper func to store the packet output queues for a specific input queue - */ -static void -set_pktout_queues(int q_idx, em_queue_t pktout_queue[/*out*/]) -{ - int if_count = pkt_shm->if_count; - int pktout_idx = q_idx % pkt_shm->pktout_queues_per_if; - int id, i; - - for (i = 0; i < if_count; i++) { - id = pkt_shm->if_ids[i]; - pktout_queue[id] = pkt_shm->pktout_queue[id][pktout_idx]; - } -} - -static em_status_t -mstage_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args) -{ - /* - * Don't report/log/print em_send() errors, instead return the error - * code and let the application free the event that failed to be sent. - * This avoids a print/log storm in an overloaded situation, i.e. when - * sending input packets at a higher rate that can be sustained. - */ - if (!EM_ERROR_IS_FATAL(error) && - (escope == EM_ESCOPE_SEND || escope == EM_ESCOPE_SEND_MULTI)) - return error; - - return test_error_handler(eo, error, escope, args); -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Multi-staged packet-IO test application using also EM local queues
+ * (in addition to EM scheduled queues) between the EO pipeline stages.
+ * Using local queues bypasses the scheduling for a possible performance gain
+ * while still maintaining the separate EO pipeline stages.
+ *
+ * The created UDP flows are received and processed by three (3) chained EOs
+ * before sending the datagrams back out. Uses EM queues of different priority
+ * and type.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <inttypes.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+#include "cm_pktio.h"
+
+/*
+ * Test configuration
+ */
+#define NUM_IP_ADDRS 4
+#define NUM_PORTS_PER_IP 32
+#define NUM_FLOWS (NUM_IP_ADDRS * NUM_PORTS_PER_IP)
+#define MAX_NUM_IF 4 /* max number of used interfaces */
+#define MAX_IF_ID 6 /* max interface identifier:[0-MAX], cnt:MAX+1 */
+#define MAX_PKTOUT_QUEUES_PER_IF EM_MAX_CORES
+
+#define IP_ADDR_A 192
+#define IP_ADDR_B 168
+#define IP_ADDR_C 1
+#define IP_ADDR_D 16
+
+#define IP_ADDR_BASE ((IP_ADDR_A << 24) | (IP_ADDR_B << 16) | \
+		      (IP_ADDR_C << 8) | (IP_ADDR_D))
+#define UDP_PORT_BASE 1024
+
+/**
+ * The number of different EM queue priority levels to use - fixed.
+ */
+#define Q_PRIO_LEVELS 1
+
+/**
+ * The number of processing stages for a flow, i.e. the number of EOs a
+ * packet will go through before being sent back out - fixed.
+ */
+#define PROCESSING_STAGES 3
+
+/**
+ * Test with different queue types if set to '1':
+ * ATOMIC, PARALLEL, PARALLEL_ORDERED and LOCAL
+ */
+#define QUEUE_TYPE_MIX 1 /* 0=False or 1=True(default) */
+
+/**
+ * Set the used Queue-type for the benchmarking cases when using only
+ * one queue type, i.e. valid only when QUEUE_TYPE_MIX is '0'
+ */
+#define QUEUE_TYPE EM_QUEUE_TYPE_LOCAL
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC */
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL */
+/* #define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL_ORDERED */
+
+/**
+ * The number of Queue Type permutations:
+ * Four (4) queue types (LOCAL, ATOMIC, PARALLEL, PARALLEL-ORDERED) in
+ * three (3) stages gives 4*4*4 = 64 permutations.
+ * Only used if QUEUE_TYPE_MIX is '1'
+ */
+#define QUEUE_TYPE_PERMUTATIONS (4 * 4 * 4)
+
+/**
+ * Select whether the UDP ports should be unique over all IP-interfaces
+ * (set to 1) or reused per IP-interface (thus each UDP port is configured once
+ * for each IP-interface).
Using '0' (not unique) makes it easier to copy + * traffic generator settings from one IF-port to another as only the dst-IP + * address has to be changed. + */ +#define UDP_PORTS_UNIQUE 0 /* 0=False or 1=True */ + +/** Select whether the input and output ports should be cross-connected. */ +#define X_CONNECT_PORTS 0 /* 0=False or 1=True */ + +/** Enable per packet error checking */ +#define ENABLE_ERROR_CHECKS 0 /* 0=False or 1=True */ + +/** + * Test em_alloc and em_free per packet + * + * Alloc new event, copy event, free old event + */ +#define ALLOC_COPY_FREE 0 /* 0=False or 1=True */ + +#define IS_ODD(x) (((x) & 0x1)) +#define IS_EVEN(x) (!IS_ODD(x)) + +/** + * EO context, use common struct for all three EOs + */ +typedef struct { + em_eo_t eo; + em_queue_t default_queue; /* Only used by the first EO handling pktin */ +} eo_context_t; + +/** + * Save the dst IP, protocol and port in the queue-context. + * Verify (if error checking enabled) that the received packet matches the + * configuration for the queue. + */ +typedef struct flow_params_ { + uint32_t ipv4; + uint16_t port; + uint8_t proto; + uint8_t _pad; +} flow_params_t; + +/** + * Queue context, i.e. queue specific data + */ +typedef struct { + /** saved flow params for the EM-queue */ + flow_params_t flow_params; + /** The destination queue of the next stage in the pipeline */ + em_queue_t dst_queue; +} queue_context_1st_t; + +/** + * Queue context, i.e. queue specific data + */ +typedef struct { + em_queue_t dst_queue; +} queue_context_2nd_t; + +/** + * Queue context, i.e. queue specific data + */ +typedef struct { + /** a pktout queue for each interface, precalculated */ + em_queue_t pktout_queue[MAX_IF_ID + 1]; +} queue_context_3rd_t; + +/** + * Queue types used by the three chained EOs processing a flow + */ +typedef struct { + em_queue_type_t queue_type_1st; + em_queue_type_t queue_type_2nd; + em_queue_type_t queue_type_3rd; + /* Note: 'queue_type_4th' is always 'EM_QUEUE_TYPE_PKTOUT' */ +} queue_type_tuple_t; + +/** + * Packet Multi-Stage shared memory + * Read-only after start-up, no cache-line separation needed. + */ +typedef struct { + /** EO (application) contexts */ + eo_context_t eo_ctx[PROCESSING_STAGES]; + /** + * Arrays containing the contexts of all the queues handled by the EOs. + * A queue context contains the flow/queue specific data for the + * application EO. 
+ */ + queue_context_1st_t eo_q_ctx_1st[NUM_FLOWS]; + queue_context_2nd_t eo_q_ctx_2nd[NUM_FLOWS]; + queue_context_3rd_t eo_q_ctx_3rd[NUM_FLOWS]; + /* pktout queues: accessed by if_id, thus empty middle slots possible */ + em_queue_t pktout_queue[MAX_IF_ID + 1][MAX_PKTOUT_QUEUES_PER_IF]; + /** the number of packet output queues to use per interface */ + int pktout_queues_per_if; + /** interface count as provided by appl_conf to test_start() */ + int if_count; + /** interface ids as provided via appl_conf_t to test_start() */ + int if_ids[MAX_NUM_IF]; + /* All possible permutations of the used queue types */ + queue_type_tuple_t q_type_permutations[QUEUE_TYPE_PERMUTATIONS]; +} packet_multi_stage_shm_t; + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL packet_multi_stage_shm_t *pkt_shm; + +static em_status_t +mstage_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, + va_list args); +static em_status_t +start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +start_eo_local(void *eo_context, em_eo_t eo); + +static void +receive_packet_eo_1st(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static void +receive_packet_eo_2nd(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static void +receive_packet_eo_3rd(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); +static em_status_t +stop_eo(void *eo_context, em_eo_t eo); + +static em_status_t +stop_eo_local(void *eo_context, em_eo_t eo); + +/* + * Helpers: + */ +static queue_type_tuple_t* +get_queue_type_tuple(int cnt); + +static void +fill_q_type_permutations(void); + +static em_queue_type_t +queue_types(int cnt); + +static void +set_pktout_queues(int q_idx, em_queue_t pktout_queue[/*out*/]); + +static inline em_event_t +alloc_copy_free(em_event_t event); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Packet Multi-stage test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + pkt_shm = env_shared_reserve("PktMStageShMem", + sizeof(packet_multi_stage_shm_t)); + em_register_error_handler(mstage_error_handler); + } else { + pkt_shm = env_shared_lookup("PktMStageShMem"); + } + + if (pkt_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Packet Multi-Stage init failed on EM-core: %u", + em_core_id()); + else if (core == 0) + memset(pkt_shm, 0, sizeof(packet_multi_stage_shm_t)); +} + +/** + * Startup of the Packet Multi-stage test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. 
+ */ +void +test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo_1st, eo_2nd, eo_3rd; + em_queue_t default_queue, pktout_queue; + em_queue_t queue_1st, queue_2nd, queue_3rd; + em_queue_t tmp_q; + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; /* platform specific */ + pktio_tx_fn_args_t pktio_tx_fn_args; /* user defined content */ + queue_context_1st_t *q_ctx_1st; + queue_context_2nd_t *q_ctx_2nd; + queue_context_3rd_t *q_ctx_3rd; + queue_type_tuple_t *q_type_tuple; + em_status_t ret, start_fn_ret = EM_ERROR; + uint16_t port_offset = (uint16_t)-1; + int q_ctx_idx = 0; + int if_id, i, j; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads); + + test_fatal_if(appl_conf->pktio.if_count > MAX_NUM_IF || + appl_conf->pktio.if_count <= 0, + "Invalid number of interfaces given:%d - need 1-%d(MAX)", + appl_conf->pktio.if_count, MAX_NUM_IF); + + pktin_mode_t pktin_mode = appl_conf->pktio.in_mode; + + test_fatal_if(!pktin_polled_mode(pktin_mode), + "Invalid pktin-mode: %s(%i).\n" + "Application:%s supports only polled pktin-modes: %s(%i), %s(%i)", + pktin_mode_str(pktin_mode), pktin_mode, + appl_conf->name, + pktin_mode_str(DIRECT_RECV), DIRECT_RECV, + pktin_mode_str(PLAIN_QUEUE), PLAIN_QUEUE); + + /* Store the number of pktio interfaces used */ + pkt_shm->if_count = appl_conf->pktio.if_count; + /* Store the used interface ids */ + for (i = 0; i < appl_conf->pktio.if_count; i++) { + if_id = appl_conf->pktio.if_ids[i]; + test_fatal_if(if_id > MAX_IF_ID, + "Interface id out of range! %d > %d(MAX)", + if_id, MAX_IF_ID); + pkt_shm->if_ids[i] = if_id; + } + + /* Use different prios for the queues */ + const em_queue_prio_t q_prio[Q_PRIO_LEVELS] = {EM_QUEUE_PRIO_NORMAL}; + + /* Initialize the Queue-type permutations array */ + fill_q_type_permutations(); + + /* + * Create packet output queues. + * + * Dimension the number of pktout queues to be equal to the number + * of EM cores per interface to minimize output resource contention. 
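+	 * For example: 2 interfaces used on 8 EM-cores gives 2 * 8 = 16
+	 * pktout queues, i.e. one queue per core for each interface.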
+ */ + test_fatal_if(em_core_count() >= MAX_PKTOUT_QUEUES_PER_IF, + "No room to store pktout queues"); + pkt_shm->pktout_queues_per_if = em_core_count(); + + memset(&queue_conf, 0, sizeof(queue_conf)); + memset(&output_conf, 0, sizeof(output_conf)); + queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; + queue_conf.min_events = 0; /* system default */ + queue_conf.conf_len = sizeof(output_conf); + queue_conf.conf = &output_conf; + + /* Output-queue callback function (em_output_func_t) */ + output_conf.output_fn = pktio_tx; + /* Callback function extra argument, here a 'pktio_tx_fn_args_t' ptr */ + output_conf.output_fn_args = &pktio_tx_fn_args; + output_conf.args_len = sizeof(pktio_tx_fn_args_t); + /* Content of 'pktio_tx_fn_args' set in loop */ + + /* Create the packet output queues for each interface */ + for (i = 0; i < pkt_shm->if_count; i++) { + if_id = pkt_shm->if_ids[i]; + for (j = 0; j < pkt_shm->pktout_queues_per_if; j++) { + /* pktout queue tied to interface id 'if_id' */ + pktio_tx_fn_args.if_id = if_id; + pktout_queue = + em_queue_create("pktout-queue", EM_QUEUE_TYPE_OUTPUT, + EM_QUEUE_PRIO_UNDEF, + EM_QUEUE_GROUP_UNDEF, &queue_conf); + test_fatal_if(pktout_queue == EM_QUEUE_UNDEF, + "Pktout queue create failed:%d,%d", i, j); + pkt_shm->pktout_queue[if_id][j] = pktout_queue; + } + } + + /* Create EOs, 3 stages of processing for each flow */ + memset(pkt_shm->eo_ctx, 0, sizeof(pkt_shm->eo_ctx)); + eo_1st = em_eo_create("packet_mstage_1st", start_eo, start_eo_local, + stop_eo, stop_eo_local, receive_packet_eo_1st, + &pkt_shm->eo_ctx[0]); + eo_2nd = em_eo_create("packet_mstage_2nd", start_eo, start_eo_local, + stop_eo, stop_eo_local, receive_packet_eo_2nd, + &pkt_shm->eo_ctx[1]); + eo_3rd = em_eo_create("packet_mstage_3rd", start_eo, start_eo_local, + stop_eo, stop_eo_local, receive_packet_eo_3rd, + &pkt_shm->eo_ctx[2]); + + /* Start the EOs */ + ret = em_eo_start_sync(eo_3rd, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + ret = em_eo_start_sync(eo_2nd, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + ret = em_eo_start_sync(eo_1st, &start_fn_ret, NULL); + test_fatal_if(ret != EM_OK || start_fn_ret != EM_OK, + "em_eo_start_sync() failed:%" PRI_STAT " %" PRI_STAT "", + ret, start_fn_ret); + + /* + * Default queue for all packets, handled by EO 1, receives all + * unwanted packets (EO 1 drops them) + * Note: The queue type is EM_QUEUE_TYPE_LOCAL for fast drop! 
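+	 * A local queue is not scheduled: events sent to it are dispatched
+	 * directly on the sending core, so the unwanted packets are dropped
+	 * with minimal overhead.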
+ */ + default_queue = em_queue_create("default", EM_QUEUE_TYPE_LOCAL, + EM_QUEUE_PRIO_LOWEST, + EM_QUEUE_GROUP_UNDEF, NULL); + test_fatal_if(default_queue == EM_QUEUE_UNDEF, + "Default Queue creation failed!"); + + /* Store the default queue Id on the EO-context data */ + pkt_shm->eo_ctx[0].default_queue = default_queue; + + /* Associate the queue with EO 1 */ + ret = em_eo_add_queue_sync(eo_1st, default_queue); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eo_1st, default_queue); + + /* Zero the Queue context arrays */ + memset(pkt_shm->eo_q_ctx_1st, 0, sizeof(pkt_shm->eo_q_ctx_1st)); + memset(pkt_shm->eo_q_ctx_2nd, 0, sizeof(pkt_shm->eo_q_ctx_2nd)); + memset(pkt_shm->eo_q_ctx_3rd, 0, sizeof(pkt_shm->eo_q_ctx_3rd)); + + /* Create queues for the input packet flows */ + for (i = 0; i < NUM_IP_ADDRS; i++) { + char ip_str[sizeof("255.255.255.255")]; + uint32_t ip_addr = IP_ADDR_BASE + i; + + ipaddr_tostr(ip_addr, ip_str, sizeof(ip_str)); + + for (j = 0; j < NUM_PORTS_PER_IP; j++) { + uint16_t udp_port; + em_queue_prio_t prio; + em_queue_type_t queue_type; + em_queue_group_t queue_group = EM_QUEUE_GROUP_DEFAULT; + + if (UDP_PORTS_UNIQUE) /* Every UDP-port is different */ + port_offset++; + else /* Same UDP-ports per IP-interface */ + port_offset = j; + + udp_port = UDP_PORT_BASE + port_offset; + /* Get the queue types for this 3-tuple */ + q_type_tuple = get_queue_type_tuple(q_ctx_idx); + /* Get the queue priority for this 3-tuple */ + prio = q_prio[q_ctx_idx % Q_PRIO_LEVELS]; + + /* + * Create the packet-IO (input/Rx) queue + * for 'eo_1st' for this flow + */ + queue_type = q_type_tuple->queue_type_1st; + if (queue_type == EM_QUEUE_TYPE_LOCAL) + queue_group = EM_QUEUE_GROUP_UNDEF; + else + queue_group = EM_QUEUE_GROUP_DEFAULT; + queue_1st = em_queue_create("udp_port", queue_type, + prio, queue_group, NULL); + test_fatal_if(queue_1st == EM_QUEUE_UNDEF, + "1.Queue create fail: UDP-port %d", + udp_port); + + q_ctx_1st = &pkt_shm->eo_q_ctx_1st[q_ctx_idx]; + ret = em_queue_set_context(queue_1st, q_ctx_1st); + test_fatal_if(ret != EM_OK, + "Queue-ctx set failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue_1st); + + ret = em_eo_add_queue_sync(eo_1st, queue_1st); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo_1st, queue_1st); + + /* + * Create the middle queue for 'eo_2nd' for this flow + */ + queue_type = q_type_tuple->queue_type_2nd; + if (queue_type == EM_QUEUE_TYPE_LOCAL) + queue_group = EM_QUEUE_GROUP_UNDEF; + else + queue_group = EM_QUEUE_GROUP_DEFAULT; + queue_2nd = em_queue_create("udp_port", queue_type, + prio, queue_group, NULL); + test_fatal_if(queue_2nd == EM_QUEUE_UNDEF, + "2.Queue create fail: UDP-port %d", + udp_port); + + q_ctx_2nd = &pkt_shm->eo_q_ctx_2nd[q_ctx_idx]; + ret = em_queue_set_context(queue_2nd, q_ctx_2nd); + test_fatal_if(ret != EM_OK, + "Q-ctx set failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue_2nd); + + ret = em_eo_add_queue_sync(eo_2nd, queue_2nd); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo_2nd, queue_2nd); + + /* Save stage1 dst queue */ + q_ctx_1st->dst_queue = queue_2nd; + + /* + * Create the last queue for 'eo_3rd' for this flow, + * eo-3rd sends the event/packet out to where it + * originally came from + */ + queue_type = q_type_tuple->queue_type_3rd; + if (queue_type == EM_QUEUE_TYPE_LOCAL) + 
queue_group = EM_QUEUE_GROUP_UNDEF; + else + queue_group = EM_QUEUE_GROUP_DEFAULT; + queue_3rd = em_queue_create("udp_port", queue_type, + prio, queue_group, NULL); + test_fatal_if(queue_3rd == EM_QUEUE_UNDEF, + "3.Queue create fail: UDP-port %d", + udp_port); + + q_ctx_3rd = &pkt_shm->eo_q_ctx_3rd[q_ctx_idx]; + ret = em_queue_set_context(queue_3rd, q_ctx_3rd); + test_fatal_if(ret != EM_OK, + "Q-ctx set failed:%" PRI_STAT "\n" + "EO-q-ctx:%d Q:%" PRI_QUEUE "", + ret, q_ctx_idx, queue_3rd); + + ret = em_eo_add_queue_sync(eo_3rd, queue_3rd); + test_fatal_if(ret != EM_OK, + "Add queue failed:%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, eo_3rd, queue_3rd); + + /* Save stage2 dst queue */ + q_ctx_2nd->dst_queue = queue_3rd; + + /* + * Set the pktout queues to use for this queue, + * one pktout queue per interface. + */ + set_pktout_queues(q_ctx_idx, + q_ctx_3rd->pktout_queue/*out*/); + + /* + * Direct this ip_addr:udp_port into the first queue + */ + pktio_add_queue(IPV4_PROTO_UDP, ip_addr, udp_port, + queue_1st); + + /* Save the flow params for debug checks in Rx */ + q_ctx_1st->flow_params.ipv4 = ip_addr; + q_ctx_1st->flow_params.port = udp_port; + q_ctx_1st->flow_params.proto = IPV4_PROTO_UDP; + + /* Sanity checks (lookup what was configured above) */ + tmp_q = pktio_lookup_sw(IPV4_PROTO_UDP, + ip_addr, udp_port); + test_fatal_if(tmp_q == EM_QUEUE_UNDEF || + tmp_q != queue_1st, + "Lookup fails IP:UDP %s:%d\n" + "Q:%" PRI_QUEUE "!=%" PRI_QUEUE "", + ip_str, udp_port, queue_1st, + tmp_q); + /* Print first and last mapping */ + if (q_ctx_idx == 0 || + q_ctx_idx == (NUM_IP_ADDRS * NUM_PORTS_PER_IP - 1)) + APPL_PRINT("IP:prt->Q %s:%u->%" PRI_QUEUE "\n", + ip_str, udp_port, tmp_q); + + /* Update the Queue Context Index for the next round*/ + q_ctx_idx++; + } + } + + /* + * Direct all non-lookup hit packets into this queue. 
+	 * Note: all packets that do not match any of the configured flows
+	 * end up in this queue.
+	 */
+	pktio_default_queue(default_queue);
+
+	env_sync_mem();
+}
+
+void
+test_stop(appl_conf_t *const appl_conf)
+{
+	const int core = em_core_id();
+	em_status_t ret;
+	em_queue_t pktout_queue;
+	int if_id;
+	int i, j;
+
+	(void)appl_conf;
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	for (i = 0; i < PROCESSING_STAGES; i++) {
+		em_eo_t eo = pkt_shm->eo_ctx[i].eo;
+
+		ret = em_eo_stop_sync(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret);
+
+		ret = em_eo_delete(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret);
+	}
+
+	for (i = 0; i < pkt_shm->if_count; i++) {
+		if_id = pkt_shm->if_ids[i];
+		for (j = 0; j < pkt_shm->pktout_queues_per_if; j++) {
+			/* pktout queue tied to interface id 'if_id' */
+			pktout_queue = pkt_shm->pktout_queue[if_id][j];
+			test_fatal_if(pktout_queue == EM_QUEUE_UNDEF,
+				      "Pktout queue undef:%d,%d", i, j);
+			ret = em_queue_delete(pktout_queue);
+			test_fatal_if(ret != EM_OK,
+				      "Pktout queue delete failed:%d,%d", i, j);
+		}
+	}
+}
+
+void
+test_term(void)
+{
+	int core = em_core_id();
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	if (core == 0) {
+		env_shared_free(pkt_shm);
+		em_unregister_error_handler();
+	}
+}
+
+/**
+ * EO start function (run once at startup on ONE core)
+ */
+static em_status_t
+start_eo(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	eo_context_t *eo_ctx = eo_context;
+
+	(void)conf;
+
+	APPL_PRINT("EO %" PRI_EO " starting.\n", eo);
+
+	eo_ctx->eo = eo;
+	/* eo_ctx->default_queue = stored earlier in test_start() */
+
+	env_sync_mem();
+
+	return EM_OK;
+}
+
+/**
+ * EO Local start function (run once at startup on EACH core)
+ */
+static em_status_t
+start_eo_local(void *eo_context, em_eo_t eo)
+{
+	(void)eo_context;
+
+	APPL_PRINT("Core%i: EO %" PRI_EO " local start.\n", em_core_id(), eo);
+
+	return EM_OK;
+}
+
+/**
+ * EO stop function
+ */
+static em_status_t
+stop_eo(void *eo_context, em_eo_t eo)
+{
+	em_status_t ret;
+
+	(void)eo_context;
+
+	APPL_PRINT("EO %" PRI_EO " stopping.\n", eo);
+
+	/* remove and delete all of the EO's queues */
+	ret = em_eo_remove_queue_all_sync(eo, EM_TRUE);
+	test_fatal_if(ret != EM_OK,
+		      "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "",
+		      ret, eo);
+
+	return EM_OK;
+}
+
+/**
+ * EO local stop function
+ */
+static em_status_t
+stop_eo_local(void *eo_context, em_eo_t eo)
+{
+	(void)eo_context;
+
+	APPL_PRINT("Core%i: EO %" PRI_EO " local stop.\n", em_core_id(), eo);
+
+	return EM_OK;
+}
+
+/**
+ * EO_1st receive function
+ */
+static void
+receive_packet_eo_1st(void *eo_context, em_event_t event, em_event_type_t type,
+		      em_queue_t queue, void *queue_context)
+{
+	eo_context_t *const eo_ctx = eo_context;
+	queue_context_1st_t *const q_ctx = queue_context;
+	em_status_t status;
+
+	(void)type;
+
+	if (unlikely(appl_shm->exit_flag)) {
+		em_free(event);
+		return;
+	}
+
+	/* Drop everything from the default queue */
+	if (unlikely(queue == eo_ctx->default_queue)) {
+		static ENV_LOCAL uint64_t drop_cnt = 1;
+
+		/*
+		 * Print notice about pkt drop for the first pkt only to avoid
+		 * flooding the terminal with prints.
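+		 * Note: 'drop_cnt' is ENV_LOCAL, i.e. core-local, so the
+		 * notice may be printed once per EM-core.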
+		 */
+		if (drop_cnt == 1) {
+			uint8_t proto;
+			uint32_t ipv4_dst;
+			uint16_t port_dst;
+			char ip_str[sizeof("255.255.255.255")];
+
+			pktio_get_dst(event, &proto, &ipv4_dst, &port_dst);
+			ipaddr_tostr(ipv4_dst, ip_str, sizeof(ip_str));
+			APPL_PRINT("Drop: pkt received from %s:%u, core%d\n",
+				   ip_str, port_dst, em_core_id());
+		}
+		em_free(event);
+		drop_cnt++;
+		return;
+	}
+
+	if (ENABLE_ERROR_CHECKS) { /* Check IP address and port */
+		uint8_t proto;
+		uint32_t ipv4_dst;
+		uint16_t port_dst;
+		flow_params_t *const fp = &q_ctx->flow_params;
+
+		pktio_get_dst(event, &proto, &ipv4_dst, &port_dst);
+
+		test_fatal_if(fp->ipv4 != ipv4_dst ||
+			      fp->port != port_dst || fp->proto != proto,
+			      "Q:%" PRI_QUEUE " received illegal packet!\n"
+			      "rcv: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+			      "cfg: IP:0x%" PRIx32 ":%" PRIu16 ".%" PRIu8 "\n"
+			      "Abort!", queue, ipv4_dst, port_dst, proto,
+			      fp->ipv4, fp->port, fp->proto);
+	}
+
+	/* Send to the next stage for further processing. */
+	status = em_send(event, q_ctx->dst_queue);
+
+	if (unlikely(status != EM_OK))
+		em_free(event);
+}
+
+/**
+ * EO_2nd receive function
+ */
+static void
+receive_packet_eo_2nd(void *eo_context, em_event_t event, em_event_type_t type,
+		      em_queue_t queue, void *queue_context)
+{
+	queue_context_2nd_t *const q_ctx = queue_context;
+	em_status_t status;
+
+	(void)type;
+	(void)eo_context;
+	(void)queue;
+
+	if (unlikely(appl_shm->exit_flag)) {
+		em_free(event);
+		return;
+	}
+
+	/* Send to the next stage for further processing. */
+	status = em_send(event, q_ctx->dst_queue);
+
+	if (unlikely(status != EM_OK))
+		em_free(event);
+}
+
+/**
+ * EO_3rd receive function
+ */
+static void
+receive_packet_eo_3rd(void *eo_context, em_event_t event, em_event_type_t type,
+		      em_queue_t queue, void *queue_context)
+{
+	queue_context_3rd_t *const q_ctx = queue_context;
+	int in_port;
+	int out_port;
+	em_queue_t pktout_queue;
+	em_status_t status;
+
+	(void)type;
+	(void)eo_context;
+	(void)queue;
+
+	if (unlikely(appl_shm->exit_flag)) {
+		em_free(event);
+		return;
+	}
+
+	in_port = pktio_input_port(event);
+
+	if (X_CONNECT_PORTS)
+		out_port = IS_EVEN(in_port) ? in_port + 1 : in_port - 1;
+	else
+		out_port = in_port;
+
+	pktout_queue = q_ctx->pktout_queue[out_port];
+
+	/* Touch packet. Swap MAC, IP-addrs and UDP-ports: src<->dst */
+	pktio_swap_addrs(event);
+
+	if (ALLOC_COPY_FREE)
+		event = alloc_copy_free(event);
+
+	/*
+	 * Send the packet buffer back out via the pktout queue through
+	 * the 'out_port'
+	 */
+	status = em_send(event, pktout_queue);
+	if (unlikely(status != EM_OK))
+		em_free(event);
+}
+
+/**
+ * Alloc a new event, copy the contents & header into the new event
+ * and finally free the original event. Returns a pointer to the new event.
+ *
+ * Used for testing the performance impact of alloc-copy-free operations.
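+ * Only exercised when ALLOC_COPY_FREE is configured to '1'.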
+ */
+static inline em_event_t
+alloc_copy_free(em_event_t event)
+{
+	/* Copy the packet event */
+	em_event_t new_event = pktio_copy_event(event);
+
+	/* Free old event */
+	em_free(event);
+
+	return new_event;
+}
+
+/**
+ * Helper func to determine queue types at startup
+ */
+static queue_type_tuple_t *
+get_queue_type_tuple(int cnt)
+{
+	if (!QUEUE_TYPE_MIX) /* Always return the same kind of Queue types */
+		return &pkt_shm->q_type_permutations[0];
+
+	/* Spread out over the different queue-types */
+	const int idx = cnt % QUEUE_TYPE_PERMUTATIONS;
+
+	return &pkt_shm->q_type_permutations[idx];
+}
+
+/**
+ * Helper func to initialize the Queue Type permutations array
+ *
+ * 4 queue types gives 4*4*4=64 permutations - store these.
+ */
+static void
+fill_q_type_permutations(void)
+{
+	queue_type_tuple_t *tuple;
+
+	if (!QUEUE_TYPE_MIX) {
+		tuple = &pkt_shm->q_type_permutations[0];
+		/* Use the same type of queues everywhere. */
+		tuple->queue_type_1st = QUEUE_TYPE;
+		tuple->queue_type_2nd = QUEUE_TYPE;
+		tuple->queue_type_3rd = QUEUE_TYPE;
+		return;
+	}
+
+	int i, j, k;
+	em_queue_type_t queue_type_1st, queue_type_2nd, queue_type_3rd;
+	int nbr_q = 0;
+
+	for (i = 0; i < 4; i++) {
+		for (j = 0; j < 4; j++) {
+			for (k = 0; k < 4; k++, nbr_q++) {
+				queue_type_1st = queue_types(i);
+				queue_type_2nd = queue_types(j);
+				queue_type_3rd = queue_types(k);
+
+				tuple = &pkt_shm->q_type_permutations[nbr_q];
+				tuple->queue_type_1st = queue_type_1st;
+				tuple->queue_type_2nd = queue_type_2nd;
+				tuple->queue_type_3rd = queue_type_3rd;
+			}
+		}
+	}
+}
+
+/**
+ * Helper func, returns a Queue Type based on the input count.
+ * Note: 'cnt % 4' is needed to cover all four queue types; with '% 3'
+ * EM_QUEUE_TYPE_PARALLEL_ORDERED would never be returned.
+ */
+static em_queue_type_t
+queue_types(int cnt)
+{
+	switch (cnt % 4) {
+	case 0:
+		return EM_QUEUE_TYPE_LOCAL;
+	case 1:
+		return EM_QUEUE_TYPE_ATOMIC;
+	case 2:
+		return EM_QUEUE_TYPE_PARALLEL;
+	default:
+		return EM_QUEUE_TYPE_PARALLEL_ORDERED;
+	}
+}
+
+/**
+ * Helper func to store the packet output queues for a specific input queue
+ */
+static void
+set_pktout_queues(int q_idx, em_queue_t pktout_queue[/*out*/])
+{
+	int if_count = pkt_shm->if_count;
+	int pktout_idx = q_idx % pkt_shm->pktout_queues_per_if;
+	int id, i;
+
+	for (i = 0; i < if_count; i++) {
+		id = pkt_shm->if_ids[i];
+		pktout_queue[id] = pkt_shm->pktout_queue[id][pktout_idx];
+	}
+}
+
+static em_status_t
+mstage_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope,
+		     va_list args)
+{
+	/*
+	 * Don't report/log/print em_send() errors, instead return the error
+	 * code and let the application free the event that failed to be sent.
+	 * This avoids a print/log storm in an overloaded situation, i.e. when
+	 * sending input packets at a higher rate than can be sustained.
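+	 * Fatal errors, and errors from all other escopes, are still passed
+	 * on to the common test_error_handler() below.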
+ */ + if (!EM_ERROR_IS_FATAL(error) && + (escope == EM_ESCOPE_SEND || escope == EM_ESCOPE_SEND_MULTI)) + return error; + + return test_error_handler(eo, error, escope, args); +} diff --git a/programs/performance/Makefile.am b/programs/performance/Makefile.am index 7c29ac48..f4cd9006 100644 --- a/programs/performance/Makefile.am +++ b/programs/performance/Makefile.am @@ -4,12 +4,14 @@ noinst_PROGRAMS = atomic_processing_end \ pairs \ loop \ loop_multircv \ + loop_refs \ queue_groups \ queues \ queues_unscheduled \ queues_local \ send_multi \ - timer_test_periodic + timer_test_periodic \ + scheduling_latency atomic_processing_end_LDFLAGS = $(AM_LDFLAGS) atomic_processing_end_CFLAGS = $(AM_CFLAGS) @@ -23,6 +25,9 @@ loop_CFLAGS = $(AM_CFLAGS) loop_multircv_LDFLAGS = $(AM_LDFLAGS) loop_multircv_CFLAGS = $(AM_CFLAGS) +loop_refs_LDFLAGS = $(AM_LDFLAGS) +loop_refs_CFLAGS = $(AM_CFLAGS) + queue_groups_LDFLAGS = $(AM_LDFLAGS) queue_groups_CFLAGS = $(AM_CFLAGS) @@ -41,13 +46,18 @@ send_multi_CFLAGS = $(AM_CFLAGS) timer_test_periodic_LDFLAGS = $(AM_LDFLAGS) -lm timer_test_periodic_CFLAGS = $(AM_CFLAGS) +scheduling_latency_LDFLAGS = $(AM_LDFLAGS) -lm +scheduling_latency_CFLAGS = $(AM_CFLAGS) + dist_atomic_processing_end_SOURCES = atomic_processing_end.c dist_pairs_SOURCES = pairs.c dist_loop_SOURCES = loop.c dist_loop_multircv_SOURCES = loop_multircv.c +dist_loop_refs_SOURCES = loop_refs.c dist_queue_groups_SOURCES = queue_groups.c dist_queues_SOURCES = queues.c dist_queues_unscheduled_SOURCES = queues_unscheduled.c dist_queues_local_SOURCES = queues_local.c dist_send_multi_SOURCES = send_multi.c dist_timer_test_periodic_SOURCES = timer_test_periodic.c timer_test_periodic.h +scheduling_latency_SOURCES = scheduling_latency.c scheduling_latency.h diff --git a/programs/performance/atomic_processing_end.c b/programs/performance/atomic_processing_end.c index 9f42d4fd..d9516b3a 100644 --- a/programs/performance/atomic_processing_end.c +++ b/programs/performance/atomic_processing_end.c @@ -1,810 +1,809 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine em_atomic_processing_end() example - * - * Measures the average cycles consumed during an event send-sched-receive loop - * for an EO pair using atomic queues and alternating between calling - * em_atomic_processing_end() and not calling it. - * Each EO's receive function will do some dummy work for each received event. - * The em_atomic_processing_end() is called before processing the dummy work to - * allow another core to continue processing events from the queue. Not calling - * em_atomic_processing_end() with a low number of queues and long per-event - * processing times will limit the throughput and the cycles/event result. - * For comparison, both results with em_atomic_processing_end()-calling enabled - * and disabled is shown. - * Note: Calling em_atomic_processing_end() will normally give worse - * performance except in cases when atomic event processing becomes a - * bottleneck by blocking other cores from doing their work (as this test - * tries to demonstrate). - */ - -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -/* - * Test configuration - */ - -/** - * Number of test EOs and queues. Must be an even number. - * Create a BOTTLENECK for atomic processing in the system by - * keeping the number smaller than the available cores. - */ -#define NUM_EO 2 /* BOTTLENECK */ - -/** Number of ping-pong events per EO pair. 
*/ -#define NUM_EVENT 512 - -/** Number of data bytes in the event */ -#define DATA_SIZE 512 - -/** Max number of cores */ -#define MAX_NBR_OF_CORES 256 - -/** - * Number of "work" iterations to do per received event to show the - * possible benefit of em_atomic_processing_end() - */ -#define WORK_LOOPS 40 /* cause long per-event processing time */ - -/* The number of events to be received per core before printing results */ -#define PRINT_EVENT_COUNT 0x20000 - -/** Define how many events are sent per em_send_multi() call */ -#define SEND_MULTI_MAX 32 - -/* - * Per event processing options - */ - -/* Check sequence numbers, works only with atomic queues */ -#define CHECK_SEQ_PER_EVENT 1 /* 0=False or 1=True */ - -/** - * Performance test statistics (per core) - */ -typedef union { - uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - struct { - uint64_t events; - uint64_t begin_cycles; - uint64_t end_cycles; - uint64_t print_count; - int atomic_processing_end; - int rounds; - int ready; - double cycles_per_event; - }; -} perf_stat_t; - -COMPILE_TIME_ASSERT(sizeof(perf_stat_t) == ENV_CACHE_LINE_SIZE, - PERF_STAT_T_SIZE_ERROR); - -/** - * Performance test EO context - */ -typedef struct { - /* EO context id */ - em_eo_t id; - /* Next sequence number (used with CHECK_SEQ_PER_EVENT) */ - int next_seq; - /* at startup: EO-A should allocate and send the test events */ - int initialize_events; -} eo_context_t; - -/** - * EO context padded to cache line size - */ -typedef union { - uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED; - eo_context_t eo_ctx; -} eo_context_array_elem_t; - -COMPILE_TIME_ASSERT(sizeof(eo_context_array_elem_t) == ENV_CACHE_LINE_SIZE, - PERF_EO_CONTEXT_SIZE_ERROR); - -/** - * Performance test event - */ -typedef struct { - /* Next destination queue */ - em_queue_t dest; - /* Sequence number */ - int seq; - /* Test data */ - uint8_t data[DATA_SIZE]; -} perf_event_t; - -/** - * Perf test shared memory - */ -typedef struct { - /* Event pool used by this application */ - em_pool_t pool; - /* EO context array */ - eo_context_array_elem_t perf_eo_context[NUM_EO] ENV_CACHE_LINE_ALIGNED; - /* Array of core specific data accessed by using its core index */ - perf_stat_t core_stat[MAX_NBR_OF_CORES] ENV_CACHE_LINE_ALIGNED; - /* Track the number of cores ready with the current measurement rounds*/ - env_atomic64_t ready_count ENV_CACHE_LINE_ALIGNED; - /* Track the number of cores that have seen that all others are ready */ - env_atomic64_t seen_all_ready; - /* Pad to size to a multiple of cache lines */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} perf_shm_t; - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL perf_shm_t *perf_shm; - -/* - * Local function prototypes - */ -static em_status_t -perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -perf_stop(void *eo_context, em_eo_t eo); - -static void -initialize_events(em_queue_t queue_a, em_queue_t queue_b); - -static void -perf_receive_a(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -perf_receive_b(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); -static void -calc_result(perf_stat_t *const perf_stat, const uint64_t events); - -static void -print_result(perf_stat_t *const perf_stat); - -static int -get_queue_priority(const int index); - -static void -check_seq_per_event(eo_context_t *const eo_ctx, perf_event_t *const perf, - em_queue_t queue); -static void 
-do_dummy_work(unsigned int work_loops); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Pairs performance test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - perf_shm = env_shared_reserve("PerfSharedMem", - sizeof(perf_shm_t)); - em_register_error_handler(test_error_handler); - } else { - perf_shm = env_shared_lookup("PerfSharedMem"); - } - - if (perf_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Perf init failed on EM-core:%u\n", em_core_id()); - } else if (core == 0) { - memset(perf_shm, 0, sizeof(perf_shm_t)); - env_atomic64_init(&perf_shm->ready_count); - env_atomic64_init(&perf_shm->seen_all_ready); - } -} - -/** - * Startup of the Pairs performance test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - em_queue_t queue_a, queue_b; - em_status_t ret, start_ret = EM_ERROR; - eo_context_t *eo_ctx; - int i; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - perf_shm->pool = appl_conf->pools[0]; - else - perf_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - perf_shm->pool); - - test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - /* - * Create and start application pairs - * Send initial test events to the queues - */ - for (i = 0; i < NUM_EO / 2; i++) { - char eo_name[EM_EO_NAME_LEN]; - char queue_name[EM_QUEUE_NAME_LEN]; - em_event_t start_event; - perf_event_t *perf; - - /* Create EO "A" */ - eo_ctx = &perf_shm->perf_eo_context[2 * i].eo_ctx; - eo_ctx->initialize_events = 1; /* EO to init events at start */ - eo_ctx->next_seq = 0; - - snprintf(eo_name, sizeof(eo_name), "EO-A%i", i); - eo_name[sizeof(eo_name) - 1] = '\0'; - eo = em_eo_create(eo_name, perf_start, NULL, perf_stop, NULL, - perf_receive_a, eo_ctx); - - snprintf(queue_name, sizeof(queue_name), "Q-A%i", i); - queue_name[sizeof(queue_name) - 1] = '\0'; - queue_a = em_queue_create(queue_name, EM_QUEUE_TYPE_ATOMIC, - get_queue_priority(i), - EM_QUEUE_GROUP_DEFAULT, NULL); - - ret = em_eo_add_queue_sync(eo, queue_a); - test_fatal_if(ret != EM_OK, - "EO or Q creation failed:%" PRI_STAT "\n" - "EO:%" PRI_EO " queue:%" PRI_QUEUE "", - ret, eo, queue_a); - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start failed:%" PRI_STAT " %" PRI_STAT "\n" - "EO:%" PRI_EO "", ret, start_ret, eo); - - /* Create EO "B" */ - eo_ctx = &perf_shm->perf_eo_context[2 * i + 1].eo_ctx; - eo_ctx->next_seq = 0; - - 
snprintf(eo_name, sizeof(eo_name), "EO-B%i", i); - eo_name[sizeof(eo_name) - 1] = '\0'; - eo = em_eo_create(eo_name, perf_start, NULL, perf_stop, NULL, - perf_receive_b, eo_ctx); - - snprintf(queue_name, sizeof(queue_name), "Q-B%i", i); - queue_name[sizeof(queue_name) - 1] = '\0'; - queue_b = em_queue_create(queue_name, EM_QUEUE_TYPE_ATOMIC, - get_queue_priority(i), - EM_QUEUE_GROUP_DEFAULT, NULL); - - ret = em_eo_add_queue_sync(eo, queue_b); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " queue:%" PRI_QUEUE "", - ret, eo, queue_b); - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start failed:%" PRI_STAT " %" PRI_STAT "\n" - "EO: %" PRI_EO "", ret, start_ret, eo); - - start_event = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(start_event == EM_EVENT_UNDEF, - "Start event alloc failed"); - perf = em_event_pointer(start_event); - perf->seq = 0; - perf->dest = queue_b; /* EO-A sends to queue-B */ - - ret = em_send(start_event, queue_a); - test_fatal_if(ret != EM_OK, - "Start event send:%" PRI_STAT "\n" - "Queue:%" PRI_QUEUE "", - ret, queue_a); - } - env_sync_mem(); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - int i; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - /* Stop all EOs to disable dipatch from all the EOs' queues */ - for (i = 0; i < NUM_EO; i++) { - eo = perf_shm->perf_eo_context[i].eo_ctx.id; - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - } - - for (i = 0; i < NUM_EO; i++) { - eo = perf_shm->perf_eo_context[i].eo_ctx.id; - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO rem-Q-all-sync:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - } -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(perf_shm); - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. - * - */ -static em_status_t -perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - char eo_name[EM_EO_NAME_LEN]; - - (void)conf; - - em_eo_get_name(eo, eo_name, sizeof(eo_name)); - APPL_PRINT("%s (id:%" PRI_EO ") starting.\n", eo_name, eo); - - eo_ctx->id = eo; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - * - */ -static em_status_t -perf_stop(void *eo_context, em_eo_t eo) -{ - char eo_name[EM_EO_NAME_LEN]; - - (void)eo_context; - - em_eo_get_name(eo, eo_name, sizeof(eo_name)); - APPL_PRINT("%s (id:%" PRI_EO ") stopping.\n", eo_name, eo); - - return EM_OK; -} - -static void -initialize_events(em_queue_t queue_a, em_queue_t queue_b) -{ - /* tmp storage for allocated events to send */ - em_event_t events[NUM_EVENT]; - int i; - - for (i = 0; i < NUM_EVENT; i++) { - perf_event_t *perf; - - events[i] = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(events[i] == EM_EVENT_UNDEF, - "Event alloc failed (%d)", i); - perf = em_event_pointer(events[i]); - perf->seq = i; - perf->dest = queue_b; /* EO-A sends to queue-B */ - } - - /* - * Send the test events first to EO-A's queue-A. 
- * Send in bursts of 'SEND_MULTI_MAX' events. - */ - const int send_rounds = NUM_EVENT / SEND_MULTI_MAX; - const int left_over = NUM_EVENT % SEND_MULTI_MAX; - int num_sent = 0; - - for (i = 0; i < send_rounds; i++) { - num_sent += em_send_multi(&events[num_sent], SEND_MULTI_MAX, - queue_a); - } - if (left_over) { - num_sent += em_send_multi(&events[num_sent], left_over, - queue_a); - } - if (unlikely(num_sent != NUM_EVENT)) { - test_fatal_if(!appl_shm->exit_flag, - "Event send multi failed:%d (%d)\n" - "Q:%" PRI_QUEUE "", - num_sent, NUM_EVENT, queue_a); - for (i = num_sent; i < NUM_EVENT; i++) - em_free(events[i]); - } -} - -/** - * @private - * - * EO receive function for EO A. - * - * Loops back events and calculates the event rate. - */ -static void -perf_receive_a(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx) -{ - const int core = em_core_id(); - perf_event_t *const perf = em_event_pointer(event); - uint64_t events = perf_shm->core_stat[core].events; - int call_atomic_processing_end = - perf_shm->core_stat[core].atomic_processing_end; - int ready = perf_shm->core_stat[core].ready; - uint64_t ready_count; - em_queue_t dest_queue; - em_status_t ret; - - (void)type; - (void)q_ctx; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (unlikely(events == 0)) { - eo_context_t *const eo_ctx = eo_context; - - if (unlikely(eo_ctx->initialize_events)) { - /* start-up: initialize the perf event sending */ - eo_ctx->initialize_events = 0; - initialize_events(queue, perf->dest); - em_free(event); - return; - } - perf_shm->core_stat[core].begin_cycles = env_get_cycle(); - } else if (unlikely(!ready && events > PRINT_EVENT_COUNT)) { - /* Measurement done, collect cycle count */ - perf_shm->core_stat[core].end_cycles = env_get_cycle(); - /* - * Three measurement rounds: calculate results only for the - * middle round. - * Trigger core-sync after the last round to have all cores - * in the same mode for the next three rounds. 
- */ - int rounds = perf_shm->core_stat[core].rounds++; - - if (rounds % 3 == 1) { - /* Calculate results for middle round */ - calc_result(&perf_shm->core_stat[core], events); - } else if (rounds % 3 == 2) { - /* Print earlier calculated results after last round */ - print_result(&perf_shm->core_stat[core]); - /* Mark that the core is ready with all rounds */ - ready = 1; - perf_shm->core_stat[core].ready = 1; - env_atomic64_inc(&perf_shm->ready_count); - } - } - - events++; - - if (CHECK_SEQ_PER_EVENT) - check_seq_per_event(eo_context, perf, queue); - - dest_queue = perf->dest; - perf->dest = queue; - - perf_shm->core_stat[core].events = events; - - ret = em_send(event, dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, dest_queue); - return; - } - - if (call_atomic_processing_end) - em_atomic_processing_end(); - - if (unlikely(ready)) { - /* core ready with rounds, check if other cores are also ready*/ - ready_count = env_atomic64_get(&perf_shm->ready_count); - - if (ready_count == (uint64_t)em_core_count()) { - /* Change mode after last round */ - perf_shm->core_stat[core].atomic_processing_end = - !call_atomic_processing_end; - perf_shm->core_stat[core].ready = 0; - events = 0; - perf_shm->core_stat[core].events = 0; - - /* Track that all cores have seen that all are ready */ - uint64_t seen_all_ready = - env_atomic64_add_return(&perf_shm->seen_all_ready, 1); - - /* Last core to see 'all ready' resets the counters */ - if (seen_all_ready == (uint64_t)em_core_count()) { - env_atomic64_set(&perf_shm->ready_count, 0); - env_atomic64_set(&perf_shm->seen_all_ready, 0); - } - } - } - - /* - * Give a hint to the scheduler indicating that event - * processing on this core will soon be finished and the - * scheduler could start preparing the next event for this - * core already now to reduce latency etc. The em_preschedule() - * call might only be meaningful with HW schedulers. - */ - em_preschedule(); - - /* Do some dummy processing */ - do_dummy_work(WORK_LOOPS); -} - -/** - * @private - * - * EO receive function for EO B. - * - * Loops back events. - */ -static void -perf_receive_b(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx) -{ - const int core = em_core_id(); - perf_event_t *const perf = em_event_pointer(event); - const int call_atomic_processing_end = - perf_shm->core_stat[core].atomic_processing_end; - uint64_t events = perf_shm->core_stat[core].events; - em_queue_t dest_queue; - em_status_t ret; - (void)type; - (void)q_ctx; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (unlikely(events == 0)) { - /* Restart the measurement */ - perf_shm->core_stat[core].begin_cycles = env_get_cycle(); - } - - events++; - - if (CHECK_SEQ_PER_EVENT) - check_seq_per_event(eo_context, perf, queue); - - dest_queue = perf->dest; - perf->dest = queue; - - perf_shm->core_stat[core].events = events; - - ret = em_send(event, dest_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, dest_queue); - return; - } - - if (call_atomic_processing_end) - em_atomic_processing_end(); - - /* - * Give a hint to the scheduler indicating that event processing on - * this core will soon be finished and the scheduler could start - * preparing the next event for this core already now to reduce - * latency etc. 
The em_preschedule() call might only be meaningful - * with HW schedulers. - */ - em_preschedule(); - - /* Do some dummy processing */ - do_dummy_work(WORK_LOOPS); -} - -static void -check_seq_per_event(eo_context_t *const eo_ctx, perf_event_t *const perf, - em_queue_t queue) -{ - int seq = perf->seq; - - if (unlikely(seq != eo_ctx->next_seq)) { - char eo_name[EM_EO_NAME_LEN]; - char queue_name[EM_QUEUE_NAME_LEN]; - - em_eo_get_name(eo_ctx->id, eo_name, sizeof(eo_name)); - em_queue_get_name(queue, queue_name, sizeof(queue_name)); - - APPL_PRINT("Bad sequence number. %s(id:%" PRI_EO "),\t" - "%s(id:%" PRI_QUEUE ") expected seq %i, event seq %i\n", - eo_name, eo_ctx->id, queue_name, queue, - eo_ctx->next_seq, seq); - } - - if (likely(eo_ctx->next_seq < (NUM_EVENT - 1))) - eo_ctx->next_seq++; - else - eo_ctx->next_seq = 0; -} - -static void -do_dummy_work(unsigned int work_loops) -{ - em_event_t workbuf_event; - perf_event_t *workbuf; - uint8_t *from, *to; - unsigned int i; - - for (i = 0; i < work_loops && !appl_shm->exit_flag; i++) { - /* Dummy workload after releasing atomic context */ - workbuf_event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - workbuf = em_event_pointer(workbuf_event); - from = &workbuf->data[DATA_SIZE / 2]; - to = &workbuf->data[0]; - memcpy(to, from, DATA_SIZE / 2); - em_free(workbuf_event); - } -} - -/** - * Prints test measurement result - */ -static void -calc_result(perf_stat_t *const perf_stat, const uint64_t events) -{ - uint64_t diff; - double cycles_per_event; - - if (perf_stat->end_cycles > perf_stat->begin_cycles) - diff = perf_stat->end_cycles - perf_stat->begin_cycles; - else - diff = UINT64_MAX - perf_stat->begin_cycles + - perf_stat->end_cycles + 1; - - cycles_per_event = ((double)diff) / ((double)events); - - perf_stat->cycles_per_event = cycles_per_event; -} - -/** - * Get queue priority value based on the index number. - * - * @param Queue index - * - * @return Queue priority value - * - * @note Priority distribution: 40% LOW, 40% NORMAL, 20% HIGH - */ -static int -get_queue_priority(const int queue_index) -{ - int remainder = queue_index % 5; - - if (remainder <= 1) - return EM_QUEUE_PRIO_LOW; - else if (remainder <= 3) - return EM_QUEUE_PRIO_NORMAL; - else - return EM_QUEUE_PRIO_HIGH; -} - -static void -print_result(perf_stat_t *const perf_stat) -{ - const uint32_t hz = env_core_hz(); - const double mhz = ((double)hz) / 1000000.0; - const double cycles_per_event = perf_stat->cycles_per_event; - const double events_per_sec = mhz * em_core_count() / - cycles_per_event; /* Million events/s*/ - const uint64_t print_count = perf_stat->print_count++; - - if (perf_stat->atomic_processing_end) { - APPL_PRINT("em_atomic_processing_end():%10.0f cycles/event\t" - "events/s:%.2f M @%.2f MHz (core-%02i %" PRIu64 ")\n", - cycles_per_event, events_per_sec, mhz, - em_core_id(), print_count); - } else { - APPL_PRINT("normal atomic processing:%12.0f cycles/event\t" - "events/s:%.2f M @%.2f MHz (core-%02i %" PRIu64 ")\n", - cycles_per_event, events_per_sec, - mhz, em_core_id(), print_count); - } -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine em_atomic_processing_end() example
+ *
+ * Measures the average cycles consumed during an event send-sched-receive
+ * loop for an EO pair using atomic queues, alternating between calling
+ * em_atomic_processing_end() and not calling it.
+ * Each EO's receive function will do some dummy work for each received event.
+ * em_atomic_processing_end() is called before processing the dummy work to
+ * allow another core to continue processing events from the queue. Not
+ * calling em_atomic_processing_end() with a low number of queues and long
+ * per-event processing times will limit the throughput and the cycles/event
+ * result.
+ * For comparison, the results both with and without calling
+ * em_atomic_processing_end() are shown.
+ * Note: Calling em_atomic_processing_end() will normally give worse
+ * performance, except in cases when atomic event processing becomes a
+ * bottleneck by blocking other cores from doing their work (as this test
+ * tries to demonstrate).
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/*
+ * Test configuration
+ */
+
+/**
+ * Number of test EOs and queues. Must be an even number.
+ * Create a BOTTLENECK for atomic processing in the system by
+ * keeping the number smaller than the number of available cores.
+ */
+#define NUM_EO 2 /* BOTTLENECK */
+
+/** Number of ping-pong events per EO pair.
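+ * All NUM_EVENT events of a pair circulate between its A- and B-queues.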
+ */
+#define NUM_EVENT 512
+
+/** Number of data bytes in the event */
+#define DATA_SIZE 512
+
+/** Max number of cores */
+#define MAX_NBR_OF_CORES 256
+
+/**
+ * Number of "work" iterations to do per received event to show the
+ * possible benefit of em_atomic_processing_end()
+ */
+#define WORK_LOOPS 40 /* cause long per-event processing time */
+
+/* The number of events to be received per core before printing results */
+#define PRINT_EVENT_COUNT 0x20000
+
+/** Define how many events are sent per em_send_multi() call */
+#define SEND_MULTI_MAX 32
+
+/*
+ * Per event processing options
+ */
+
+/* Check sequence numbers, works only with atomic queues */
+#define CHECK_SEQ_PER_EVENT 1 /* 0=False or 1=True */
+
+/**
+ * Performance test statistics (per core)
+ */
+typedef union {
+	uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED;
+	struct {
+		uint64_t events;
+		uint64_t begin_cycles;
+		uint64_t end_cycles;
+		uint64_t print_count;
+		int atomic_processing_end;
+		int rounds;
+		int ready;
+		double cycles_per_event;
+	};
+} perf_stat_t;
+
+COMPILE_TIME_ASSERT(sizeof(perf_stat_t) == ENV_CACHE_LINE_SIZE,
+		    PERF_STAT_T_SIZE_ERROR);
+
+/**
+ * Performance test EO context
+ */
+typedef struct {
+	/* EO context id */
+	em_eo_t id;
+	/* Next sequence number (used with CHECK_SEQ_PER_EVENT) */
+	int next_seq;
+	/* at startup: EO-A should allocate and send the test events */
+	int initialize_events;
+} eo_context_t;
+
+/**
+ * EO context padded to cache line size
+ */
+typedef union {
+	uint8_t u8[ENV_CACHE_LINE_SIZE] ENV_CACHE_LINE_ALIGNED;
+	eo_context_t eo_ctx;
+} eo_context_array_elem_t;
+
+COMPILE_TIME_ASSERT(sizeof(eo_context_array_elem_t) == ENV_CACHE_LINE_SIZE,
+		    PERF_EO_CONTEXT_SIZE_ERROR);
+
+/**
+ * Performance test event
+ */
+typedef struct {
+	/* Next destination queue */
+	em_queue_t dest;
+	/* Sequence number */
+	int seq;
+	/* Test data */
+	uint8_t data[DATA_SIZE];
+} perf_event_t;
+
+/**
+ * Perf test shared memory
+ */
+typedef struct {
+	/* Event pool used by this application */
+	em_pool_t pool;
+	/* EO context array */
+	eo_context_array_elem_t perf_eo_context[NUM_EO] ENV_CACHE_LINE_ALIGNED;
+	/* Array of core specific data accessed by using its core index */
+	perf_stat_t core_stat[MAX_NBR_OF_CORES] ENV_CACHE_LINE_ALIGNED;
+	/* Track the number of cores ready with the current measurement rounds*/
+	env_atomic64_t ready_count ENV_CACHE_LINE_ALIGNED;
+	/* Track the number of cores that have seen that all others are ready */
+	env_atomic64_t seen_all_ready;
+	/* Pad the size to a multiple of cache lines */
+	void *end[0] ENV_CACHE_LINE_ALIGNED;
+} perf_shm_t;
+
+/** EM-core local pointer to shared memory */
+static ENV_LOCAL perf_shm_t *perf_shm;
+
+/*
+ * Local function prototypes
+ */
+static em_status_t
+perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf);
+
+static em_status_t
+perf_stop(void *eo_context, em_eo_t eo);
+
+static void
+initialize_events(em_queue_t queue_a, em_queue_t queue_b);
+
+static void
+perf_receive_a(void *eo_context, em_event_t event, em_event_type_t type,
+	       em_queue_t queue, void *q_ctx);
+static void
+perf_receive_b(void *eo_context, em_event_t event, em_event_type_t type,
+	       em_queue_t queue, void *q_ctx);
+static void
+calc_result(perf_stat_t *const perf_stat, const uint64_t events);
+
+static void
+print_result(perf_stat_t *const perf_stat);
+
+static int
+get_queue_priority(const int index);
+
+static void
+check_seq_per_event(eo_context_t *const eo_ctx, perf_event_t *const perf,
+		    em_queue_t queue);
+static void
+do_dummy_work(unsigned int work_loops);
+
+/**
+ * Main function
+ *
+ * Call cm_setup() to perform test & EM setup common for all the
+ * test applications.
+ *
+ * cm_setup() will call test_init() and test_start() and launch
+ * the EM dispatch loop on every EM-core.
+ */
+int main(int argc, char *argv[])
+{
+	return cm_setup(argc, argv);
+}
+
+/**
+ * Init of the atomic_processing_end performance test application.
+ *
+ * @attention Run on all cores.
+ *
+ * @see cm_setup() for setup and dispatch.
+ */
+void
+test_init(void)
+{
+	int core = em_core_id();
+
+	if (core == 0) {
+		perf_shm = env_shared_reserve("PerfSharedMem",
+					      sizeof(perf_shm_t));
+		em_register_error_handler(test_error_handler);
+	} else {
+		perf_shm = env_shared_lookup("PerfSharedMem");
+	}
+
+	if (perf_shm == NULL) {
+		test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead,
+			   "Perf init failed on EM-core:%u\n", em_core_id());
+	} else if (core == 0) {
+		memset(perf_shm, 0, sizeof(perf_shm_t));
+		env_atomic64_init(&perf_shm->ready_count);
+		env_atomic64_init(&perf_shm->seen_all_ready);
+	}
+}
+
+/**
+ * Startup of the atomic_processing_end performance test application.
+ *
+ * @attention Run only on EM core 0.
+ *
+ * @param appl_conf Application configuration
+ *
+ * @see cm_setup() for setup and dispatch.
+ */
+void
+test_start(appl_conf_t *const appl_conf)
+{
+	em_eo_t eo;
+	em_queue_t queue_a, queue_b;
+	em_status_t ret, start_ret = EM_ERROR;
+	eo_context_t *eo_ctx;
+	int i;
+
+	/*
+	 * Store the event pool to use, use the EM default pool if no other
+	 * pool is provided through the appl_conf.
+	 */
+	if (appl_conf->num_pools >= 1)
+		perf_shm->pool = appl_conf->pools[0];
+	else
+		perf_shm->pool = EM_POOL_DEFAULT;
+
+	APPL_PRINT("\n"
+		   "***********************************************************\n"
+		   "EM APPLICATION: '%s' initializing:\n"
+		   "  %s: %s() - EM-core:%i\n"
+		   "  Application running on %d EM-cores (procs:%d, threads:%d)\n"
+		   "  using event pool:%" PRI_POOL "\n"
+		   "***********************************************************\n"
+		   "\n",
+		   appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(),
+		   em_core_count(),
+		   appl_conf->num_procs, appl_conf->num_threads,
+		   perf_shm->pool);
+
+	test_fatal_if(perf_shm->pool == EM_POOL_UNDEF,
+		      "Undefined application event pool!");
+
+	/*
+	 * Create and start application pairs
+	 * Send initial test events to the queues
+	 */
+	for (i = 0; i < NUM_EO / 2; i++) {
+		char eo_name[EM_EO_NAME_LEN];
+		char queue_name[EM_QUEUE_NAME_LEN];
+		em_event_t start_event;
+		perf_event_t *perf;
+
+		/* Create EO "A" */
+		eo_ctx = &perf_shm->perf_eo_context[2 * i].eo_ctx;
+		eo_ctx->initialize_events = 1; /* EO to init events at start */
+		eo_ctx->next_seq = 0;
+
+		snprintf(eo_name, sizeof(eo_name), "EO-A%i", i);
+		eo_name[sizeof(eo_name) - 1] = '\0';
+		eo = em_eo_create(eo_name, perf_start, NULL, perf_stop, NULL,
+				  perf_receive_a, eo_ctx);
+
+		snprintf(queue_name, sizeof(queue_name), "Q-A%i", i);
+		queue_name[sizeof(queue_name) - 1] = '\0';
+		queue_a = em_queue_create(queue_name, EM_QUEUE_TYPE_ATOMIC,
+					  get_queue_priority(i),
+					  EM_QUEUE_GROUP_DEFAULT, NULL);
+
+		ret = em_eo_add_queue_sync(eo, queue_a);
+		test_fatal_if(ret != EM_OK,
+			      "EO or Q creation failed:%" PRI_STAT "\n"
+			      "EO:%" PRI_EO " queue:%" PRI_QUEUE "",
+			      ret, eo, queue_a);
+
+		ret = em_eo_start_sync(eo, &start_ret, NULL);
+		test_fatal_if(ret != EM_OK || start_ret != EM_OK,
+			      "EO start failed:%" PRI_STAT " %" PRI_STAT "\n"
+			      "EO:%" PRI_EO "", ret, start_ret, eo);
+
+		/* Create EO "B" */
+		eo_ctx = &perf_shm->perf_eo_context[2 * i + 1].eo_ctx;
+		eo_ctx->next_seq = 0;
+
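+		/*
+		 * Note: EO-B does not initialize any events, it only loops
+		 * received events back; the test events are allocated and
+		 * first sent by EO-A (see initialize_events()).
+		 */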
+		snprintf(eo_name, sizeof(eo_name), "EO-B%i", i);
+		eo_name[sizeof(eo_name) - 1] = '\0';
+		eo = em_eo_create(eo_name, perf_start, NULL, perf_stop, NULL,
+				  perf_receive_b, eo_ctx);
+
+		snprintf(queue_name, sizeof(queue_name), "Q-B%i", i);
+		queue_name[sizeof(queue_name) - 1] = '\0';
+		queue_b = em_queue_create(queue_name, EM_QUEUE_TYPE_ATOMIC,
+					  get_queue_priority(i),
+					  EM_QUEUE_GROUP_DEFAULT, NULL);
+
+		ret = em_eo_add_queue_sync(eo, queue_b);
+		test_fatal_if(ret != EM_OK,
+			      "EO add queue:%" PRI_STAT "\n"
+			      "EO:%" PRI_EO " queue:%" PRI_QUEUE "",
+			      ret, eo, queue_b);
+
+		ret = em_eo_start_sync(eo, &start_ret, NULL);
+		test_fatal_if(ret != EM_OK || start_ret != EM_OK,
+			      "EO start failed:%" PRI_STAT " %" PRI_STAT "\n"
+			      "EO: %" PRI_EO "", ret, start_ret, eo);
+
+		start_event = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW,
+				       perf_shm->pool);
+		test_fatal_if(start_event == EM_EVENT_UNDEF,
+			      "Start event alloc failed");
+		perf = em_event_pointer(start_event);
+		perf->seq = 0;
+		perf->dest = queue_b; /* EO-A sends to queue-B */
+
+		ret = em_send(start_event, queue_a);
+		test_fatal_if(ret != EM_OK,
+			      "Start event send:%" PRI_STAT "\n"
+			      "Queue:%" PRI_QUEUE "",
+			      ret, queue_a);
+	}
+	env_sync_mem();
+}
+
+void
+test_stop(appl_conf_t *const appl_conf)
+{
+	const int core = em_core_id();
+	em_eo_t eo;
+	em_status_t ret;
+	int i;
+
+	(void)appl_conf;
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	/* Stop all EOs to disable dispatch from all the EOs' queues */
+	for (i = 0; i < NUM_EO; i++) {
+		eo = perf_shm->perf_eo_context[i].eo_ctx.id;
+
+		ret = em_eo_stop_sync(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret);
+	}
+
+	for (i = 0; i < NUM_EO; i++) {
+		eo = perf_shm->perf_eo_context[i].eo_ctx.id;
+
+		/* remove and delete all of the EO's queues */
+		ret = em_eo_remove_queue_all_sync(eo, EM_TRUE);
+		test_fatal_if(ret != EM_OK,
+			      "EO rem-Q-all-sync:%" PRI_STAT " EO:%" PRI_EO "",
+			      ret, eo);
+
+		ret = em_eo_delete(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret);
+	}
+}
+
+void
+test_term(void)
+{
+	int core = em_core_id();
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	if (core == 0) {
+		env_shared_free(perf_shm);
+		em_unregister_error_handler();
+	}
+}
+
+/**
+ * @private
+ *
+ * EO start function.
+ *
+ */
+static em_status_t
+perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	eo_context_t *eo_ctx = eo_context;
+	char eo_name[EM_EO_NAME_LEN];
+
+	(void)conf;
+
+	em_eo_get_name(eo, eo_name, sizeof(eo_name));
+	APPL_PRINT("%s (id:%" PRI_EO ") starting.\n", eo_name, eo);
+
+	eo_ctx->id = eo;
+
+	return EM_OK;
+}
+
+/**
+ * @private
+ *
+ * EO stop function.
+ *
+ */
+static em_status_t
+perf_stop(void *eo_context, em_eo_t eo)
+{
+	char eo_name[EM_EO_NAME_LEN];
+
+	(void)eo_context;
+
+	em_eo_get_name(eo, eo_name, sizeof(eo_name));
+	APPL_PRINT("%s (id:%" PRI_EO ") stopping.\n", eo_name, eo);
+
+	return EM_OK;
+}
+
+static void
+initialize_events(em_queue_t queue_a, em_queue_t queue_b)
+{
+	/* tmp storage for allocated events to send */
+	em_event_t events[NUM_EVENT];
+	int i;
+
+	for (i = 0; i < NUM_EVENT; i++) {
+		perf_event_t *perf;
+
+		events[i] = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW,
+				     perf_shm->pool);
+		test_fatal_if(events[i] == EM_EVENT_UNDEF,
+			      "Event alloc failed (%d)", i);
+		perf = em_event_pointer(events[i]);
+		perf->seq = i;
+		perf->dest = queue_b; /* EO-A sends to queue-B */
+	}
+
+	/*
+	 * Send the test events first to EO-A's queue-A.
+ * Send in bursts of 'SEND_MULTI_MAX' events. + */ + const int send_rounds = NUM_EVENT / SEND_MULTI_MAX; + const int left_over = NUM_EVENT % SEND_MULTI_MAX; + int num_sent = 0; + + for (i = 0; i < send_rounds; i++) { + num_sent += em_send_multi(&events[num_sent], SEND_MULTI_MAX, + queue_a); + } + if (left_over) { + num_sent += em_send_multi(&events[num_sent], left_over, + queue_a); + } + if (unlikely(num_sent != NUM_EVENT)) { + test_fatal_if(!appl_shm->exit_flag, + "Event send multi failed:%d (%d)\n" + "Q:%" PRI_QUEUE "", + num_sent, NUM_EVENT, queue_a); + for (i = num_sent; i < NUM_EVENT; i++) + em_free(events[i]); + } +} + +/** + * @private + * + * EO receive function for EO A. + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive_a(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx) +{ + const int core = em_core_id(); + perf_event_t *const perf = em_event_pointer(event); + uint64_t events = perf_shm->core_stat[core].events; + int call_atomic_processing_end = + perf_shm->core_stat[core].atomic_processing_end; + int ready = perf_shm->core_stat[core].ready; + uint64_t ready_count; + em_queue_t dest_queue; + em_status_t ret; + + (void)type; + (void)q_ctx; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + eo_context_t *const eo_ctx = eo_context; + + if (unlikely(eo_ctx->initialize_events)) { + /* start-up: initialize the perf event sending */ + eo_ctx->initialize_events = 0; + initialize_events(queue, perf->dest); + em_free(event); + return; + } + perf_shm->core_stat[core].begin_cycles = env_get_cycle(); + } else if (unlikely(!ready && events > PRINT_EVENT_COUNT)) { + /* Measurement done, collect cycle count */ + perf_shm->core_stat[core].end_cycles = env_get_cycle(); + /* + * Three measurement rounds: calculate results only for the + * middle round. + * Trigger core-sync after the last round to have all cores + * in the same mode for the next three rounds. 
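+		 * Round mapping: rounds%3 == 0 only collects the cycle
+		 * count, rounds%3 == 1 also calculates the result and
+		 * rounds%3 == 2 prints it and marks this core ready.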
+ */ + int rounds = perf_shm->core_stat[core].rounds++; + + if (rounds % 3 == 1) { + /* Calculate results for middle round */ + calc_result(&perf_shm->core_stat[core], events); + } else if (rounds % 3 == 2) { + /* Print earlier calculated results after last round */ + print_result(&perf_shm->core_stat[core]); + /* Mark that the core is ready with all rounds */ + ready = 1; + perf_shm->core_stat[core].ready = 1; + env_atomic64_inc(&perf_shm->ready_count); + } + } + + events++; + + if (CHECK_SEQ_PER_EVENT) + check_seq_per_event(eo_context, perf, queue); + + dest_queue = perf->dest; + perf->dest = queue; + + perf_shm->core_stat[core].events = events; + + ret = em_send(event, dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, dest_queue); + return; + } + + if (call_atomic_processing_end) + em_atomic_processing_end(); + + if (unlikely(ready)) { + /* core ready with rounds, check if other cores are also ready*/ + ready_count = env_atomic64_get(&perf_shm->ready_count); + + if (ready_count == (uint64_t)em_core_count()) { + /* Change mode after last round */ + perf_shm->core_stat[core].atomic_processing_end = + !call_atomic_processing_end; + perf_shm->core_stat[core].ready = 0; + events = 0; + perf_shm->core_stat[core].events = 0; + + /* Track that all cores have seen that all are ready */ + uint64_t seen_all_ready = + env_atomic64_add_return(&perf_shm->seen_all_ready, 1); + + /* Last core to see 'all ready' resets the counters */ + if (seen_all_ready == (uint64_t)em_core_count()) { + env_atomic64_set(&perf_shm->ready_count, 0); + env_atomic64_set(&perf_shm->seen_all_ready, 0); + } + } + } + + /* + * Give a hint to the scheduler indicating that event + * processing on this core will soon be finished and the + * scheduler could start preparing the next event for this + * core already now to reduce latency etc. The em_preschedule() + * call might only be meaningful with HW schedulers. + */ + em_preschedule(); + + /* Do some dummy processing */ + do_dummy_work(WORK_LOOPS); +} + +/** + * @private + * + * EO receive function for EO B. + * + * Loops back events. + */ +static void +perf_receive_b(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx) +{ + const int core = em_core_id(); + perf_event_t *const perf = em_event_pointer(event); + const int call_atomic_processing_end = + perf_shm->core_stat[core].atomic_processing_end; + uint64_t events = perf_shm->core_stat[core].events; + em_queue_t dest_queue; + em_status_t ret; + (void)type; + (void)q_ctx; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + /* Restart the measurement */ + perf_shm->core_stat[core].begin_cycles = env_get_cycle(); + } + + events++; + + if (CHECK_SEQ_PER_EVENT) + check_seq_per_event(eo_context, perf, queue); + + dest_queue = perf->dest; + perf->dest = queue; + + perf_shm->core_stat[core].events = events; + + ret = em_send(event, dest_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, dest_queue); + return; + } + + if (call_atomic_processing_end) + em_atomic_processing_end(); + + /* + * Give a hint to the scheduler indicating that event processing on + * this core will soon be finished and the scheduler could start + * preparing the next event for this core already now to reduce + * latency etc. 
The em_preschedule() call might only be meaningful
+	 * with HW schedulers.
+	 */
+	em_preschedule();
+
+	/* Do some dummy processing */
+	do_dummy_work(WORK_LOOPS);
+}
+
+static void
+check_seq_per_event(eo_context_t *const eo_ctx, perf_event_t *const perf,
+		    em_queue_t queue)
+{
+	int seq = perf->seq;
+
+	if (unlikely(seq != eo_ctx->next_seq)) {
+		char eo_name[EM_EO_NAME_LEN];
+		char queue_name[EM_QUEUE_NAME_LEN];
+
+		em_eo_get_name(eo_ctx->id, eo_name, sizeof(eo_name));
+		em_queue_get_name(queue, queue_name, sizeof(queue_name));
+
+		APPL_PRINT("Bad sequence number. %s(id:%" PRI_EO "),\t"
+			   "%s(id:%" PRI_QUEUE ") expected seq %i, event seq %i\n",
+			   eo_name, eo_ctx->id, queue_name, queue,
+			   eo_ctx->next_seq, seq);
+	}
+
+	if (likely(eo_ctx->next_seq < (NUM_EVENT - 1)))
+		eo_ctx->next_seq++;
+	else
+		eo_ctx->next_seq = 0;
+}
+
+static void
+do_dummy_work(unsigned int work_loops)
+{
+	em_event_t workbuf_event;
+	perf_event_t *workbuf;
+	uint8_t *from, *to;
+	unsigned int i;
+
+	for (i = 0; i < work_loops && !appl_shm->exit_flag; i++) {
+		/* Dummy workload after releasing atomic context */
+		workbuf_event = em_alloc(sizeof(perf_event_t),
+					 EM_EVENT_TYPE_SW, perf_shm->pool);
+		test_fatal_if(workbuf_event == EM_EVENT_UNDEF,
+			      "em_alloc(pool:%" PRI_POOL ") of buf:%u of tot:%u failed!",
+			      perf_shm->pool, i, work_loops);
+		workbuf = em_event_pointer(workbuf_event);
+		from = &workbuf->data[DATA_SIZE / 2];
+		to = &workbuf->data[0];
+		memcpy(to, from, DATA_SIZE / 2);
+		em_free(workbuf_event);
+	}
+}
+
+/**
+ * Calculates the test measurement result
+ */
+static void
+calc_result(perf_stat_t *const perf_stat, const uint64_t events)
+{
+	uint64_t diff;
+	double cycles_per_event;
+
+	diff = env_cycles_diff(perf_stat->end_cycles, perf_stat->begin_cycles);
+
+	cycles_per_event = ((double)diff) / ((double)events);
+
+	perf_stat->cycles_per_event = cycles_per_event;
+}
+
+/**
+ * Get queue priority value based on the index number.
+ *
+ * @param queue_index  Queue index
+ *
+ * @return Queue priority value
+ *
+ * @note Priority distribution: 40% LOW, 40% NORMAL, 20% HIGH
+ */
+static int
+get_queue_priority(const int queue_index)
+{
+	int remainder = queue_index % 5;
+
+	if (remainder <= 1)
+		return EM_QUEUE_PRIO_LOW;
+	else if (remainder <= 3)
+		return EM_QUEUE_PRIO_NORMAL;
+	else
+		return EM_QUEUE_PRIO_HIGH;
+}
+
+static void
+print_result(perf_stat_t *const perf_stat)
+{
+	const uint32_t hz = env_core_hz();
+	const double mhz = ((double)hz) / 1000000.0;
+	const double cycles_per_event = perf_stat->cycles_per_event;
+	const double events_per_sec = mhz * em_core_count() /
+				      cycles_per_event; /* Million events/s */
+	const uint64_t print_count = perf_stat->print_count++;
+
+	if (perf_stat->atomic_processing_end) {
+		APPL_PRINT("em_atomic_processing_end():%10.0f cycles/event\t"
+			   "events/s:%.2f M @%.2f MHz (core-%02i %" PRIu64 ")\n",
+			   cycles_per_event, events_per_sec, mhz,
+			   em_core_id(), print_count);
+	} else {
+		APPL_PRINT("normal atomic processing:%12.0f cycles/event\t"
+			   "events/s:%.2f M @%.2f MHz (core-%02i %" PRIu64 ")\n",
+			   cycles_per_event, events_per_sec,
+			   mhz, em_core_id(), print_count);
+	}
+}
diff --git a/programs/performance/loop.c b/programs/performance/loop.c
index 7f4c27a8..a06e07ce 100644
--- a/programs/performance/loop.c
+++ b/programs/performance/loop.c
@@ -1,459 +1,455 @@
-/*
- * Copyright (c) 2019, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine performance test example
- *
- * Measures the average cycles consumed during an event send-sched-receive loop
- * for a certain number of EOs in the system. The test has a number of EOs, each
- * with one queue. Each EO receives events through its dedicated queue and
- * sends them right back into the same queue, thus looping the events.
- *
- * Based on the 'pairs' performance test, but instead of forwarding events
- * between queues, here we loop them back into the same queue (which is usually
- * faster). Also 'loop' only uses one queue priority level.
- */
-
-#include <inttypes.h>
-#include <string.h>
-#include <stdio.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/*
- * Test configuration
- */
-
-/** Number of test EOs and queues. Must be an even number.
*/ -#define NUM_EO 128 - -/** Number of events per queue */ -#define NUM_EVENT_PER_QUEUE 32 /* Increase the value to tune performance */ - -/** sizeof data[DATA_SIZE] in bytes in the event payload */ -#define DATA_SIZE 250 - -/** Max number of cores */ -#define MAX_NBR_OF_CORES 256 - -/** The number of events to be received before printing a result */ -#define PRINT_EVENT_COUNT 0xff0000 - -/** EM Queue type used */ -#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC - -/** Define how many events are sent per em_send_multi() call */ -#define SEND_MULTI_MAX 32 - -/* - * Options - */ - -/** Alloc and free per event */ -#define ALLOC_FREE_PER_EVENT 0 /* 0=False or 1=True */ - -/* Result APPL_PRINT() format string */ -#define RESULT_PRINTF_FMT \ -"cycles/event:% -8.2f Mevents/s/core: %-6.2f %5.0f MHz core%02d %" PRIu64 "\n" - -/** - * Performance test statistics (per core) - */ -typedef struct { - int64_t events; - uint64_t begin_cycles; - uint64_t end_cycles; - uint64_t print_count; -} perf_stat_t; - -/** - * Performance test event - */ -typedef struct { - uint8_t data[DATA_SIZE]; -} perf_event_t; - -/** - * Perf test shared memory, read-only after start-up, allow cache-line sharing - */ -typedef struct { - /* EO table */ - em_eo_t eo_tbl[NUM_EO]; - /* Event pool used by this application */ - em_pool_t pool; -} perf_shm_t; - -/** EM-core local pointer to shared memory */ -static ENV_LOCAL perf_shm_t *perf_shm; -/** - * Core specific test statistics. - * - * Allow for 'PRINT_EVENT_COUNT' warm-up rounds, - * incremented per core during receive, measurement starts at 0. - */ -static ENV_LOCAL perf_stat_t core_stat = {.events = -PRINT_EVENT_COUNT}; - -/* - * Local function prototypes - */ - -static em_status_t -perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -perf_stop(void *eo_context, em_eo_t eo); - -static void -perf_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); - -static void -print_result(perf_stat_t *const perf_stat); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Loop performance test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - perf_shm = env_shared_reserve("PerfSharedMem", - sizeof(perf_shm_t)); - em_register_error_handler(test_error_handler); - } else { - perf_shm = env_shared_lookup("PerfSharedMem"); - } - - if (perf_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Perf init failed on EM-core:%u", em_core_id()); - else if (core == 0) - memset(perf_shm, 0, sizeof(perf_shm_t)); -} - -/** - * Startup of the Loop performance test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. 
- */ - if (appl_conf->num_pools >= 1) - perf_shm->pool = appl_conf->pools[0]; - else - perf_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - perf_shm->pool); - - test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - /* - * Create and start application EOs - * Send initial test events to the EOs' queues - */ - em_queue_t queues[NUM_EO]; - - for (int i = 0; i < NUM_EO; i++) { - em_queue_t queue; - em_eo_t eo; - em_status_t ret, start_ret = EM_ERROR; - - /* Create the EO's loop queue */ - queue = em_queue_create("queue A", QUEUE_TYPE, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(queue == EM_QUEUE_UNDEF, - "Queue creation failed, round:%d", i); - queues[i] = queue; - - /* Create the EO */ - eo = em_eo_create("loop-eo", perf_start, NULL, perf_stop, NULL, - perf_receive, NULL); - test_fatal_if(eo == EM_EO_UNDEF, - "EO(%d) creation failed!", i); - perf_shm->eo_tbl[i] = eo; - - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo, queue); - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - } - - for (int i = 0; i < NUM_EO; i++) { - em_queue_t queue = queues[i]; - em_event_t events[NUM_EVENT_PER_QUEUE]; - - /* Alloc and send test events */ - for (int j = 0; j < NUM_EVENT_PER_QUEUE; j++) { - em_event_t ev; - - ev = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - test_fatal_if(ev == EM_EVENT_UNDEF, - "Event allocation failed (%d, %d)", i, j); - events[j] = ev; - } - - /* Send in bursts of 'SEND_MULTI_MAX' events */ - const int send_rounds = NUM_EVENT_PER_QUEUE / SEND_MULTI_MAX; - const int left_over = NUM_EVENT_PER_QUEUE % SEND_MULTI_MAX; - int num_sent = 0; - int m, n; - - for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) { - num_sent += em_send_multi(&events[n], SEND_MULTI_MAX, - queue); - } - if (left_over) { - num_sent += em_send_multi(&events[n], left_over, - queue); - } - test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE, - "Event send multi failed:%d (%d)\n" - "Q:%" PRI_QUEUE "", - num_sent, NUM_EVENT_PER_QUEUE, queue); - } - - env_sync_mem(); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - int i; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - for (i = 0; i < NUM_EO; i++) { - /* Stop & delete EO */ - eo = perf_shm->eo_tbl[i]; - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - } -} - -void -test_term(void) -{ - const int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(perf_shm); - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. 
- * - */ -static em_status_t -perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - (void)eo_context; - (void)eo; - (void)conf; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - * - */ -static em_status_t -perf_stop(void *eo_context, em_eo_t eo) -{ - em_status_t ret; - - (void)eo_context; - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - return ret; -} - -/** - * @private - * - * EO receive function for EO A. - * - * Loops back events and calculates the event rate. - */ -static void -perf_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - int64_t events = core_stat.events; - em_status_t ret; - - (void)eo_context; - (void)type; - (void)queue_context; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (unlikely(events == 0)) { - /* Start the measurement */ - core_stat.begin_cycles = env_get_cycle(); - } else if (unlikely(events == PRINT_EVENT_COUNT)) { - /* End the measurement */ - core_stat.end_cycles = env_get_cycle(); - /* Print results and restart */ - core_stat.print_count += 1; - print_result(&core_stat); - /* Restart the measurement next round */ - events = -1; /* +1 below => 0 */ - } - - if (ALLOC_FREE_PER_EVENT) { - em_free(event); - event = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); - } - - /* Send the event back into the queue it originated from, i.e. loop */ - ret = em_send(event, queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, queue); - } - - events++; - core_stat.events = events; -} - -/** - * Prints test measurement result - */ -static void -print_result(perf_stat_t *const perf_stat) -{ - uint64_t diff; - uint32_t hz; - double mhz; - double cycles_per_event, events_per_sec; - uint64_t print_count; - - hz = env_core_hz(); - mhz = ((double)hz) / 1000000.0; - - if (perf_stat->end_cycles > perf_stat->begin_cycles) - diff = perf_stat->end_cycles - perf_stat->begin_cycles; - else - diff = UINT64_MAX - perf_stat->begin_cycles + - perf_stat->end_cycles + 1; - - print_count = perf_stat->print_count; - cycles_per_event = ((double)diff) / ((double)perf_stat->events); - events_per_sec = mhz / cycles_per_event; /* Million events/s */ - - APPL_PRINT(RESULT_PRINTF_FMT, cycles_per_event, events_per_sec, - mhz, em_core_id(), print_count); -} +/* + * Copyright (c) 2019, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine performance test example
+ *
+ * Measures the average cycles consumed during an event send-sched-receive loop
+ * for a certain number of EOs in the system. The test has a number of EOs, each
+ * with one queue. Each EO receives events through its dedicated queue and
+ * sends them right back into the same queue, thus looping the events.
+ *
+ * Based on the 'pairs' performance test, but instead of forwarding events
+ * between queues, here we loop them back into the same queue (which is usually
+ * faster). Also 'loop' only uses one queue priority level.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/*
+ * Test configuration
+ */
+
+/** Number of test EOs and queues. Must be an even number. */
+#define NUM_EO 128
+
+/** Number of events per queue */
+#define NUM_EVENT_PER_QUEUE 32 /* Increase the value to tune performance */
+
+/** sizeof data[DATA_SIZE] in bytes in the event payload */
+#define DATA_SIZE 250
+
+/** Max number of cores */
+#define MAX_NBR_OF_CORES 256
+
+/** The number of events to be received before printing a result */
+#define PRINT_EVENT_COUNT 0xff0000
+
+/** EM Queue type used */
+#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC
+
+/** Define how many events are sent per em_send_multi() call */
+#define SEND_MULTI_MAX 32
+
+/*
+ * Options
+ */
+
+/** Alloc and free per event */
+#define ALLOC_FREE_PER_EVENT 0 /* 0=False or 1=True */
+
+/* Result APPL_PRINT() format string */
+#define RESULT_PRINTF_FMT \
+"cycles/event:% -8.2f Mevents/s/core: %-6.2f %5.0f MHz core%02d %" PRIu64 "\n"
+
+/**
+ * Performance test statistics (per core)
+ */
+typedef struct {
+	int64_t events;
+	uint64_t begin_cycles;
+	uint64_t end_cycles;
+	uint64_t print_count;
+} perf_stat_t;
+
+/**
+ * Performance test event
+ */
+typedef struct {
+	uint8_t data[DATA_SIZE];
+} perf_event_t;
+
+/**
+ * Perf test shared memory, read-only after start-up, allow cache-line sharing
+ */
+typedef struct {
+	/* EO table */
+	em_eo_t eo_tbl[NUM_EO];
+	/* Event pool used by this application */
+	em_pool_t pool;
+} perf_shm_t;
+
+/** EM-core local pointer to shared memory */
+static ENV_LOCAL perf_shm_t *perf_shm;
+/**
+ * Core specific test statistics.
+ *
+ * Allow for 'PRINT_EVENT_COUNT' warm-up rounds,
+ * incremented per core during receive, measurement starts at 0.
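+ * The '.events = -PRINT_EVENT_COUNT' initializer below makes the counter
+ * reach zero only after the warm-up events have been received, so the
+ * cycle measurement begins only after warm-up.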
+ */ +static ENV_LOCAL perf_stat_t core_stat = {.events = -PRINT_EVENT_COUNT}; + +/* + * Local function prototypes + */ + +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +perf_stop(void *eo_context, em_eo_t eo); + +static void +perf_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static void +print_result(perf_stat_t *const perf_stat); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Loop performance test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(test_error_handler); + } else { + perf_shm = env_shared_lookup("PerfSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf init failed on EM-core:%u", em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Loop performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + perf_shm->pool = appl_conf->pools[0]; + else + perf_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + perf_shm->pool); + + test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + /* + * Create and start application EOs + * Send initial test events to the EOs' queues + */ + em_queue_t queues[NUM_EO]; + + for (int i = 0; i < NUM_EO; i++) { + em_queue_t queue; + em_eo_t eo; + em_status_t ret, start_ret = EM_ERROR; + + /* Create the EO's loop queue */ + queue = em_queue_create("queue A", QUEUE_TYPE, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + test_fatal_if(queue == EM_QUEUE_UNDEF, + "Queue creation failed, round:%d", i); + queues[i] = queue; + + /* Create the EO */ + eo = em_eo_create("loop-eo", perf_start, NULL, perf_stop, NULL, + perf_receive, NULL); + test_fatal_if(eo == EM_EO_UNDEF, + "EO(%d) creation failed!", i); + perf_shm->eo_tbl[i] = eo; + + ret = em_eo_add_queue_sync(eo, queue); + test_fatal_if(ret != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eo, queue); + + ret = em_eo_start_sync(eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + } + + for (int i = 0; i < NUM_EO; i++) { + em_queue_t queue = queues[i]; + em_event_t 
events[NUM_EVENT_PER_QUEUE]; + + /* Alloc and send test events */ + for (int j = 0; j < NUM_EVENT_PER_QUEUE; j++) { + em_event_t ev; + + ev = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(ev == EM_EVENT_UNDEF, + "Event allocation failed (%d, %d)", i, j); + events[j] = ev; + } + + /* Send in bursts of 'SEND_MULTI_MAX' events */ + const int send_rounds = NUM_EVENT_PER_QUEUE / SEND_MULTI_MAX; + const int left_over = NUM_EVENT_PER_QUEUE % SEND_MULTI_MAX; + int num_sent = 0; + int m, n; + + for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) { + num_sent += em_send_multi(&events[n], SEND_MULTI_MAX, + queue); + } + if (left_over) { + num_sent += em_send_multi(&events[n], left_over, + queue); + } + test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE, + "Event send multi failed:%d (%d)\n" + "Q:%" PRI_QUEUE "", + num_sent, NUM_EVENT_PER_QUEUE, queue); + } + + env_sync_mem(); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + int i; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + for (i = 0; i < NUM_EO; i++) { + /* Stop & delete EO */ + eo = perf_shm->eo_tbl[i]; + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); + } +} + +void +test_term(void) +{ + const int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(perf_shm); + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + (void)eo_context; + (void)eo; + (void)conf; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + * + */ +static em_status_t +perf_stop(void *eo_context, em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + return ret; +} + +/** + * @private + * + * EO receive function for EO A. + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + int64_t events = core_stat.events; + em_status_t ret; + + (void)eo_context; + (void)type; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + /* Start the measurement */ + core_stat.begin_cycles = env_get_cycle(); + } else if (unlikely(events == PRINT_EVENT_COUNT)) { + /* End the measurement */ + core_stat.end_cycles = env_get_cycle(); + /* Print results and restart */ + core_stat.print_count += 1; + print_result(&core_stat); + /* Restart the measurement next round */ + events = -1; /* +1 below => 0 */ + } + + if (ALLOC_FREE_PER_EVENT) { + em_free(event); + event = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); + } + + /* Send the event back into the queue it originated from, i.e. 
loop */ + ret = em_send(event, queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, queue); + } + + events++; + core_stat.events = events; +} + +/** + * Prints test measurement result + */ +static void +print_result(perf_stat_t *const perf_stat) +{ + uint64_t diff; + uint32_t hz; + double mhz; + double cycles_per_event, events_per_sec; + uint64_t print_count; + + hz = env_core_hz(); + mhz = ((double)hz) / 1000000.0; + + diff = env_cycles_diff(perf_stat->end_cycles, perf_stat->begin_cycles); + + print_count = perf_stat->print_count; + cycles_per_event = ((double)diff) / ((double)perf_stat->events); + events_per_sec = mhz / cycles_per_event; /* Million events/s */ + + APPL_PRINT(RESULT_PRINTF_FMT, cycles_per_event, events_per_sec, + mhz, em_core_id(), print_count); +} diff --git a/programs/performance/loop_multircv.c b/programs/performance/loop_multircv.c index 808d9d9c..a66e406f 100644 --- a/programs/performance/loop_multircv.c +++ b/programs/performance/loop_multircv.c @@ -1,469 +1,465 @@ -/* - * Copyright (c) 2020, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine performance test example - based on loop.c but uses - * a multi-event EO-receive function. - * - * Measures the average cycles consumed during an event send-sched-receive loop - * for a certain number of EOs in the system. The test has a number of EOs, each - * with one queue. Each EO receives events through its dedicated queue and - * sends them right back into the same queue, thus looping the events. - * - * Based on the 'pairs' performance test, but instead of forwarding events - * between queues, here we loop them back into the same queue (which is usually - * faster). Also 'loop' only uses one queue priority level. 
- */
-
-#include <inttypes.h>
-#include <string.h>
-#include <stdio.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/*
- * Test configuration
- */
-
-/** Number of test EOs and queues. Must be an even number. */
-#define NUM_EO 128
-
-/** Number of events per queue */
-#define NUM_EVENT_PER_QUEUE 128 /* Increase the value to tune performance */
-
-/** sizeof data[DATA_SIZE] in bytes in the event payload */
-#define DATA_SIZE 250
-
-/** Max number of cores */
-#define MAX_NBR_OF_CORES 256
-
-/** The number of events to be received before printing a result */
-#define PRINT_EVENT_COUNT 0xff0000
-
-/** EM Queue type used */
-#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC
-
-/** Define how many events are sent per em_send_multi() call */
-#define SEND_MULTI_MAX 32
-
-#define MAX_RCV_FN_EVENTS 32
-
-/*
- * Options
- */
-
-/** Alloc and free per event */
-#define ALLOC_FREE_PER_EVENT 0 /* 0=False or 1=True */
-
-/* Result APPL_PRINT() format string */
-#define RESULT_PRINTF_FMT \
-"cycles/event:% -8.2f Mevents/s/core: %-6.2f %5.0f MHz core%02d %" PRIu64 "\n"
-
-/**
- * Performance test statistics (per core)
- */
-typedef struct {
-	int64_t event_count;
-	uint64_t begin_cycles;
-	uint64_t end_cycles;
-	uint64_t print_count;
-} perf_stat_t;
-
-/**
- * Performance test event
- */
-typedef struct {
-	uint8_t data[DATA_SIZE];
-} perf_event_t;
-
-/**
- * Perf test shared memory, read-only after start-up, allow cache-line sharing
- */
-typedef struct {
-	/* EO table */
-	em_eo_t eo_tbl[NUM_EO];
-	/* Event pool used by this application */
-	em_pool_t pool;
-} perf_shm_t;
-
-/** EM-core local pointer to shared memory */
-static ENV_LOCAL perf_shm_t *perf_shm;
-/**
- * Core specific test statistics.
- *
- * Allow for 'PRINT_EVENT_COUNT' warm-up rounds,
- * incremented per core during receive, measurement starts at 0.
- */
-static ENV_LOCAL perf_stat_t core_stat = {.event_count = 0};
-
-/*
- * Local function prototypes
- */
-
-static em_status_t
-perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf);
-
-static em_status_t
-perf_stop(void *eo_context, em_eo_t eo);
-
-static void
-perf_receive_multi(void *eo_context, em_event_t event_tbl[], int num,
-		   em_queue_t queue, void *queue_context);
-
-static void
-print_result(perf_stat_t *const perf_stat);
-
-/**
- * Main function
- *
- * Call cm_setup() to perform test & EM setup common for all the
- * test applications.
- *
- * cm_setup() will call test_init() and test_start() and launch
- * the EM dispatch loop on every EM-core.
- */
-int main(int argc, char *argv[])
-{
-	return cm_setup(argc, argv);
-}
-
-/**
- * Init of the Loop performance test application.
- *
- * @attention Run on all cores.
- *
- * @see cm_setup() for setup and dispatch.
- */
-void
-test_init(void)
-{
-	int core = em_core_id();
-
-	if (core == 0) {
-		perf_shm = env_shared_reserve("PerfSharedMem",
-					      sizeof(perf_shm_t));
-		em_register_error_handler(test_error_handler);
-	} else {
-		perf_shm = env_shared_lookup("PerfSharedMem");
-	}
-
-	if (perf_shm == NULL)
-		test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead,
-			   "Perf init failed on EM-core:%u", em_core_id());
-	else if (core == 0)
-		memset(perf_shm, 0, sizeof(perf_shm_t));
-}
-
-/**
- * Startup of the Loop performance test application.
- *
- * @attention Run only on EM core 0.
- *
- * @param appl_conf Application configuration
- *
- * @see cm_setup() for setup and dispatch.
- */ -void -test_start(appl_conf_t *const appl_conf) -{ - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - perf_shm->pool = appl_conf->pools[0]; - else - perf_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - perf_shm->pool); - - test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - /* - * Create and start application EOs - * Send initial test events to the EOs' queues - */ - em_queue_t queues[NUM_EO]; - - for (int i = 0; i < NUM_EO; i++) { - em_queue_t queue; - em_eo_t eo; - em_eo_multircv_param_t eo_param; - em_status_t ret, start_ret = EM_ERROR; - - /* Create the EO's loop queue */ - queue = em_queue_create("queue A", QUEUE_TYPE, - EM_QUEUE_PRIO_NORMAL, - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(queue == EM_QUEUE_UNDEF, - "Queue creation failed, round:%d", i); - queues[i] = queue; - - /* Init & create the EO */ - em_eo_multircv_param_init(&eo_param); - /* Set EO params needed by this application */ - eo_param.start = perf_start; - eo_param.stop = perf_stop; - eo_param.receive_multi = perf_receive_multi; - /* eo_param.max_events = use default; */ - eo = em_eo_create_multircv("loop-eo", &eo_param); - test_fatal_if(eo == EM_EO_UNDEF, - "EO(%d) creation failed!", i); - perf_shm->eo_tbl[i] = eo; - - ret = em_eo_add_queue_sync(eo, queue); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo, queue); - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - } - - for (int i = 0; i < NUM_EO; i++) { - em_queue_t queue = queues[i]; - em_event_t events[NUM_EVENT_PER_QUEUE]; - int num, tot = 0; - - /* Alloc and send test events */ - do { - num = em_alloc_multi(events, NUM_EVENT_PER_QUEUE - tot, - sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - tot += num; - } while (tot < num && num > 0); - test_fatal_if(tot != NUM_EVENT_PER_QUEUE, - "Allocated:%d of requested:%d events", - tot, NUM_EVENT_PER_QUEUE); - - /* Send in bursts of 'SEND_MULTI_MAX' events */ - const int send_rounds = NUM_EVENT_PER_QUEUE / SEND_MULTI_MAX; - const int left_over = NUM_EVENT_PER_QUEUE % SEND_MULTI_MAX; - int num_sent = 0; - int m, n; - - for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) { - num_sent += em_send_multi(&events[n], SEND_MULTI_MAX, - queue); - } - if (left_over) { - num_sent += em_send_multi(&events[n], left_over, - queue); - } - test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE, - "Event send multi failed:%d (%d)\n" - "Q:%" PRI_QUEUE "", - num_sent, NUM_EVENT_PER_QUEUE, queue); - } - - env_sync_mem(); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - int i; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - for (i = 0; i < NUM_EO; i++) { - /* Stop & delete EO */ - eo = perf_shm->eo_tbl[i]; - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != 
EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - } -} - -void -test_term(void) -{ - const int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(perf_shm); - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. - * - */ -static em_status_t -perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - (void)eo_context; - (void)eo; - (void)conf; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - * - */ -static em_status_t -perf_stop(void *eo_context, em_eo_t eo) -{ - em_status_t ret; - - (void)eo_context; - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - return ret; -} - -/** - * @private - * - * EO receive function for EO A. - * - * Loops back events and calculates the event rate. - */ -static void -perf_receive_multi(void *eo_context, em_event_t event_tbl[], int num, - em_queue_t queue, void *queue_context) -{ - int64_t event_count = core_stat.event_count; - int ret; - - (void)eo_context; - (void)queue_context; - - if (unlikely(appl_shm->exit_flag)) { - em_free_multi(event_tbl, num); - return; - } - - if (unlikely(event_count == 0)) { - /* Start the measurement */ - core_stat.begin_cycles = env_get_cycle(); - } else if (unlikely(event_count >= PRINT_EVENT_COUNT)) { - /* End the measurement */ - core_stat.end_cycles = env_get_cycle(); - /* Print results and restart */ - core_stat.print_count += 1; - print_result(&core_stat); - /* Restart the measurement next round */ - event_count = -num; /* +num below => 0 */ - } - - if (ALLOC_FREE_PER_EVENT) { - em_free_multi(event_tbl, num); - int ret = em_alloc_multi(event_tbl, num, sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - test_fatal_if(ret != num, "Allocated %d of num:%d events", - ret, num); - } - - /* Send the event back into the queue it originated from, i.e. loop */ - ret = em_send_multi(event_tbl, num, queue); - if (unlikely(ret != num)) { - em_free_multi(&event_tbl[ret], num - ret); - test_fatal_if(!appl_shm->exit_flag, - "Send-multi:%d Num:%d Queue:%" PRI_QUEUE "", - ret, num, queue); - } - - event_count += num; - core_stat.event_count = event_count; -} - -/** - * Prints test measurement result - */ -static void -print_result(perf_stat_t *const perf_stat) -{ - uint64_t diff; - uint32_t hz; - double mhz; - double cycles_per_event, events_per_sec; - uint64_t print_count; - - hz = env_core_hz(); - mhz = ((double)hz) / 1000000.0; - - if (perf_stat->end_cycles > perf_stat->begin_cycles) - diff = perf_stat->end_cycles - perf_stat->begin_cycles; - else - diff = UINT64_MAX - perf_stat->begin_cycles + - perf_stat->end_cycles + 1; - - print_count = perf_stat->print_count; - cycles_per_event = ((double)diff) / ((double)perf_stat->event_count); - events_per_sec = mhz / cycles_per_event; /* Million events/s */ - - APPL_PRINT(RESULT_PRINTF_FMT, cycles_per_event, events_per_sec, - mhz, em_core_id(), print_count); -} +/* + * Copyright (c) 2020, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine performance test example - based on loop.c but uses
+ * a multi-event EO-receive function.
+ *
+ * Measures the average cycles consumed during an event send-sched-receive loop
+ * for a certain number of EOs in the system. The test has a number of EOs, each
+ * with one queue. Each EO receives events through its dedicated queue and
+ * sends them right back into the same queue, thus looping the events.
+ *
+ * Based on the 'pairs' performance test, but instead of forwarding events
+ * between queues, here we loop them back into the same queue (which is usually
+ * faster). Also 'loop' only uses one queue priority level.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/*
+ * Test configuration
+ */
+
+/** Number of test EOs and queues. Must be an even number.
*/ +#define NUM_EO 128 + +/** Number of events per queue */ +#define NUM_EVENT_PER_QUEUE 128 /* Increase the value to tune performance */ + +/** sizeof data[DATA_SIZE] in bytes in the event payload */ +#define DATA_SIZE 250 + +/** Max number of cores */ +#define MAX_NBR_OF_CORES 256 + +/** The number of events to be received before printing a result */ +#define PRINT_EVENT_COUNT 0xff0000 + +/** EM Queue type used */ +#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC + +/** Define how many events are sent per em_send_multi() call */ +#define SEND_MULTI_MAX 32 + +#define MAX_RCV_FN_EVENTS 32 + +/* + * Options + */ + +/** Alloc and free per event */ +#define ALLOC_FREE_PER_EVENT 0 /* 0=False or 1=True */ + +/* Result APPL_PRINT() format string */ +#define RESULT_PRINTF_FMT \ +"cycles/event:% -8.2f Mevents/s/core: %-6.2f %5.0f MHz core%02d %" PRIu64 "\n" + +/** + * Performance test statistics (per core) + */ +typedef struct { + int64_t event_count; + uint64_t begin_cycles; + uint64_t end_cycles; + uint64_t print_count; +} perf_stat_t; + +/** + * Performance test event + */ +typedef struct { + uint8_t data[DATA_SIZE]; +} perf_event_t; + +/** + * Perf test shared memory, read-only after start-up, allow cache-line sharing + */ +typedef struct { + /* EO table */ + em_eo_t eo_tbl[NUM_EO]; + /* Event pool used by this application */ + em_pool_t pool; +} perf_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL perf_shm_t *perf_shm; +/** + * Core specific test statistics. + * + * Allow for 'PRINT_EVENT_COUNT' warm-up rounds, + * incremented per core during receive, measurement starts at 0. + */ +static ENV_LOCAL perf_stat_t core_stat = {.event_count = 0}; + +/* + * Local function prototypes + */ + +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +perf_stop(void *eo_context, em_eo_t eo); + +static void +perf_receive_multi(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context); + +static void +print_result(perf_stat_t *const perf_stat); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Loop performance test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(test_error_handler); + } else { + perf_shm = env_shared_lookup("PerfSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf init failed on EM-core:%u", em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Loop performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. 
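+	 * (appl_conf->pools[] is typically filled in by cm_setup() based
+	 * on the application's startup pool configuration.)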
+	 */
+	if (appl_conf->num_pools >= 1)
+		perf_shm->pool = appl_conf->pools[0];
+	else
+		perf_shm->pool = EM_POOL_DEFAULT;
+
+	APPL_PRINT("\n"
+		   "***********************************************************\n"
+		   "EM APPLICATION: '%s' initializing:\n"
+		   "  %s: %s() - EM-core:%i\n"
+		   "  Application running on %d EM-cores (procs:%d, threads:%d)\n"
+		   "  using event pool:%" PRI_POOL "\n"
+		   "***********************************************************\n"
+		   "\n",
+		   appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(),
+		   em_core_count(),
+		   appl_conf->num_procs, appl_conf->num_threads,
+		   perf_shm->pool);
+
+	test_fatal_if(perf_shm->pool == EM_POOL_UNDEF,
+		      "Undefined application event pool!");
+
+	/*
+	 * Create and start application EOs
+	 * Send initial test events to the EOs' queues
+	 */
+	em_queue_t queues[NUM_EO];
+
+	for (int i = 0; i < NUM_EO; i++) {
+		em_queue_t queue;
+		em_eo_t eo;
+		em_eo_multircv_param_t eo_param;
+		em_status_t ret, start_ret = EM_ERROR;
+
+		/* Create the EO's loop queue */
+		queue = em_queue_create("queue A", QUEUE_TYPE,
+					EM_QUEUE_PRIO_NORMAL,
+					EM_QUEUE_GROUP_DEFAULT, NULL);
+		test_fatal_if(queue == EM_QUEUE_UNDEF,
+			      "Queue creation failed, round:%d", i);
+		queues[i] = queue;
+
+		/* Init & create the EO */
+		em_eo_multircv_param_init(&eo_param);
+		/* Set EO params needed by this application */
+		eo_param.start = perf_start;
+		eo_param.stop = perf_stop;
+		eo_param.receive_multi = perf_receive_multi;
+		/* eo_param.max_events = use default; */
+		eo = em_eo_create_multircv("loop-eo", &eo_param);
+		test_fatal_if(eo == EM_EO_UNDEF,
+			      "EO(%d) creation failed!", i);
+		perf_shm->eo_tbl[i] = eo;
+
+		ret = em_eo_add_queue_sync(eo, queue);
+		test_fatal_if(ret != EM_OK,
+			      "EO add queue:%" PRI_STAT "\n"
+			      "EO:%" PRI_EO " Queue:%" PRI_QUEUE "",
+			      ret, eo, queue);
+
+		ret = em_eo_start_sync(eo, &start_ret, NULL);
+		test_fatal_if(ret != EM_OK || start_ret != EM_OK,
+			      "EO start:%" PRI_STAT " %" PRI_STAT "",
+			      ret, start_ret);
+	}
+
+	for (int i = 0; i < NUM_EO; i++) {
+		em_queue_t queue = queues[i];
+		em_event_t events[NUM_EVENT_PER_QUEUE];
+		int num, tot = 0;
+
+		/* Alloc and send test events */
+		/* Retry until all events are allocated or alloc fails */
+		do {
+			num = em_alloc_multi(events, NUM_EVENT_PER_QUEUE - tot,
+					     sizeof(perf_event_t),
+					     EM_EVENT_TYPE_SW, perf_shm->pool);
+			tot += num;
+		} while (tot < NUM_EVENT_PER_QUEUE && num > 0);
+		test_fatal_if(tot != NUM_EVENT_PER_QUEUE,
+			      "Allocated:%d of requested:%d events",
+			      tot, NUM_EVENT_PER_QUEUE);
+
+		/* Send in bursts of 'SEND_MULTI_MAX' events */
+		const int send_rounds = NUM_EVENT_PER_QUEUE / SEND_MULTI_MAX;
+		const int left_over = NUM_EVENT_PER_QUEUE % SEND_MULTI_MAX;
+		int num_sent = 0;
+		int m, n;
+
+		for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) {
+			num_sent += em_send_multi(&events[n], SEND_MULTI_MAX,
+						  queue);
+		}
+		if (left_over) {
+			num_sent += em_send_multi(&events[n], left_over,
+						  queue);
+		}
+		test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE,
+			      "Event send multi failed:%d (%d)\n"
+			      "Q:%" PRI_QUEUE "",
+			      num_sent, NUM_EVENT_PER_QUEUE, queue);
+	}
+
+	env_sync_mem();
+}
+
+void
+test_stop(appl_conf_t *const appl_conf)
+{
+	const int core = em_core_id();
+	em_eo_t eo;
+	em_status_t ret;
+	int i;
+
+	(void)appl_conf;
+
+	APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+	for (i = 0; i < NUM_EO; i++) {
+		/* Stop & delete EO */
+		eo = perf_shm->eo_tbl[i];
+
+		ret = em_eo_stop_sync(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret);
+
+		ret = em_eo_delete(eo);
+		test_fatal_if(ret != EM_OK,
+			      "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret);
+	}
+}
+
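+/*
+ * Note: em_eo_stop_sync() above triggers the EO's stop function,
+ * perf_stop(), which removes and deletes the EO's queues, so
+ * test_stop() itself only needs to stop and delete the EOs.
+ * test_term() below then frees the shared memory on EM-core 0.
+ */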
+void +test_term(void) +{ + const int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(perf_shm); + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + (void)eo_context; + (void)eo; + (void)conf; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + * + */ +static em_status_t +perf_stop(void *eo_context, em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + return ret; +} + +/** + * @private + * + * EO receive function for EO A. + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive_multi(void *eo_context, em_event_t event_tbl[], int num, + em_queue_t queue, void *queue_context) +{ + int64_t event_count = core_stat.event_count; + int ret; + + (void)eo_context; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free_multi(event_tbl, num); + return; + } + + if (unlikely(event_count == 0)) { + /* Start the measurement */ + core_stat.begin_cycles = env_get_cycle(); + } else if (unlikely(event_count >= PRINT_EVENT_COUNT)) { + /* End the measurement */ + core_stat.end_cycles = env_get_cycle(); + /* Print results and restart */ + core_stat.print_count += 1; + print_result(&core_stat); + /* Restart the measurement next round */ + event_count = -num; /* +num below => 0 */ + } + + if (ALLOC_FREE_PER_EVENT) { + em_free_multi(event_tbl, num); + int ret = em_alloc_multi(event_tbl, num, sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(ret != num, "Allocated %d of num:%d events", + ret, num); + } + + /* Send the event back into the queue it originated from, i.e. loop */ + ret = em_send_multi(event_tbl, num, queue); + if (unlikely(ret != num)) { + em_free_multi(&event_tbl[ret], num - ret); + test_fatal_if(!appl_shm->exit_flag, + "Send-multi:%d Num:%d Queue:%" PRI_QUEUE "", + ret, num, queue); + } + + event_count += num; + core_stat.event_count = event_count; +} + +/** + * Prints test measurement result + */ +static void +print_result(perf_stat_t *const perf_stat) +{ + uint64_t diff; + uint32_t hz; + double mhz; + double cycles_per_event, events_per_sec; + uint64_t print_count; + + hz = env_core_hz(); + mhz = ((double)hz) / 1000000.0; + + diff = env_cycles_diff(perf_stat->end_cycles, perf_stat->begin_cycles); + + print_count = perf_stat->print_count; + cycles_per_event = ((double)diff) / ((double)perf_stat->event_count); + events_per_sec = mhz / cycles_per_event; /* Million events/s */ + + APPL_PRINT(RESULT_PRINTF_FMT, cycles_per_event, events_per_sec, + mhz, em_core_id(), print_count); +} diff --git a/programs/performance/loop_refs.c b/programs/performance/loop_refs.c new file mode 100644 index 00000000..19f1a345 --- /dev/null +++ b/programs/performance/loop_refs.c @@ -0,0 +1,464 @@ +/* + * Copyright (c) 2023, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine performance test example using event references.
+ *
+ * Test based on the 'loop' test but changed to use event references instead of
+ * separate events.
+ *
+ * Measures the average cycles consumed during an event send-sched-receive loop
+ * for a certain number of EOs in the system. The test has a number of EOs, each
+ * with one queue. Each EO receives events (references) through its dedicated
+ * queue and sends them right back into the same queue, thus looping the events.
+ * Every event sent in this example is a reference.
+ *
+ * Like the 'loop' test (in turn based on the 'pairs' test), events are looped
+ * back into the same queue instead of being forwarded between queues (which is
+ * usually faster). 'loop_refs' also uses only one queue priority level.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/*
+ * Test configuration
+ */
+
+/** Number of test EOs and queues. Must be an even number.
*/ +#define NUM_EO 128 + +/** Number of events per queue */ +#define NUM_EVENT_PER_QUEUE 128 /* Increase the value to tune performance */ + +/** sizeof data[DATA_SIZE] in bytes in the event payload */ +#define DATA_SIZE 250 + +/** Max number of cores */ +#define MAX_NBR_OF_CORES 256 + +/** The number of events to be received before printing a result */ +#define PRINT_EVENT_COUNT 0xff0000 + +/** EM Queue type used */ +#define QUEUE_TYPE EM_QUEUE_TYPE_PARALLEL + +/** Define how many events are sent per em_send_multi() call */ +#define SEND_MULTI_MAX 32 + +/* + * Options + */ + +/** Alloc and free per event */ +#define ALLOC_FREE_PER_EVENT 0 /* 0=False or 1=True */ + +/* Result APPL_PRINT() format string */ +#define RESULT_PRINTF_FMT \ +"cycles/event:% -8.2f Mevents/s/core: %-6.2f %5.0f MHz core%02d %" PRIu64 "\n" + +/** + * Performance test statistics (per core) + */ +typedef struct { + int64_t events; + uint64_t begin_cycles; + uint64_t end_cycles; + uint64_t print_count; +} perf_stat_t; + +/** + * Performance test event + */ +typedef struct { + uint8_t data[DATA_SIZE]; +} perf_event_t; + +/** + * Perf test shared memory, read-only after start-up, allow cache-line sharing + */ +typedef struct { + /* EO table */ + em_eo_t eo_tbl[NUM_EO]; + /* Event pool used by this application */ + em_pool_t pool; +} perf_shm_t; + +/** EM-core local pointer to shared memory */ +static ENV_LOCAL perf_shm_t *perf_shm; +/** + * Core specific test statistics. + * + * Allow for 'PRINT_EVENT_COUNT' warm-up rounds, + * incremented per core during receive, measurement starts at 0. + */ +static ENV_LOCAL perf_stat_t core_stat = {.events = -PRINT_EVENT_COUNT}; + +/* + * Local function prototypes + */ + +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +perf_stop(void *eo_context, em_eo_t eo); + +static void +perf_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx); + +static void +print_result(perf_stat_t *const perf_stat); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Init of the Loop performance test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(test_error_handler); + } else { + perf_shm = env_shared_lookup("PerfSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf init failed on EM-core:%u", em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Loop performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. 
+ */ + if (appl_conf->num_pools >= 1) + perf_shm->pool = appl_conf->pools[0]; + else + perf_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + perf_shm->pool); + + test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + /* + * Create and start application EOs + * Send initial test events to the EOs' queues + */ + em_queue_t queues[NUM_EO]; + + for (int i = 0; i < NUM_EO; i++) { + em_queue_t queue; + em_eo_t eo; + em_status_t ret, start_ret = EM_ERROR; + + /* Create the EO's loop queue */ + queue = em_queue_create("queue A", QUEUE_TYPE, + EM_QUEUE_PRIO_NORMAL, + EM_QUEUE_GROUP_DEFAULT, NULL); + test_fatal_if(queue == EM_QUEUE_UNDEF, + "Queue creation failed, round:%d", i); + queues[i] = queue; + + /* Create the EO */ + eo = em_eo_create("loop-eo", perf_start, NULL, perf_stop, NULL, + perf_receive, NULL); + test_fatal_if(eo == EM_EO_UNDEF, + "EO(%d) creation failed!", i); + perf_shm->eo_tbl[i] = eo; + + ret = em_eo_add_queue_sync(eo, queue); + test_fatal_if(ret != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eo, queue); + + ret = em_eo_start_sync(eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + } + + /* Alloc one test event from which references are created */ + em_event_t ev = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(ev == EM_EVENT_UNDEF, + "Event allocation failed"); + + for (int i = 0; i < NUM_EO; i++) { + em_queue_t queue = queues[i]; + em_event_t events[NUM_EVENT_PER_QUEUE]; + + for (int j = 0; j < NUM_EVENT_PER_QUEUE; j++) { + em_event_t ref = em_event_ref(ev); + + test_fatal_if(ref == EM_EVENT_UNDEF, + "Event ref creation failed (%d, %d)", i, j); + events[j] = ref; + } + + /* Send in bursts of 'SEND_MULTI_MAX' events */ + const int send_rounds = NUM_EVENT_PER_QUEUE / SEND_MULTI_MAX; + const int left_over = NUM_EVENT_PER_QUEUE % SEND_MULTI_MAX; + int num_sent = 0; + int m, n; + + for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) { + num_sent += em_send_multi(&events[n], SEND_MULTI_MAX, + queue); + } + if (left_over) { + num_sent += em_send_multi(&events[n], left_over, + queue); + } + test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE, + "Event send multi failed:%d (%d)\n" + "Q:%" PRI_QUEUE "", + num_sent, NUM_EVENT_PER_QUEUE, queue); + } + + /* Free the original event */ + em_free(ev); + env_sync_mem(); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + int i; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + for (i = 0; i < NUM_EO; i++) { + /* Stop & delete EO */ + eo = perf_shm->eo_tbl[i]; + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); + } +} + +void +test_term(void) +{ + const int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if 
(core == 0) { + env_shared_free(perf_shm); + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + (void)eo_context; + (void)eo; + (void)conf; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + * + */ +static em_status_t +perf_stop(void *eo_context, em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + return ret; +} + +/** + * @private + * + * EO receive function for EO A. + * + * Loops back events and calculates the event rate. + */ +static void +perf_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + int64_t events = core_stat.events; + em_status_t ret; + + (void)eo_context; + (void)type; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + /* Start the measurement */ + core_stat.begin_cycles = env_get_cycle(); + } else if (unlikely(events == PRINT_EVENT_COUNT)) { + /* End the measurement */ + core_stat.end_cycles = env_get_cycle(); + /* Print results and restart */ + core_stat.print_count += 1; + print_result(&core_stat); + /* Restart the measurement next round */ + events = -1; /* +1 below => 0 */ + } + + if (ALLOC_FREE_PER_EVENT) { + em_free(event); + event = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); + } + + /* Send the event back into the queue it originated from, i.e. loop */ + ret = em_send(event, queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, queue); + } + + events++; + core_stat.events = events; +} + +/** + * Prints test measurement result + */ +static void +print_result(perf_stat_t *const perf_stat) +{ + uint64_t diff; + uint32_t hz; + double mhz; + double cycles_per_event, events_per_sec; + uint64_t print_count; + + hz = env_core_hz(); + mhz = ((double)hz) / 1000000.0; + + diff = env_cycles_diff(perf_stat->end_cycles, perf_stat->begin_cycles); + + print_count = perf_stat->print_count; + cycles_per_event = ((double)diff) / ((double)perf_stat->events); + events_per_sec = mhz / cycles_per_event; /* Million events/s */ + + APPL_PRINT(RESULT_PRINTF_FMT, cycles_per_event, events_per_sec, + mhz, em_core_id(), print_count); +} diff --git a/programs/performance/pairs.c b/programs/performance/pairs.c index 020b8354..5916fb9a 100644 --- a/programs/performance/pairs.c +++ b/programs/performance/pairs.c @@ -1,558 +1,554 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2019, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine performance test example
- *
- * Measures the average cycles consumed during an event send-sched-receive loop
- * for a certain number of EO pairs in the system. Test has a number of EO
- * pairs, that send ping-pong events. Depending on test dynamics (e.g. single
- * burst in atomic queue) only one EO of a pair might be active at a time.
- *
- * Uses three different queue priority levels that affect scheduling (might
- * starve low prio queues if using a strict prio scheduler).
- */
-
-#include <inttypes.h>
-#include <string.h>
-#include <stdio.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/*
- * Test configuration
- */
-
-/** Number of test EOs and queues. Must be an even number. */
-#define NUM_EO 128
-
-/** Number of events per queue */
-#define NUM_EVENT_PER_QUEUE 32 /* Increase the value to tune performance */
-
-/** sizeof data[DATA_SIZE] in bytes in the event payload */
-#define DATA_SIZE 250
-
-/** Max number of cores */
-#define MAX_NBR_OF_CORES 256
-
-/** The number of events to be received before printing a result */
-#define PRINT_EVENT_COUNT 0xff0000
-
-/** EM Queue type used */
-#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC
-
-/** Define how many events are sent per em_send_multi() call */
-#define SEND_MULTI_MAX 32
-
-/*
- * Options
- */
-
-/** Use different priority levels for the queues created */
-#define USE_DIFF_QUEUE_PRIO_LEVELS 0
-
-/** Alloc and free per event */
-#define ALLOC_FREE_PER_EVENT 0 /* 0=False or 1=True */
-
-/* Result APPL_PRINT() format string */
-#define RESULT_PRINTF_FMT \
-"cycles/event:% -8.2f Mevents/s/core: %-6.2f %5.0f MHz core%02d %" PRIu64 "\n"
-
-/**
- * Performance test statistics (per core)
- */
-typedef struct {
-    int64_t events;
-    uint64_t begin_cycles;
-    uint64_t end_cycles;
-    uint64_t print_count;
-} perf_stat_t;
-
-/**
- * Performance test EO context
- */
-typedef struct {
-    /* Next destination queue */
-    em_queue_t dest;
-} eo_context_t;
-
-/**
- * Performance test event
- */
-typedef struct {
-    uint8_t data[DATA_SIZE];
-} perf_event_t;
-
-/**
- * Perf test shared memory, read-only after start-up, allow cache-line sharing
- */
-typedef struct {
-    /* EO context table */
-    eo_context_t eo_ctx_tbl[NUM_EO];
-    /* EO table */
-    em_eo_t eo_tbl[NUM_EO];
-    /* Event pool used by this application */
-    em_pool_t pool;
-} perf_shm_t;
-
-/** EM-core local pointer to shared memory */
-static ENV_LOCAL perf_shm_t *perf_shm;
-/**
- * Core specific test statistics.
- * - * Allow for 'PRINT_EVENT_COUNT' warm-up rounds, - * incremented per core during receive, measurement starts at 0. - */ -static ENV_LOCAL perf_stat_t core_stat = {.events = -PRINT_EVENT_COUNT}; - -/* - * Local function prototypes - */ - -static em_status_t -perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -perf_stop(void *eo_context, em_eo_t eo); - -static void -perf_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx); - -static void -print_result(perf_stat_t *const perf_stat); - -static em_queue_prio_t -get_queue_priority(const int index); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Init of the Pairs performance test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - perf_shm = env_shared_reserve("PerfSharedMem", - sizeof(perf_shm_t)); - em_register_error_handler(test_error_handler); - } else { - perf_shm = env_shared_lookup("PerfSharedMem"); - } - - if (perf_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Perf init failed on EM-core:%u", em_core_id()); - else if (core == 0) - memset(perf_shm, 0, sizeof(perf_shm_t)); -} - -/** - * Startup of the Pairs performance test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - perf_shm->pool = appl_conf->pools[0]; - else - perf_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - perf_shm->pool); - - test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - /* - * Create and start application pairs. - * Send initial test events to the queues. 
- */ - em_queue_t queues_a[NUM_EO / 2]; - em_queue_t queues_b[NUM_EO / 2]; - - for (int i = 0; i < NUM_EO / 2; i++) { - em_queue_t queue_a, queue_b; - eo_context_t *eo_ctx_a, *eo_ctx_b; - em_eo_t eo; - em_status_t ret, start_ret = EM_ERROR; - - /* Create both queues for the pair */ - queue_a = em_queue_create("queue-A", QUEUE_TYPE, - get_queue_priority(i), - EM_QUEUE_GROUP_DEFAULT, NULL); - queue_b = em_queue_create("queue-B", QUEUE_TYPE, - get_queue_priority(i), - EM_QUEUE_GROUP_DEFAULT, NULL); - test_fatal_if(queue_a == EM_QUEUE_UNDEF || - queue_b == EM_QUEUE_UNDEF, - "Queue creation failed, round:%d", i); - queues_a[i] = queue_a; - queues_b[i] = queue_b; - - /* Create EO "A" */ - eo_ctx_a = &perf_shm->eo_ctx_tbl[2 * i]; - - eo = em_eo_create("pairs-eo-a", perf_start, NULL, perf_stop, - NULL, perf_receive, eo_ctx_a); - test_fatal_if(eo == EM_EO_UNDEF, - "EO(%d) creation failed!", 2 * i); - perf_shm->eo_tbl[2 * i] = eo; - eo_ctx_a->dest = queue_b; - - ret = em_eo_add_queue_sync(eo, queue_a); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - ret, eo, queue_a); - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - - /* Create EO "B" */ - eo_ctx_b = &perf_shm->eo_ctx_tbl[2 * i + 1]; - - eo = em_eo_create("pairs-eo-b", perf_start, NULL, perf_stop, - NULL, perf_receive, eo_ctx_b); - test_fatal_if(eo == EM_EO_UNDEF, - "EO(%d) creation failed!", 2 * i + 1); - perf_shm->eo_tbl[2 * i + 1] = eo; - eo_ctx_b->dest = queue_a; - - ret = em_eo_add_queue_sync(eo, queue_b); - test_fatal_if(ret != EM_OK, - "EO add queue:%" PRI_STAT "\n" - "EO:%" PRI_EO " queue:%" PRI_QUEUE "", - ret, eo, queue_b); - - ret = em_eo_start_sync(eo, &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start:%" PRI_STAT " %" PRI_STAT "", - ret, start_ret); - } - - for (int i = 0; i < NUM_EO / 2; i++) { - em_queue_t queue_a = queues_a[i]; - em_queue_t queue_b = queues_b[i]; - em_event_t events_a[NUM_EVENT_PER_QUEUE]; - em_event_t events_b[NUM_EVENT_PER_QUEUE]; - - /* Alloc and send test events */ - for (int j = 0; j < NUM_EVENT_PER_QUEUE; j++) { - em_event_t ev_a, ev_b; - - ev_a = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - ev_b = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - test_fatal_if(ev_a == EM_EVENT_UNDEF || - ev_b == EM_EVENT_UNDEF, - "Event allocation failed (%d, %d)", i, j); - events_a[j] = ev_a; - events_b[j] = ev_b; - } - - /* Send in bursts of 'SEND_MULTI_MAX' events */ - const int send_rounds = NUM_EVENT_PER_QUEUE / SEND_MULTI_MAX; - const int left_over = NUM_EVENT_PER_QUEUE % SEND_MULTI_MAX; - int num_sent = 0; - int m, n; - - for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) { - num_sent += em_send_multi(&events_a[n], SEND_MULTI_MAX, - queue_a); - } - if (left_over) { - num_sent += em_send_multi(&events_a[n], left_over, - queue_a); - } - test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE, - "Event send multi failed:%d (%d)\n" - "Q:%" PRI_QUEUE "", - num_sent, NUM_EVENT_PER_QUEUE, queue_a); - - num_sent = 0; - for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) { - num_sent += em_send_multi(&events_b[n], SEND_MULTI_MAX, - queue_b); - } - if (left_over) { - num_sent += em_send_multi(&events_b[n], left_over, - queue_b); - } - test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE, - "Event send multi failed:%d (%d)\n" - "Q:%" PRI_QUEUE "", - num_sent, NUM_EVENT_PER_QUEUE, 
queue_b); - } - - env_sync_mem(); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - int i; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - for (i = 0; i < NUM_EO; i++) { - /* Stop & delete EO */ - eo = perf_shm->eo_tbl[i]; - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - } -} - -void -test_term(void) -{ - const int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(perf_shm); - em_unregister_error_handler(); - } -} - -/** - * @private - * - * EO start function. - * - */ -static em_status_t -perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - (void)eo_context; - (void)eo; - (void)conf; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - * - */ -static em_status_t -perf_stop(void *eo_context, em_eo_t eo) -{ - em_status_t ret; - - (void)eo_context; - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - return ret; -} - -/** - * @private - * - * EO receive function for EO A. - * - * Loops back events and calculates the event rate. - */ -static void -perf_receive(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *queue_context) -{ - int64_t events = core_stat.events; - eo_context_t *const eo_ctx = eo_context; - const em_queue_t dst_queue = eo_ctx->dest; - em_status_t ret; - - (void)type; - (void)queue; - (void)queue_context; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (unlikely(events == 0)) { - /* Start the measurement */ - core_stat.begin_cycles = env_get_cycle(); - } else if (unlikely(events == PRINT_EVENT_COUNT)) { - /* End the measurement */ - core_stat.end_cycles = env_get_cycle(); - /* Print results and restart */ - core_stat.print_count += 1; - print_result(&core_stat); - /* Restart the measurement next round */ - events = -1; /* +1 below => 0 */ - } - - if (ALLOC_FREE_PER_EVENT) { - em_free(event); - event = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); - } - - /* Send the event into the next queue */ - ret = em_send(event, dst_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, dst_queue); - } - - events++; - core_stat.events = events; -} - -/** - * Get queue priority value based on the index number. 
- * - * @param Queue index - * - * @return Queue priority value - * - * @note Priority distribution: 40% LOW, 40% NORMAL, 20% HIGH - */ -static em_queue_prio_t -get_queue_priority(const int queue_index) -{ - em_queue_prio_t prio; - - if (USE_DIFF_QUEUE_PRIO_LEVELS) { - int remainder = queue_index % 5; - - if (remainder <= 1) - prio = EM_QUEUE_PRIO_LOW; - else if (remainder <= 3) - prio = EM_QUEUE_PRIO_NORMAL; - else - prio = EM_QUEUE_PRIO_HIGH; - } else { - prio = EM_QUEUE_PRIO_NORMAL; - } - - return prio; -} - -/** - * Prints test measurement result - */ -static void -print_result(perf_stat_t *const perf_stat) -{ - uint64_t diff; - uint32_t hz; - double mhz; - double cycles_per_event, events_per_sec; - uint64_t print_count; - - hz = env_core_hz(); - mhz = ((double)hz) / 1000000.0; - - if (perf_stat->end_cycles > perf_stat->begin_cycles) - diff = perf_stat->end_cycles - perf_stat->begin_cycles; - else - diff = UINT64_MAX - perf_stat->begin_cycles + - perf_stat->end_cycles + 1; - - print_count = perf_stat->print_count; - cycles_per_event = ((double)diff) / ((double)perf_stat->events); - events_per_sec = mhz / cycles_per_event; /* Million events/s */ - - APPL_PRINT(RESULT_PRINTF_FMT, cycles_per_event, events_per_sec, - mhz, em_core_id(), print_count); -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2019, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine performance test example + * + * Measures the average cycles consumed during an event send-sched-receive loop + * for a certain number of EO pairs in the system. Test has a number of EO + * pairs, that send ping-pong events. Depending on test dynamics (e.g. single + * burst in atomic queue) only one EO of a pair might be active at a time. + * + * Uses three different queue priority levels that affect scheduling (might + * starve low prio queues if using a strict prio scheduler). 
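+ *
+ * As a worked example of the mapping done by get_queue_priority() below
+ * (queue_index % 5, only when USE_DIFF_QUEUE_PRIO_LEVELS is enabled):
+ * indexes 0-1 => EM_QUEUE_PRIO_LOW, 2-3 => EM_QUEUE_PRIO_NORMAL and
+ * 4 => EM_QUEUE_PRIO_HIGH, i.e. a 40% LOW / 40% NORMAL / 20% HIGH split.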
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/*
+ * Test configuration
+ */
+
+/** Number of test EOs and queues. Must be an even number. */
+#define NUM_EO 128
+
+/** Number of events per queue */
+#define NUM_EVENT_PER_QUEUE 32 /* Increase the value to tune performance */
+
+/** sizeof data[DATA_SIZE] in bytes in the event payload */
+#define DATA_SIZE 250
+
+/** Max number of cores */
+#define MAX_NBR_OF_CORES 256
+
+/** The number of events to be received before printing a result */
+#define PRINT_EVENT_COUNT 0xff0000
+
+/** EM Queue type used */
+#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC
+
+/** Define how many events are sent per em_send_multi() call */
+#define SEND_MULTI_MAX 32
+
+/*
+ * Options
+ */
+
+/** Use different priority levels for the queues created */
+#define USE_DIFF_QUEUE_PRIO_LEVELS 0
+
+/** Alloc and free per event */
+#define ALLOC_FREE_PER_EVENT 0 /* 0=False or 1=True */
+
+/* Result APPL_PRINT() format string */
+#define RESULT_PRINTF_FMT \
+"cycles/event:% -8.2f Mevents/s/core: %-6.2f %5.0f MHz core%02d %" PRIu64 "\n"
+
+/**
+ * Performance test statistics (per core)
+ */
+typedef struct {
+    int64_t events;
+    uint64_t begin_cycles;
+    uint64_t end_cycles;
+    uint64_t print_count;
+} perf_stat_t;
+
+/**
+ * Performance test EO context
+ */
+typedef struct {
+    /* Next destination queue */
+    em_queue_t dest;
+} eo_context_t;
+
+/**
+ * Performance test event
+ */
+typedef struct {
+    uint8_t data[DATA_SIZE];
+} perf_event_t;
+
+/**
+ * Perf test shared memory, read-only after start-up, allow cache-line sharing
+ */
+typedef struct {
+    /* EO context table */
+    eo_context_t eo_ctx_tbl[NUM_EO];
+    /* EO table */
+    em_eo_t eo_tbl[NUM_EO];
+    /* Event pool used by this application */
+    em_pool_t pool;
+} perf_shm_t;
+
+/** EM-core local pointer to shared memory */
+static ENV_LOCAL perf_shm_t *perf_shm;
+/**
+ * Core specific test statistics.
+ *
+ * Allow for 'PRINT_EVENT_COUNT' warm-up rounds,
+ * incremented per core during receive, measurement starts at 0.
+ */
+static ENV_LOCAL perf_stat_t core_stat = {.events = -PRINT_EVENT_COUNT};
+
+/*
+ * Local function prototypes
+ */
+
+static em_status_t
+perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf);
+
+static em_status_t
+perf_stop(void *eo_context, em_eo_t eo);
+
+static void
+perf_receive(void *eo_context, em_event_t event, em_event_type_t type,
+             em_queue_t queue, void *q_ctx);
+
+static void
+print_result(perf_stat_t *const perf_stat);
+
+static em_queue_prio_t
+get_queue_priority(const int index);
+
+/**
+ * Main function
+ *
+ * Call cm_setup() to perform test & EM setup common for all the
+ * test applications.
+ *
+ * cm_setup() will call test_init() and test_start() and launch
+ * the EM dispatch loop on every EM-core.
+ */
+int main(int argc, char *argv[])
+{
+    return cm_setup(argc, argv);
+}
+
+/**
+ * Init of the Pairs performance test application.
+ *
+ * @attention Run on all cores.
+ *
+ * @see cm_setup() for setup and dispatch.
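+ *
+ * Sketch of the pattern used below: EM-core 0 reserves the named shared
+ * memory block ("PerfSharedMem") and registers the error handler; all
+ * other cores look the block up by name, so every core ends up with the
+ * same perf_shm pointer.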
+ */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(test_error_handler); + } else { + perf_shm = env_shared_lookup("PerfSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf init failed on EM-core:%u", em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Pairs performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + perf_shm->pool = appl_conf->pools[0]; + else + perf_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + perf_shm->pool); + + test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + /* + * Create and start application pairs. + * Send initial test events to the queues. + */ + em_queue_t queues_a[NUM_EO / 2]; + em_queue_t queues_b[NUM_EO / 2]; + + for (int i = 0; i < NUM_EO / 2; i++) { + em_queue_t queue_a, queue_b; + eo_context_t *eo_ctx_a, *eo_ctx_b; + em_eo_t eo; + em_status_t ret, start_ret = EM_ERROR; + + /* Create both queues for the pair */ + queue_a = em_queue_create("queue-A", QUEUE_TYPE, + get_queue_priority(i), + EM_QUEUE_GROUP_DEFAULT, NULL); + queue_b = em_queue_create("queue-B", QUEUE_TYPE, + get_queue_priority(i), + EM_QUEUE_GROUP_DEFAULT, NULL); + test_fatal_if(queue_a == EM_QUEUE_UNDEF || + queue_b == EM_QUEUE_UNDEF, + "Queue creation failed, round:%d", i); + queues_a[i] = queue_a; + queues_b[i] = queue_b; + + /* Create EO "A" */ + eo_ctx_a = &perf_shm->eo_ctx_tbl[2 * i]; + + eo = em_eo_create("pairs-eo-a", perf_start, NULL, perf_stop, + NULL, perf_receive, eo_ctx_a); + test_fatal_if(eo == EM_EO_UNDEF, + "EO(%d) creation failed!", 2 * i); + perf_shm->eo_tbl[2 * i] = eo; + eo_ctx_a->dest = queue_b; + + ret = em_eo_add_queue_sync(eo, queue_a); + test_fatal_if(ret != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, eo, queue_a); + + ret = em_eo_start_sync(eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start:%" PRI_STAT " %" PRI_STAT "", + ret, start_ret); + + /* Create EO "B" */ + eo_ctx_b = &perf_shm->eo_ctx_tbl[2 * i + 1]; + + eo = em_eo_create("pairs-eo-b", perf_start, NULL, perf_stop, + NULL, perf_receive, eo_ctx_b); + test_fatal_if(eo == EM_EO_UNDEF, + "EO(%d) creation failed!", 2 * i + 1); + perf_shm->eo_tbl[2 * i + 1] = eo; + eo_ctx_b->dest = queue_a; + + ret = em_eo_add_queue_sync(eo, queue_b); + test_fatal_if(ret != EM_OK, + "EO add queue:%" PRI_STAT "\n" + "EO:%" PRI_EO " queue:%" PRI_QUEUE "", + ret, eo, queue_b); + + ret = em_eo_start_sync(eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start:%" PRI_STAT " %" PRI_STAT "", + ret, 
start_ret); + } + + for (int i = 0; i < NUM_EO / 2; i++) { + em_queue_t queue_a = queues_a[i]; + em_queue_t queue_b = queues_b[i]; + em_event_t events_a[NUM_EVENT_PER_QUEUE]; + em_event_t events_b[NUM_EVENT_PER_QUEUE]; + + /* Alloc and send test events */ + for (int j = 0; j < NUM_EVENT_PER_QUEUE; j++) { + em_event_t ev_a, ev_b; + + ev_a = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + ev_b = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(ev_a == EM_EVENT_UNDEF || + ev_b == EM_EVENT_UNDEF, + "Event allocation failed (%d, %d)", i, j); + events_a[j] = ev_a; + events_b[j] = ev_b; + } + + /* Send in bursts of 'SEND_MULTI_MAX' events */ + const int send_rounds = NUM_EVENT_PER_QUEUE / SEND_MULTI_MAX; + const int left_over = NUM_EVENT_PER_QUEUE % SEND_MULTI_MAX; + int num_sent = 0; + int m, n; + + for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) { + num_sent += em_send_multi(&events_a[n], SEND_MULTI_MAX, + queue_a); + } + if (left_over) { + num_sent += em_send_multi(&events_a[n], left_over, + queue_a); + } + test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE, + "Event send multi failed:%d (%d)\n" + "Q:%" PRI_QUEUE "", + num_sent, NUM_EVENT_PER_QUEUE, queue_a); + + num_sent = 0; + for (m = 0, n = 0; m < send_rounds; m++, n += SEND_MULTI_MAX) { + num_sent += em_send_multi(&events_b[n], SEND_MULTI_MAX, + queue_b); + } + if (left_over) { + num_sent += em_send_multi(&events_b[n], left_over, + queue_b); + } + test_fatal_if(num_sent != NUM_EVENT_PER_QUEUE, + "Event send multi failed:%d (%d)\n" + "Q:%" PRI_QUEUE "", + num_sent, NUM_EVENT_PER_QUEUE, queue_b); + } + + env_sync_mem(); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + int i; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + for (i = 0; i < NUM_EO; i++) { + /* Stop & delete EO */ + eo = perf_shm->eo_tbl[i]; + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); + } +} + +void +test_term(void) +{ + const int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(perf_shm); + em_unregister_error_handler(); + } +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +perf_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + (void)eo_context; + (void)eo; + (void)conf; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + * + */ +static em_status_t +perf_stop(void *eo_context, em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + return ret; +} + +/** + * @private + * + * EO receive function for EO A. + * + * Loops back events and calculates the event rate. 
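+ *
+ * Measurement window (sketch): core_stat.events counts up from
+ * -PRINT_EVENT_COUNT (warm-up rounds) through 0, where begin_cycles is
+ * sampled, to PRINT_EVENT_COUNT, where end_cycles is sampled, the result
+ * is printed and the count is restarted. Both EOs of a pair use this same
+ * receive function, each forwarding to the queue in its own EO context.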
+ */ +static void +perf_receive(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *queue_context) +{ + int64_t events = core_stat.events; + eo_context_t *const eo_ctx = eo_context; + const em_queue_t dst_queue = eo_ctx->dest; + em_status_t ret; + + (void)type; + (void)queue; + (void)queue_context; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (unlikely(events == 0)) { + /* Start the measurement */ + core_stat.begin_cycles = env_get_cycle(); + } else if (unlikely(events == PRINT_EVENT_COUNT)) { + /* End the measurement */ + core_stat.end_cycles = env_get_cycle(); + /* Print results and restart */ + core_stat.print_count += 1; + print_result(&core_stat); + /* Restart the measurement next round */ + events = -1; /* +1 below => 0 */ + } + + if (ALLOC_FREE_PER_EVENT) { + em_free(event); + event = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Event alloc fails"); + } + + /* Send the event into the next queue */ + ret = em_send(event, dst_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "Send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, dst_queue); + } + + events++; + core_stat.events = events; +} + +/** + * Get queue priority value based on the index number. + * + * @param Queue index + * + * @return Queue priority value + * + * @note Priority distribution: 40% LOW, 40% NORMAL, 20% HIGH + */ +static em_queue_prio_t +get_queue_priority(const int queue_index) +{ + em_queue_prio_t prio; + + if (USE_DIFF_QUEUE_PRIO_LEVELS) { + int remainder = queue_index % 5; + + if (remainder <= 1) + prio = EM_QUEUE_PRIO_LOW; + else if (remainder <= 3) + prio = EM_QUEUE_PRIO_NORMAL; + else + prio = EM_QUEUE_PRIO_HIGH; + } else { + prio = EM_QUEUE_PRIO_NORMAL; + } + + return prio; +} + +/** + * Prints test measurement result + */ +static void +print_result(perf_stat_t *const perf_stat) +{ + uint64_t diff; + uint32_t hz; + double mhz; + double cycles_per_event, events_per_sec; + uint64_t print_count; + + hz = env_core_hz(); + mhz = ((double)hz) / 1000000.0; + + diff = env_cycles_diff(perf_stat->end_cycles, perf_stat->begin_cycles); + + print_count = perf_stat->print_count; + cycles_per_event = ((double)diff) / ((double)perf_stat->events); + events_per_sec = mhz / cycles_per_event; /* Million events/s */ + + APPL_PRINT(RESULT_PRINTF_FMT, cycles_per_event, events_per_sec, + mhz, em_core_id(), print_count); +} diff --git a/programs/performance/queues.c b/programs/performance/queues.c index 6ccf33ce..aa8ab526 100644 --- a/programs/performance/queues.c +++ b/programs/performance/queues.c @@ -1,1031 +1,1031 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine performance test.
- *
- * Measures the average cycles consumed during an event send-sched-receive loop
- * for a certain number of queues and events in the system. The test increases
- * the number of queues[+events] for each measurement round and prints the
- * results. The test will stop if the maximum number of supported queues by the
- * system is reached.
- *
- * Plot the cycles/event to get an idea of how the system scales with an
- * increasing number of queues.
- */
-
-#include <inttypes.h>
-#include <string.h>
-#include <stdio.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/*
- * Test options:
- */
-
-/* Alloc and free per event */
-#define ALLOC_FREE_PER_EVENT 0 /* false=0 or true=1 */
-
-/*
- * Create all EM queues at startup or create the queues during
- * the test in steps.
- */
-#define CREATE_ALL_QUEUES_AT_STARTUP 0 /* false=0 or true=1 */
-
-/*
- * Measure the send-enqueue-schedule-receive latency. Measured separately for
- * 'high priority and 'low priority' queues (ratio 1:4).
- */
-#define MEASURE_LATENCY 1 /* false=0 or true=1 */
-
-/*
- * Keep the number of events constant while increasing the number of queues.
- * Should be dividable by or factor of queue_step.
- */
-#define CONST_NUM_EVENTS 4096 /* true>0 or false=0 */
-
-/*
- * Test configuration:
- */
-
-#define MAX_CORES 64
-
-/* Number of EO's and queues in a loop */
-#define NUM_EOS 4
-
-/* Number of events per queue */
-#define NUM_EVENTS 4
-
-#if CONST_NUM_EVENTS > 0
-/*
- * Total number of queues when using a constant number of events.
- * Make sure that all queues get 'NUM_EVENTS' events per queue.
- */
-#define NUM_QUEUES (CONST_NUM_EVENTS / NUM_EVENTS)
-#else
-/*
- * Total number of queues when increasing the total event count for each queue
- * step.
- */
-#define NUM_QUEUES (NUM_EOS * 16 * 1024)
-#endif
-
-/* Number of data bytes in an event */
-#define DATA_SIZE 128
-
-/* Samples before adding more queues */
-#define NUM_SAMPLES (1 + 8) /* setup(1) + measure(N) */
-
-/* Num events a core processes between samples */
-#define EVENTS_PER_SAMPLE 0x400000
-
-/* EM queue type */
-#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC
-
-/* Core states during test.
*/ -#define CORE_STATE_MEASURE 0 -#define CORE_STATE_IDLE 1 - -/* Result APPL_PRINT() format string */ -#define RESULT_PRINTF_HDR "Cycles/Event Events/s cpu-freq\n" -#define RESULT_PRINTF_FMT "%12.0f %7.0f M %5.0f MHz %" PRIu64 "\n" - -/* Result APPL_PRINT() format string when MEASURE_LATENCY is used */ -#define RESULT_PRINTF_LATENCY_HDR \ -"Cycles/ Events/ Latency:\n" \ -" Event Sec hi-ave hi-max lo-ave lo-max cpu-freq\n" -#define RESULT_PRINTF_LATENCY_FMT \ -"%6.0f %7.2f M %8.0f %7" PRIu64 " %7.0f %7" PRIu64 " %5.0f MHz %" PRIu64 "\n" - -/* - * The number of scheduled queues to use in each test step. - * - * NOTE: The max queue step is always 'NUM_QUEUES', even if the value of - * 'NUM_QUEUES' would be smaller than a listed queue step (then just stop - * before reaching the end of the list). - */ -static const int queue_steps[] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, - 4096, 8192, 16384, 32768, 65536, NUM_QUEUES}; - -/** - * Test state, - * cache line alignment and padding handled in 'perf_shm_t' - */ -typedef struct { - int queues; - int step; - int samples; - int num_cores; - int reset_flag; - double cpu_mhz; - uint64_t cpu_hz; - uint64_t print_count; - env_atomic64_t ready_count; - /* if using CONST_NUM_EVENTS:*/ - int free_flag; - env_atomic64_t freed_count; -} test_status_t; - -/** - * Performance test statistics (per core) - */ -typedef struct { - uint64_t events; - env_time_t begin_time; - env_time_t end_time; - env_time_t diff_time; - struct { - uint64_t events; - env_time_t hi_prio_ave; - env_time_t hi_prio_max; - env_time_t lo_prio_ave; - env_time_t lo_prio_max; - } latency; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} core_stat_t; - -COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, - CORE_STAT_SIZE_ERROR); - -/** - * EO context data - */ -typedef struct { - em_eo_t eo_id; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} eo_context_t; - -COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, - EO_CONTEXT_T__SIZE_ERROR); - -/** - * Queue context data - */ -typedef struct { - /** This queue */ - em_queue_t this_queue; - /** Next queue */ - em_queue_t next_queue; - /** Priority of 'this_queue' */ - em_queue_prio_t prio; - /** Type of 'this_queue' */ - em_queue_type_t type; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} queue_context_t; - -COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, - QUEUE_CONTEXT_SIZE_ERROR); - -/** - * Performance test event - */ -typedef struct { - /* Send time stamp */ - env_time_t send_time; - /* Sequence number */ - int seq; - /* Test data */ - uint8_t data[DATA_SIZE]; -} perf_event_t; - -/** - * Test shared memory - */ -typedef struct { - /* Event pool used by this application */ - em_pool_t pool; - - test_status_t test_status ENV_CACHE_LINE_ALIGNED; - - core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; - - eo_context_t eo_context_tbl[NUM_EOS] ENV_CACHE_LINE_ALIGNED; - - queue_context_t queue_context_tbl[NUM_QUEUES] ENV_CACHE_LINE_ALIGNED; - /* EO ID's */ - em_eo_t eo[NUM_EOS] ENV_CACHE_LINE_ALIGNED; -} perf_shm_t; - -COMPILE_TIME_ASSERT(sizeof(perf_shm_t) % ENV_CACHE_LINE_SIZE == 0, - PERF_SHM_T__SIZE_ERROR); - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL perf_shm_t *perf_shm; - -/* EM-core local state */ -static ENV_LOCAL int core_state = CORE_STATE_MEASURE; - -static em_status_t -error_handler(em_eo_t eo, em_status_t error, 
em_escope_t escope, va_list args); - -static void -queue_step(void); - -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -stop(void *eo_context, em_eo_t eo); - -static void -receive_func(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_context); - -static int -update_test_state(em_event_t event); - -static void -create_and_link_queues(int start_queue, int num_queues); - -static void -print_test_statistics(test_status_t *test_status, int print_header, - core_stat_t core_stat[]); - -static inline em_event_t -alloc_free_per_event(em_event_t event); - -static inline void -measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, - env_time_t recv_time); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Test error handler - * - * @param eo Execution object id - * @param error The error code - * @param escope Error scope - * @param args List of arguments (__FILE__, __func__, __LINE__, - * (format), ## __VA_ARGS__) - * - * @return The original error code. - */ -static em_status_t -error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) -{ - if (escope == EM_ESCOPE_QUEUE_CREATE && !EM_ERROR_IS_FATAL(error)) { - APPL_PRINT("\nUnable to create more queues\n\n" - "Test finished\n"); - raise(SIGINT); - return error; - } - - if (appl_shm->exit_flag && EM_ESCOPE(escope) && - !EM_ERROR_IS_FATAL(error)) { - /* Suppress non-fatal EM-error logs during tear-down */ - if (escope == EM_ESCOPE_EO_ADD_QUEUE_SYNC) { - APPL_PRINT("\nExit: suppress queue setup error\n\n"); - return error; - } - } - - return test_error_handler(eo, error, escope, args); -} - -/** - * Init of the Queues performance test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - perf_shm = env_shared_reserve("PerfQueuesSharedMem", - sizeof(perf_shm_t)); - em_register_error_handler(error_handler); - } else { - perf_shm = env_shared_lookup("PerfQueuesSharedMem"); - } - - if (perf_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Perf test queues init failed on EM-core: %u\n", - em_core_id()); - else if (core == 0) - memset(perf_shm, 0, sizeof(perf_shm_t)); -} - -/** - * Startup of the Queues performance test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - eo_context_t *eo_ctx; - em_status_t ret, start_ret = EM_ERROR; - const int q_ctx_size = sizeof(perf_shm->queue_context_tbl); - int i; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - perf_shm->pool = appl_conf->pools[0]; - else - perf_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - " Max. 
NUM_QUEUES: %i\n" - " sizeof queue_context_tbl: %i kB\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - perf_shm->pool, NUM_QUEUES, q_ctx_size / 1024); - - test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - perf_shm->test_status.cpu_hz = env_core_hz(); - perf_shm->test_status.cpu_mhz = (double)perf_shm->test_status.cpu_hz / - 1000000.0; - perf_shm->test_status.reset_flag = 0; - perf_shm->test_status.num_cores = em_core_count(); - perf_shm->test_status.free_flag = 0; - - env_atomic64_init(&perf_shm->test_status.ready_count); - env_atomic64_init(&perf_shm->test_status.freed_count); - - /* Create EOs */ - for (i = 0; i < NUM_EOS; i++) { - eo_ctx = &perf_shm->eo_context_tbl[i]; - perf_shm->eo[i] = em_eo_create("perf test eo", start, NULL, - stop, NULL, receive_func, - eo_ctx); - test_fatal_if(perf_shm->eo[i] == EM_EO_UNDEF, - "EO create failed:%d", i, NUM_EOS); - } - - APPL_PRINT(" EOs created\n"); - - /* - * Create and link queues - */ - if (CREATE_ALL_QUEUES_AT_STARTUP) /* Create ALL queues at once */ - create_and_link_queues(0, NUM_QUEUES); - else /* Create queues for the first step, then more before each step */ - create_and_link_queues(0, queue_steps[0]); - - /* Start EOs */ - for (i = 0; i < NUM_EOS; i++) { - ret = em_eo_start_sync(perf_shm->eo[i], &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start(%d):%" PRI_STAT " %" PRI_STAT "", - i, ret, start_ret); - } - - queue_step(); -} - -/** - * Stop the test, only run on one core - */ -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - int i; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - /* Stop EOs */ - for (i = 0; i < NUM_EOS; i++) { - eo = perf_shm->eo[i]; - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", - eo, ret); - } - - /* Remove and delete all of the EO's queues, then delete the EO */ - for (i = 0; i < NUM_EOS; i++) { - eo = perf_shm->eo[i]; - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE/*delete Qs*/); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", - eo, ret); - } -} - -/** - * Terminate the test, only run on one core - */ -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - env_shared_free(perf_shm); - em_unregister_error_handler(); -} - -/** - * Allocate, initialize and send test step events. 
- */ -static void -queue_step(void) -{ - queue_context_t *q_ctx; - em_event_t event; - perf_event_t *perf_event; - em_status_t ret; - const int first = perf_shm->test_status.queues; - const int step = perf_shm->test_status.step; - const int queue_count = queue_steps[step]; - int i, j; - - /* Allocate and send test events for the queues in the first step */ - if (CONST_NUM_EVENTS) { - for (i = 0; i < CONST_NUM_EVENTS; i++) { - event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "EM alloc failed (%i)", i); - perf_event = em_event_pointer(event); - perf_event->seq = i; - perf_event->send_time = env_time_global(); - - /* Allocate events evenly to the queues */ - q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; - ret = em_send(event, q_ctx->this_queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT "\n" - "Queue:%" PRI_QUEUE "", - ret, q_ctx->this_queue); - em_free(event); - return; - } - } - } else { - for (i = first; i < queue_count; i++) { - q_ctx = &perf_shm->queue_context_tbl[i]; - - for (j = 0; j < NUM_EVENTS; j++) { - event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "EM alloc failed (%d)", i); - - perf_event = em_event_pointer(event); - perf_event->seq = i * NUM_EVENTS + j; - perf_event->send_time = env_time_global(); - - ret = em_send(event, q_ctx->this_queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT "\n" - "Queue:%" PRI_QUEUE "", - ret, q_ctx->this_queue); - em_free(event); - return; - } - } - } - } - - perf_shm->test_status.queues = queue_count; - perf_shm->test_status.step++; - - APPL_PRINT("\nNumber of queues: %d\n", perf_shm->test_status.queues); - if (CONST_NUM_EVENTS) - APPL_PRINT("Number of events: %d\n", CONST_NUM_EVENTS); - else - APPL_PRINT("Number of events: %d\n", - perf_shm->test_status.queues * NUM_EVENTS); -} - -/** - * @private - * - * EO start function. - * - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - eo_ctx->eo_id = eo; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t -stop(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - return EM_OK; -} - -/** - * @private - * - * EO receive function. - * - * Loops back events and calculates the event rate. 
- */ -static void -receive_func(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_context) -{ - env_time_t recv_time; - perf_event_t *perf_event; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (MEASURE_LATENCY) { - recv_time = env_time_global(); - perf_event = em_event_pointer(event); - } - - queue_context_t *q_ctx; - em_queue_t dst_queue; - em_status_t ret; - int do_return; - - (void)eo_context; - (void)type; - - q_ctx = q_context; - - /* - * Helper: Update the test state, count recv events, - * calc & print stats, prepare for next step - */ - do_return = update_test_state(event); - if (unlikely(do_return)) - return; - - if (ALLOC_FREE_PER_EVENT) - event = alloc_free_per_event(event); - - dst_queue = q_ctx->next_queue; - test_fatal_if(queue != q_ctx->this_queue, "Queue config error"); - - if (MEASURE_LATENCY) { - measure_latency(perf_event, q_ctx, recv_time); - perf_event->send_time = env_time_global(); - } - /* Send the event to the next queue */ - ret = em_send(event, dst_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, dst_queue); - } -} - -/** - * Receive function helper: Update the test state - * - * Calculates the number of received events, maintains & prints test statistics - * and restarts/reconfigures the test for the next queue/event-setup - * - * @return '1' if the caller receive function should immediately return, - * '0' otherwise - */ -static inline int -update_test_state(em_event_t event) -{ - uint64_t events; - uint64_t freed_count; - uint64_t ready_count; - const int core = em_core_id(); - test_status_t *const tstat = &perf_shm->test_status; - core_stat_t *const cstat = &perf_shm->core_stat[core]; - - events = cstat->events; - events++; - - if (unlikely(tstat->reset_flag)) { - events = 0; - if (CONST_NUM_EVENTS) { - /* Free all old events before allocating new ones. */ - if (unlikely(tstat->free_flag)) { - em_free(event); - freed_count = - env_atomic64_add_return(&tstat->freed_count, 1); - if (freed_count == CONST_NUM_EVENTS) { - /* Last event */ - env_atomic64_set(&tstat->freed_count, - 0); - tstat->reset_flag = 0; - tstat->free_flag = 0; - queue_step(); - } - /* Req caller receive-func to return */ - return 1; - } - } - - if (unlikely(core_state != CORE_STATE_IDLE)) { - core_state = CORE_STATE_IDLE; - cstat->begin_time = ENV_TIME_NULL; - - ready_count = - env_atomic64_add_return(&tstat->ready_count, 1); - - if (ready_count == (uint64_t)tstat->num_cores) { - env_atomic64_set(&tstat->ready_count, 0); - - if (CONST_NUM_EVENTS) { - int sample = tstat->samples; - int queues = tstat->queues; - - if (sample == 0 && queues < NUM_QUEUES) - tstat->free_flag = 1; - else - tstat->reset_flag = 0; - } else { - tstat->reset_flag = 0; - } - } - } - } else if (unlikely(events == 1)) { - cstat->begin_time = env_time_global(); - cstat->latency.events = 0; - cstat->latency.hi_prio_ave = ENV_TIME_NULL; - cstat->latency.hi_prio_max = ENV_TIME_NULL; - cstat->latency.lo_prio_ave = ENV_TIME_NULL; - cstat->latency.lo_prio_max = ENV_TIME_NULL; - - core_state = CORE_STATE_MEASURE; - } else if (unlikely(events == EVENTS_PER_SAMPLE)) { - /* - * Measurements done for this step. Store results and continue - * receiving events until all cores are done. 
- */ - env_time_t begin_time, end_time; - - cstat->end_time = env_time_global(); - - end_time = cstat->end_time; - begin_time = cstat->begin_time; - cstat->diff_time = env_time_diff(end_time, begin_time); - - ready_count = env_atomic64_add_return(&tstat->ready_count, 1); - - /* - * Check whether all cores are done with the step, - * and if done proceed to the next step - */ - if (unlikely((int)ready_count == tstat->num_cores)) { - /* No real need for atomicity here, ran on last core*/ - env_atomic64_set(&tstat->ready_count, 0); - - tstat->reset_flag = 1; - tstat->samples++; - - /* - * Print statistics. - * Omit prints for the first sample round to allow the - * test to stabilize after setups and teardowns. - */ - if (tstat->samples > 1) { - int print_header = tstat->samples == 2 ? 1 : 0; - - print_test_statistics(tstat, print_header, - perf_shm->core_stat); - } - - /* - * Start next test step - setup new queues - */ - if (tstat->samples == NUM_SAMPLES && - tstat->queues < NUM_QUEUES) { - if (!CREATE_ALL_QUEUES_AT_STARTUP) { - int step = tstat->step; - int first_q = tstat->queues; - int num_qs = queue_steps[step] - - queue_steps[step - 1]; - - create_and_link_queues(first_q, num_qs); - } - - if (!CONST_NUM_EVENTS) - queue_step(); - - tstat->samples = 0; - } - } - } - - cstat->events = events; - - return 0; -} - -/** - * Creates a number of EM queues, associates them with EOs, and links them. - */ -static void -create_and_link_queues(int start_queue, int num_queues) -{ - int i, j; - em_queue_t queue, prev_queue; - em_queue_prio_t prio; - em_queue_type_t type; - em_status_t ret; - queue_context_t *q_ctx; - - APPL_PRINT("\nCreate new queues: %d\n", num_queues); - - if (num_queues % NUM_EOS != 0) { - APPL_PRINT("%s() arg 'num_queues'=%d not multiple of NUM_EOS=%d\n", - __func__, num_queues, NUM_EOS); - return; - } - - for (i = start_queue; i < (start_queue + num_queues); i += NUM_EOS) { - prev_queue = EM_QUEUE_UNDEF; - - for (j = 0; j < NUM_EOS; j++) { - prio = EM_QUEUE_PRIO_NORMAL; - - if (MEASURE_LATENCY) { - if (j == 0) - prio = EM_QUEUE_PRIO_HIGH; - } - - type = QUEUE_TYPE; - queue = em_queue_create("queue", type, prio, - EM_QUEUE_GROUP_DEFAULT, NULL); - if (queue == EM_QUEUE_UNDEF) { - APPL_PRINT("Max nbr of supported queues: %d\n", - i); - return; - } - - q_ctx = &perf_shm->queue_context_tbl[i + j]; - - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "em_queue_set_context():%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, perf_shm->eo[j], queue); - - ret = em_eo_add_queue_sync(perf_shm->eo[j], queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "em_eo_add_queue_sync():%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, perf_shm->eo[j], queue); - em_queue_delete(queue); - return; - } - /* Link queues */ - q_ctx->this_queue = queue; - q_ctx->next_queue = prev_queue; - q_ctx->prio = prio; - q_ctx->type = type; - prev_queue = queue; - } - - /* Connect first queue to the last */ - q_ctx = &perf_shm->queue_context_tbl[i + 0]; - q_ctx->next_queue = prev_queue; - } - - APPL_PRINT("New Qs created:%d First:%" PRI_QUEUE " Last:%" PRI_QUEUE "\n", - num_queues, - perf_shm->queue_context_tbl[start_queue].this_queue, - perf_shm->queue_context_tbl[start_queue + - num_queues - 1].this_queue); -} - -/** - * Print test statistics - */ -static void -print_test_statistics(test_status_t *test_status, int print_header, - core_stat_t core_stat[]) -{ - const int num_cores = test_status->num_cores; - const uint64_t cpu_hz = 
test_status->cpu_hz; - const double cpu_mhz = test_status->cpu_mhz; - const uint64_t total_events = (uint64_t)num_cores * EVENTS_PER_SAMPLE; - const uint64_t print_count = test_status->print_count++; - env_time_t total_time = ENV_TIME_NULL; - - for (int i = 0; i < num_cores; i++) - total_time = env_time_sum(total_time, core_stat[i].diff_time); - - double cycles_per_event = 0.0; - double events_per_sec = 0.0; - - if (likely(total_events > 0)) - cycles_per_event = env_time_to_cycles(total_time, cpu_hz) / - (double)total_events; - if (likely(cycles_per_event > 0)) /* Million events/s: */ - events_per_sec = cpu_mhz * num_cores / cycles_per_event; - - /* - * Print without latency statistics - */ - if (!MEASURE_LATENCY) { - if (print_header) - APPL_PRINT(RESULT_PRINTF_HDR); - APPL_PRINT(RESULT_PRINTF_FMT, - cycles_per_event, events_per_sec, - cpu_mhz, print_count); - return; - } - - /* - * Print with latency statistics - */ - uint64_t latency_events = 0; - env_time_t latency_hi_ave = ENV_TIME_NULL; - env_time_t latency_hi_max = ENV_TIME_NULL; - env_time_t latency_lo_ave = ENV_TIME_NULL; - env_time_t latency_lo_max = ENV_TIME_NULL; - - for (int i = 0; i < num_cores; i++) { - latency_events += core_stat[i].latency.events; - latency_hi_ave = env_time_sum(latency_hi_ave, - core_stat[i].latency.hi_prio_ave); - latency_lo_ave = env_time_sum(latency_lo_ave, - core_stat[i].latency.lo_prio_ave); - - if (env_time_cmp(core_stat[i].latency.hi_prio_max, - latency_hi_max) > 0) { - latency_hi_max = core_stat[i].latency.hi_prio_max; - } - if (env_time_cmp(core_stat[i].latency.lo_prio_max, - latency_lo_max) > 0) { - latency_lo_max = core_stat[i].latency.lo_prio_max; - } - } - - double lat_per_hi_ave = 0.0; - double lat_per_lo_ave = 0.0; - - if (likely(latency_events > 0)) { - lat_per_hi_ave = env_time_to_cycles(latency_hi_ave, cpu_hz) / - (double)latency_events; - lat_per_lo_ave = env_time_to_cycles(latency_lo_ave, cpu_hz) / - (double)latency_events; - } - - if (print_header) - APPL_PRINT(RESULT_PRINTF_LATENCY_HDR); - APPL_PRINT(RESULT_PRINTF_LATENCY_FMT, - cycles_per_event, events_per_sec, lat_per_hi_ave, - env_time_to_cycles(latency_hi_max, cpu_hz), - lat_per_lo_ave, - env_time_to_cycles(latency_lo_max, cpu_hz), - cpu_mhz, print_count); -} - -/** - * Free the input event and allocate a new one instead - */ -static inline em_event_t -alloc_free_per_event(em_event_t event) -{ - perf_event_t *perf_event = em_event_pointer(event); - env_time_t send_time = perf_event->send_time; - int seq = perf_event->seq; - size_t event_size = em_event_get_size(event); - - em_free(event); - - event = em_alloc(event_size, EM_EVENT_TYPE_SW, perf_shm->pool); - - perf_event = em_event_pointer(event); - - perf_event->send_time = send_time; - perf_event->seq = seq; - - return event; -} - -/** - * Measure the scheduling latency per event - */ -static inline void -measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, - env_time_t recv_time) -{ - const int core = em_core_id(); - core_stat_t *const cstat = &perf_shm->core_stat[core]; - const env_time_t send_time = perf_event->send_time; - env_time_t latency; - - if (perf_shm->test_status.reset_flag || - cstat->events == 0 || cstat->events >= EVENTS_PER_SAMPLE) - return; - - cstat->latency.events++; - - latency = env_time_diff(recv_time, send_time); - - if (q_ctx->prio == EM_QUEUE_PRIO_HIGH) { - cstat->latency.hi_prio_ave = - env_time_sum(cstat->latency.hi_prio_ave, latency); - if (env_time_cmp(latency, cstat->latency.hi_prio_max) > 0) - cstat->latency.hi_prio_max = 
latency;
-	} else {
-		cstat->latency.lo_prio_ave =
-			env_time_sum(cstat->latency.lo_prio_ave, latency);
-		if (env_time_cmp(latency, cstat->latency.lo_prio_max) > 0)
-			cstat->latency.lo_prio_max = latency;
-	}
-}
+/*
+ * Copyright (c) 2012, Nokia Siemens Networks
+ * Copyright (c) 2014, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine performance test.
+ *
+ * Measures the average cycles consumed during an event send-sched-receive loop
+ * for a certain number of queues and events in the system. The test increases
+ * the number of queues[+events] for each measurement round and prints the
+ * results. The test will stop if the maximum number of queues supported by the
+ * system is reached.
+ *
+ * Plot the cycles/event to get an idea of how the system scales with an
+ * increasing number of queues.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/*
+ * Test options:
+ */
+
+/* Alloc and free per event */
+#define ALLOC_FREE_PER_EVENT 0 /* false=0 or true=1 */
+
+/*
+ * Create all EM queues at startup or create the queues during
+ * the test in steps.
+ */
+#define CREATE_ALL_QUEUES_AT_STARTUP 0 /* false=0 or true=1 */
+
+/*
+ * Measure the send-enqueue-schedule-receive latency. Measured separately for
+ * 'high priority' and 'low priority' queues (ratio 1:4).
+ */
+#define MEASURE_LATENCY 1 /* false=0 or true=1 */
+
+/*
+ * Keep the number of events constant while increasing the number of queues.
+ * Should be divisible by, or be a factor of, the queue step.
+ */
+#define CONST_NUM_EVENTS 4096 /* true>0 or false=0 */
+
+/*
+ * Test configuration:
+ */
+
+#define MAX_CORES 64
+
+/* Number of EOs and queues in a loop */
+#define NUM_EOS 4
+
+/* Number of events per queue */
+#define NUM_EVENTS 4
+
+#if CONST_NUM_EVENTS > 0
+/*
+ * Total number of queues when using a constant number of events.
+ * Make sure that all queues get 'NUM_EVENTS' events per queue.
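+ * For example: the defaults CONST_NUM_EVENTS=4096 and NUM_EVENTS=4 yield
+ * NUM_QUEUES = 4096 / 4 = 1024 queues.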
+ */ +#define NUM_QUEUES (CONST_NUM_EVENTS / NUM_EVENTS) +#else +/* + * Total number of queues when increasing the total event count for each queue + * step. + */ +#define NUM_QUEUES (NUM_EOS * 16 * 1024) +#endif + +/* Number of data bytes in an event */ +#define DATA_SIZE 128 + +/* Samples before adding more queues */ +#define NUM_SAMPLES (1 + 8) /* setup(1) + measure(N) */ + +/* Num events a core processes between samples */ +#define EVENTS_PER_SAMPLE 0x400000 + +/* EM queue type */ +#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC + +/* Core states during test. */ +#define CORE_STATE_MEASURE 0 +#define CORE_STATE_IDLE 1 + +/* Result APPL_PRINT() format string */ +#define RESULT_PRINTF_HDR "Cycles/Event Events/s cpu-freq\n" +#define RESULT_PRINTF_FMT "%12.0f %7.0f M %5.0f MHz %" PRIu64 "\n" + +/* Result APPL_PRINT() format string when MEASURE_LATENCY is used */ +#define RESULT_PRINTF_LATENCY_HDR \ +"Cycles/ Events/ Latency:\n" \ +" Event Sec hi-ave hi-max lo-ave lo-max cpu-freq\n" +#define RESULT_PRINTF_LATENCY_FMT \ +"%6.0f %7.2f M %8.0f %7" PRIu64 " %7.0f %7" PRIu64 " %5.0f MHz %" PRIu64 "\n" + +/* + * The number of scheduled queues to use in each test step. + * + * NOTE: The max queue step is always 'NUM_QUEUES', even if the value of + * 'NUM_QUEUES' would be smaller than a listed queue step (then just stop + * before reaching the end of the list). + */ +static const int queue_steps[] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, + 4096, 8192, 16384, 32768, 65536, NUM_QUEUES}; + +/** + * Test state, + * cache line alignment and padding handled in 'perf_shm_t' + */ +typedef struct { + int queues; + int step; + int samples; + int num_cores; + int reset_flag; + double cpu_mhz; + uint64_t cpu_hz; + uint64_t print_count; + env_atomic64_t ready_count; + /* if using CONST_NUM_EVENTS:*/ + int free_flag; + env_atomic64_t freed_count; +} test_status_t; + +/** + * Performance test statistics (per core) + */ +typedef struct { + uint64_t events; + env_time_t begin_time; + env_time_t end_time; + env_time_t diff_time; + struct { + uint64_t events; + env_time_t hi_prio_ave; + env_time_t hi_prio_max; + env_time_t lo_prio_ave; + env_time_t lo_prio_max; + } latency; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} core_stat_t; + +COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, + CORE_STAT_SIZE_ERROR); + +/** + * EO context data + */ +typedef struct { + em_eo_t eo_id; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} eo_context_t; + +COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, + EO_CONTEXT_T__SIZE_ERROR); + +/** + * Queue context data + */ +typedef struct { + /** This queue */ + em_queue_t this_queue; + /** Next queue */ + em_queue_t next_queue; + /** Priority of 'this_queue' */ + em_queue_prio_t prio; + /** Type of 'this_queue' */ + em_queue_type_t type; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} queue_context_t; + +COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, + QUEUE_CONTEXT_SIZE_ERROR); + +/** + * Performance test event + */ +typedef struct { + /* Send time stamp */ + env_time_t send_time; + /* Sequence number */ + int seq; + /* Test data */ + uint8_t data[DATA_SIZE]; +} perf_event_t; + +/** + * Test shared memory + */ +typedef struct { + /* Event pool used by this application */ + em_pool_t pool; + + test_status_t test_status ENV_CACHE_LINE_ALIGNED; + + core_stat_t core_stat[MAX_CORES] 
ENV_CACHE_LINE_ALIGNED; + + eo_context_t eo_context_tbl[NUM_EOS] ENV_CACHE_LINE_ALIGNED; + + queue_context_t queue_context_tbl[NUM_QUEUES] ENV_CACHE_LINE_ALIGNED; + /* EO ID's */ + em_eo_t eo[NUM_EOS] ENV_CACHE_LINE_ALIGNED; +} perf_shm_t; + +COMPILE_TIME_ASSERT(sizeof(perf_shm_t) % ENV_CACHE_LINE_SIZE == 0, + PERF_SHM_T__SIZE_ERROR); + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL perf_shm_t *perf_shm; + +/* EM-core local state */ +static ENV_LOCAL int core_state = CORE_STATE_MEASURE; + +static em_status_t +error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args); + +static void +queue_step(void); + +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +stop(void *eo_context, em_eo_t eo); + +static void +receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context); + +static int +update_test_state(em_event_t event); + +static void +create_and_link_queues(int start_queue, int num_queues); + +static void +print_test_statistics(test_status_t *test_status, int print_header, + core_stat_t core_stat[]); + +static inline em_event_t +alloc_free_per_event(em_event_t event); + +static inline void +measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, + env_time_t recv_time); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Test error handler + * + * @param eo Execution object id + * @param error The error code + * @param escope Error scope + * @param args List of arguments (__FILE__, __func__, __LINE__, + * (format), ## __VA_ARGS__) + * + * @return The original error code. + */ +static em_status_t +error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) +{ + if (escope == EM_ESCOPE_QUEUE_CREATE && !EM_ERROR_IS_FATAL(error)) { + APPL_PRINT("\nUnable to create more queues\n\n" + "Test finished\n"); + raise(SIGINT); + return error; + } + + if (appl_shm->exit_flag && EM_ESCOPE(escope) && + !EM_ERROR_IS_FATAL(error)) { + /* Suppress non-fatal EM-error logs during tear-down */ + if (escope == EM_ESCOPE_EO_ADD_QUEUE_SYNC) { + APPL_PRINT("\nExit: suppress queue setup error\n\n"); + return error; + } + } + + return test_error_handler(eo, error, escope, args); +} + +/** + * Init of the Queues performance test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfQueuesSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(error_handler); + } else { + perf_shm = env_shared_lookup("PerfQueuesSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf test queues init failed on EM-core: %u\n", + em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Queues performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. 
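+ *
+ * Creates the EOs, creates and links the queues of the first test step (or
+ * all queues, if CREATE_ALL_QUEUES_AT_STARTUP is set), starts the EOs and
+ * finally calls queue_step() to allocate and send the initial test events.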
+ */ +void +test_start(appl_conf_t *const appl_conf) +{ + eo_context_t *eo_ctx; + em_status_t ret, start_ret = EM_ERROR; + const int q_ctx_size = sizeof(perf_shm->queue_context_tbl); + int i; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + perf_shm->pool = appl_conf->pools[0]; + else + perf_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + " Max. NUM_QUEUES: %i\n" + " sizeof queue_context_tbl: %i kB\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + perf_shm->pool, NUM_QUEUES, q_ctx_size / 1024); + + test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + perf_shm->test_status.cpu_hz = env_core_hz(); + perf_shm->test_status.cpu_mhz = (double)perf_shm->test_status.cpu_hz / + 1000000.0; + perf_shm->test_status.reset_flag = 0; + perf_shm->test_status.num_cores = em_core_count(); + perf_shm->test_status.free_flag = 0; + + env_atomic64_init(&perf_shm->test_status.ready_count); + env_atomic64_init(&perf_shm->test_status.freed_count); + + /* Create EOs */ + for (i = 0; i < NUM_EOS; i++) { + eo_ctx = &perf_shm->eo_context_tbl[i]; + perf_shm->eo[i] = em_eo_create("perf test eo", start, NULL, + stop, NULL, receive_func, + eo_ctx); + test_fatal_if(perf_shm->eo[i] == EM_EO_UNDEF, + "EO create failed:%d", i, NUM_EOS); + } + + APPL_PRINT(" EOs created\n"); + + /* + * Create and link queues + */ + if (CREATE_ALL_QUEUES_AT_STARTUP) /* Create ALL queues at once */ + create_and_link_queues(0, NUM_QUEUES); + else /* Create queues for the first step, then more before each step */ + create_and_link_queues(0, queue_steps[0]); + + /* Start EOs */ + for (i = 0; i < NUM_EOS; i++) { + ret = em_eo_start_sync(perf_shm->eo[i], &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start(%d):%" PRI_STAT " %" PRI_STAT "", + i, ret, start_ret); + } + + queue_step(); +} + +/** + * Stop the test, only run on one core + */ +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + int i; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + /* Stop EOs */ + for (i = 0; i < NUM_EOS; i++) { + eo = perf_shm->eo[i]; + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", + eo, ret); + } + + /* Remove and delete all of the EO's queues, then delete the EO */ + for (i = 0; i < NUM_EOS; i++) { + eo = perf_shm->eo[i]; + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE/*delete Qs*/); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", + eo, ret); + } +} + +/** + * Terminate the test, only run on one core + */ +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + env_shared_free(perf_shm); + em_unregister_error_handler(); +} + +/** + * Allocate, initialize and send test step events. 
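+ *
+ * With a constant event count (CONST_NUM_EVENTS > 0), the events are spread
+ * evenly over the queues of the current step, roughly as in this simplified
+ * sketch of the allocation loop below (the per-event sequence number and
+ * send time stamp setup is omitted here):
+ *
+ * @code
+ *	for (int i = 0; i < CONST_NUM_EVENTS; i++) {
+ *		em_event_t ev = em_alloc(sizeof(perf_event_t),
+ *					 EM_EVENT_TYPE_SW, perf_shm->pool);
+ *		em_send(ev, perf_shm->queue_context_tbl[i % queue_count].this_queue);
+ *	}
+ * @endcode
+ *
+ * i.e. each of the 'queue_count' queues of the step initially holds
+ * CONST_NUM_EVENTS / queue_count events.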
+ */ +static void +queue_step(void) +{ + queue_context_t *q_ctx; + em_event_t event; + perf_event_t *perf_event; + em_status_t ret; + const int first = perf_shm->test_status.queues; + const int step = perf_shm->test_status.step; + const int queue_count = queue_steps[step]; + int i, j; + + /* Allocate and send test events for the queues in the first step */ + if (CONST_NUM_EVENTS) { + for (i = 0; i < CONST_NUM_EVENTS; i++) { + event = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, + "EM alloc failed (%i)", i); + perf_event = em_event_pointer(event); + perf_event->seq = i; + perf_event->send_time = env_time_global(); + + /* Allocate events evenly to the queues */ + q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; + ret = em_send(event, q_ctx->this_queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT "\n" + "Queue:%" PRI_QUEUE "", + ret, q_ctx->this_queue); + em_free(event); + return; + } + } + } else { + for (i = first; i < queue_count; i++) { + q_ctx = &perf_shm->queue_context_tbl[i]; + + for (j = 0; j < NUM_EVENTS; j++) { + event = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, + "EM alloc failed (%d)", i); + + perf_event = em_event_pointer(event); + perf_event->seq = i * NUM_EVENTS + j; + perf_event->send_time = env_time_global(); + + ret = em_send(event, q_ctx->this_queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT "\n" + "Queue:%" PRI_QUEUE "", + ret, q_ctx->this_queue); + em_free(event); + return; + } + } + } + } + + perf_shm->test_status.queues = queue_count; + perf_shm->test_status.step++; + + APPL_PRINT("\nNumber of queues: %d\n", perf_shm->test_status.queues); + if (CONST_NUM_EVENTS) + APPL_PRINT("Number of events: %d\n", CONST_NUM_EVENTS); + else + APPL_PRINT("Number of events: %d\n", + perf_shm->test_status.queues * NUM_EVENTS); +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + eo_context_t *eo_ctx = eo_context; + + (void)conf; + + APPL_PRINT("EO %" PRI_EO " starting.\n", eo); + + eo_ctx->eo_id = eo; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t +stop(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + + APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); + + return EM_OK; +} + +/** + * @private + * + * EO receive function. + * + * Loops back events and calculates the event rate. 
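+ *
+ * The queues of each EO-loop are linked into a ring via their queue contexts,
+ * so, simplified (latency time stamping and error handling omitted), each
+ * received event is handled roughly as:
+ *
+ * @code
+ *	if (update_test_state(event))	      // stats & step bookkeeping
+ *		return;			      // event freed during test reset
+ *	if (MEASURE_LATENCY)
+ *		measure_latency(perf_event, q_ctx, recv_time);
+ *	em_send(event, q_ctx->next_queue);    // forward around the queue ring
+ * @endcode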
+ */ +static void +receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context) +{ + env_time_t recv_time; + perf_event_t *perf_event; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (MEASURE_LATENCY) { + recv_time = env_time_global(); + perf_event = em_event_pointer(event); + } + + queue_context_t *q_ctx; + em_queue_t dst_queue; + em_status_t ret; + int do_return; + + (void)eo_context; + (void)type; + + q_ctx = q_context; + + /* + * Helper: Update the test state, count recv events, + * calc & print stats, prepare for next step + */ + do_return = update_test_state(event); + if (unlikely(do_return)) + return; + + if (ALLOC_FREE_PER_EVENT) + event = alloc_free_per_event(event); + + dst_queue = q_ctx->next_queue; + test_fatal_if(queue != q_ctx->this_queue, "Queue config error"); + + if (MEASURE_LATENCY) { + measure_latency(perf_event, q_ctx, recv_time); + perf_event->send_time = env_time_global(); + } + /* Send the event to the next queue */ + ret = em_send(event, dst_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, dst_queue); + } +} + +/** + * Receive function helper: Update the test state + * + * Calculates the number of received events, maintains & prints test statistics + * and restarts/reconfigures the test for the next queue/event-setup + * + * @return '1' if the caller receive function should immediately return, + * '0' otherwise + */ +static inline int +update_test_state(em_event_t event) +{ + uint64_t events; + uint64_t freed_count; + uint64_t ready_count; + const int core = em_core_id(); + test_status_t *const tstat = &perf_shm->test_status; + core_stat_t *const cstat = &perf_shm->core_stat[core]; + + events = cstat->events; + events++; + + if (unlikely(tstat->reset_flag)) { + events = 0; + if (CONST_NUM_EVENTS) { + /* Free all old events before allocating new ones. */ + if (unlikely(tstat->free_flag)) { + em_free(event); + freed_count = + env_atomic64_add_return(&tstat->freed_count, 1); + if (freed_count == CONST_NUM_EVENTS) { + /* Last event */ + env_atomic64_set(&tstat->freed_count, + 0); + tstat->reset_flag = 0; + tstat->free_flag = 0; + queue_step(); + } + /* Req caller receive-func to return */ + return 1; + } + } + + if (unlikely(core_state != CORE_STATE_IDLE)) { + core_state = CORE_STATE_IDLE; + cstat->begin_time = ENV_TIME_NULL; + + ready_count = + env_atomic64_add_return(&tstat->ready_count, 1); + + if (ready_count == (uint64_t)tstat->num_cores) { + env_atomic64_set(&tstat->ready_count, 0); + + if (CONST_NUM_EVENTS) { + int sample = tstat->samples; + int queues = tstat->queues; + + if (sample == 0 && queues < NUM_QUEUES) + tstat->free_flag = 1; + else + tstat->reset_flag = 0; + } else { + tstat->reset_flag = 0; + } + } + } + } else if (unlikely(events == 1)) { + cstat->begin_time = env_time_global(); + cstat->latency.events = 0; + cstat->latency.hi_prio_ave = ENV_TIME_NULL; + cstat->latency.hi_prio_max = ENV_TIME_NULL; + cstat->latency.lo_prio_ave = ENV_TIME_NULL; + cstat->latency.lo_prio_max = ENV_TIME_NULL; + + core_state = CORE_STATE_MEASURE; + } else if (unlikely(events == EVENTS_PER_SAMPLE)) { + /* + * Measurements done for this step. Store results and continue + * receiving events until all cores are done. 
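+		 * The atomic 'ready_count' acts as a barrier: the last core
+		 * to finish its sample round resets the count, sets the
+		 * 'reset_flag' and thereby moves the test on to the next step.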
+ */ + env_time_t begin_time, end_time; + + cstat->end_time = env_time_global(); + + end_time = cstat->end_time; + begin_time = cstat->begin_time; + cstat->diff_time = env_time_diff(end_time, begin_time); + + ready_count = env_atomic64_add_return(&tstat->ready_count, 1); + + /* + * Check whether all cores are done with the step, + * and if done proceed to the next step + */ + if (unlikely((int)ready_count == tstat->num_cores)) { + /* No real need for atomicity here, ran on last core*/ + env_atomic64_set(&tstat->ready_count, 0); + + tstat->reset_flag = 1; + tstat->samples++; + + /* + * Print statistics. + * Omit prints for the first sample round to allow the + * test to stabilize after setups and teardowns. + */ + if (tstat->samples > 1) { + int print_header = tstat->samples == 2 ? 1 : 0; + + print_test_statistics(tstat, print_header, + perf_shm->core_stat); + } + + /* + * Start next test step - setup new queues + */ + if (tstat->samples == NUM_SAMPLES && + tstat->queues < NUM_QUEUES) { + if (!CREATE_ALL_QUEUES_AT_STARTUP) { + int step = tstat->step; + int first_q = tstat->queues; + int num_qs = queue_steps[step] - + queue_steps[step - 1]; + + create_and_link_queues(first_q, num_qs); + } + + if (!CONST_NUM_EVENTS) + queue_step(); + + tstat->samples = 0; + } + } + } + + cstat->events = events; + + return 0; +} + +/** + * Creates a number of EM queues, associates them with EOs, and links them. + */ +static void +create_and_link_queues(int start_queue, int num_queues) +{ + int i, j; + em_queue_t queue, prev_queue; + em_queue_prio_t prio; + em_queue_type_t type; + em_status_t ret; + queue_context_t *q_ctx; + + APPL_PRINT("\nCreate new queues: %d\n", num_queues); + + if (num_queues % NUM_EOS != 0) { + APPL_PRINT("%s() arg 'num_queues'=%d not multiple of NUM_EOS=%d\n", + __func__, num_queues, NUM_EOS); + return; + } + + for (i = start_queue; i < (start_queue + num_queues); i += NUM_EOS) { + prev_queue = EM_QUEUE_UNDEF; + + for (j = 0; j < NUM_EOS; j++) { + prio = EM_QUEUE_PRIO_NORMAL; + + if (MEASURE_LATENCY) { + if (j == 0) + prio = EM_QUEUE_PRIO_HIGH; + } + + type = QUEUE_TYPE; + queue = em_queue_create("queue", type, prio, + EM_QUEUE_GROUP_DEFAULT, NULL); + if (queue == EM_QUEUE_UNDEF) { + APPL_PRINT("Max nbr of supported queues: %d\n", + i); + return; + } + + q_ctx = &perf_shm->queue_context_tbl[i + j]; + + ret = em_queue_set_context(queue, q_ctx); + test_fatal_if(ret != EM_OK, + "em_queue_set_context():%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, perf_shm->eo[j], queue); + + ret = em_eo_add_queue_sync(perf_shm->eo[j], queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "em_eo_add_queue_sync():%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, perf_shm->eo[j], queue); + em_queue_delete(queue); + return; + } + /* Link queues */ + q_ctx->this_queue = queue; + q_ctx->next_queue = prev_queue; + q_ctx->prio = prio; + q_ctx->type = type; + prev_queue = queue; + } + + /* Connect first queue to the last */ + q_ctx = &perf_shm->queue_context_tbl[i + 0]; + q_ctx->next_queue = prev_queue; + } + + APPL_PRINT("New Qs created:%d First:%" PRI_QUEUE " Last:%" PRI_QUEUE "\n", + num_queues, + perf_shm->queue_context_tbl[start_queue].this_queue, + perf_shm->queue_context_tbl[start_queue + + num_queues - 1].this_queue); +} + +/** + * Print test statistics + */ +static void +print_test_statistics(test_status_t *test_status, int print_header, + core_stat_t core_stat[]) +{ + const int num_cores = test_status->num_cores; + const uint64_t cpu_hz = 
test_status->cpu_hz; + const double cpu_mhz = test_status->cpu_mhz; + const uint64_t total_events = (uint64_t)num_cores * EVENTS_PER_SAMPLE; + const uint64_t print_count = test_status->print_count++; + env_time_t total_time = ENV_TIME_NULL; + + for (int i = 0; i < num_cores; i++) + total_time = env_time_sum(total_time, core_stat[i].diff_time); + + double cycles_per_event = 0.0; + double events_per_sec = 0.0; + + if (likely(total_events > 0)) + cycles_per_event = env_time_to_cycles(total_time, cpu_hz) / + (double)total_events; + if (likely(cycles_per_event > 0)) /* Million events/s: */ + events_per_sec = cpu_mhz * num_cores / cycles_per_event; + + /* + * Print without latency statistics + */ + if (!MEASURE_LATENCY) { + if (print_header) + APPL_PRINT(RESULT_PRINTF_HDR); + APPL_PRINT(RESULT_PRINTF_FMT, + cycles_per_event, events_per_sec, + cpu_mhz, print_count); + return; + } + + /* + * Print with latency statistics + */ + uint64_t latency_events = 0; + env_time_t latency_hi_ave = ENV_TIME_NULL; + env_time_t latency_hi_max = ENV_TIME_NULL; + env_time_t latency_lo_ave = ENV_TIME_NULL; + env_time_t latency_lo_max = ENV_TIME_NULL; + + for (int i = 0; i < num_cores; i++) { + latency_events += core_stat[i].latency.events; + latency_hi_ave = env_time_sum(latency_hi_ave, + core_stat[i].latency.hi_prio_ave); + latency_lo_ave = env_time_sum(latency_lo_ave, + core_stat[i].latency.lo_prio_ave); + + if (env_time_cmp(core_stat[i].latency.hi_prio_max, + latency_hi_max) > 0) { + latency_hi_max = core_stat[i].latency.hi_prio_max; + } + if (env_time_cmp(core_stat[i].latency.lo_prio_max, + latency_lo_max) > 0) { + latency_lo_max = core_stat[i].latency.lo_prio_max; + } + } + + double lat_per_hi_ave = 0.0; + double lat_per_lo_ave = 0.0; + + if (likely(latency_events > 0)) { + lat_per_hi_ave = env_time_to_cycles(latency_hi_ave, cpu_hz) / + (double)latency_events; + lat_per_lo_ave = env_time_to_cycles(latency_lo_ave, cpu_hz) / + (double)latency_events; + } + + if (print_header) + APPL_PRINT(RESULT_PRINTF_LATENCY_HDR); + APPL_PRINT(RESULT_PRINTF_LATENCY_FMT, + cycles_per_event, events_per_sec, lat_per_hi_ave, + env_time_to_cycles(latency_hi_max, cpu_hz), + lat_per_lo_ave, + env_time_to_cycles(latency_lo_max, cpu_hz), + cpu_mhz, print_count); +} + +/** + * Free the input event and allocate a new one instead + */ +static inline em_event_t +alloc_free_per_event(em_event_t event) +{ + perf_event_t *perf_event = em_event_pointer(event); + env_time_t send_time = perf_event->send_time; + int seq = perf_event->seq; + uint32_t event_size = em_event_get_size(event); + + em_free(event); + + event = em_alloc(event_size, EM_EVENT_TYPE_SW, perf_shm->pool); + + perf_event = em_event_pointer(event); + + perf_event->send_time = send_time; + perf_event->seq = seq; + + return event; +} + +/** + * Measure the scheduling latency per event + */ +static inline void +measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, + env_time_t recv_time) +{ + const int core = em_core_id(); + core_stat_t *const cstat = &perf_shm->core_stat[core]; + const env_time_t send_time = perf_event->send_time; + env_time_t latency; + + if (perf_shm->test_status.reset_flag || + cstat->events == 0 || cstat->events >= EVENTS_PER_SAMPLE) + return; + + cstat->latency.events++; + + latency = env_time_diff(recv_time, send_time); + + if (q_ctx->prio == EM_QUEUE_PRIO_HIGH) { + cstat->latency.hi_prio_ave = + env_time_sum(cstat->latency.hi_prio_ave, latency); + if (env_time_cmp(latency, cstat->latency.hi_prio_max) > 0) + cstat->latency.hi_prio_max 
= latency;
+	} else {
+		cstat->latency.lo_prio_ave =
+			env_time_sum(cstat->latency.lo_prio_ave, latency);
+		if (env_time_cmp(latency, cstat->latency.lo_prio_max) > 0)
+			cstat->latency.lo_prio_max = latency;
+	}
+}
diff --git a/programs/performance/queues_local.c b/programs/performance/queues_local.c
index 932e2b3d..0b80d8c0 100644
--- a/programs/performance/queues_local.c
+++ b/programs/performance/queues_local.c
@@ -1,1066 +1,1066 @@
-/*
- * Copyright (c) 2012, Nokia Siemens Networks
- * Copyright (c) 2014, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine performance test.
- *
- * Measures the average cycles consumed during an event send-sched-receive loop
- * for a certain number of queues and events in the system. The test increases
- * the number of queues[+events] for each measurement round and prints the
- * results. The test will stop if the maximum number of queues supported by the
- * system is reached.
- *
- * Test derived from the programs/performance/queues.c test but additionally
- * uses local queues between the processing EOs.
- *
- * Plot the cycles/event to get an idea of how the system scales with an
- * increasing number of queues.
- */
-
-#include <inttypes.h>
-#include <string.h>
-#include <stdio.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/*
- * Test options:
- */
-
-/* Alloc and free per event */
-#define ALLOC_FREE_PER_EVENT 0 /* false=0 or true=1 */
-
-/*
- * Create all EM queues at startup or create the queues during
- * the test in steps.
- */
-#define CREATE_ALL_QUEUES_AT_STARTUP 0 /* false=0 or true=1 */
-
-/*
- * Measure the send-enqueue-schedule-receive latency. Measured separately for
- * 'high priority' and 'low priority' queues (ratio 1:4).
- */
-#define MEASURE_LATENCY 1 /* false=0 or true=1 */
-
-/*
- * Keep the number of events constant while increasing the number of queues.
- * Should be divisible by, or be a factor of, the queue step.
- */ -#define CONST_NUM_EVENTS 4096 /* true>0 or false=0 */ - -/* - * Test configuration: - */ - -#define MAX_CORES 64 - -/* Number of EO's and queues in a loop */ -#define NUM_EOS 4 - -#define NUM_SCHED_QUEUES (1) -#define NUM_LOCAL_QUEUES (NUM_EOS - NUM_SCHED_QUEUES) -COMPILE_TIME_ASSERT(NUM_SCHED_QUEUES + NUM_LOCAL_QUEUES == NUM_EOS, - INVALID_NUM_QUEUES_IN_LOOP); - -/* Number of events per queue */ -#define NUM_EVENTS 4 - -#if CONST_NUM_EVENTS > 0 -/* - * Total number of queues when using a constant number of events. - * Make sure that all queues get 'NUM_EVENTS' events per queue. - */ -#define NUM_QUEUES (CONST_NUM_EVENTS / NUM_EVENTS) -#else -/* - * Total number of queues when increasing the total event count for each queue - * step. - */ -#define NUM_QUEUES (NUM_EOS * 16 * 1024) -#endif - -/* Number of data bytes in an event */ -#define DATA_SIZE 128 - -/* Samples before adding more queues */ -#define NUM_SAMPLES (1 + 8) /* setup(1) + measure(N) */ - -/* Num events a core processes between samples */ -#define EVENTS_PER_SAMPLE 0x100000 - -/* EM queue type */ -#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC - -/* Core states during test. */ -#define CORE_STATE_MEASURE 0 -#define CORE_STATE_IDLE 1 - -/* Result APPL_PRINT() format string */ -#define RESULT_PRINTF_HDR "Cycles/Event Events/s cpu-freq\n" -#define RESULT_PRINTF_FMT "%12.0f %7.0f M %5.0f MHz %" PRIu64 "\n" - -/* Result APPL_PRINT() format string when MEASURE_LATENCY is used */ -#define RESULT_PRINTF_LATENCY_HDR \ -"Cycles/ Events/ Latency:\n" \ -" Event Sec sched-ave sched-max local-ave local-max cpu-freq\n" -#define RESULT_PRINTF_LATENCY_FMT \ -"%6.0f %7.2f M %11.0f %10" PRIu64 " %10.0f %10" PRIu64 " %5.0f MHz %" PRIu64 "\n" - -/* - * The number of scheduled queues to use in each test step. - * - * NOTE: The max queue step is always 'NUM_QUEUES', even if the value of - * 'NUM_QUEUES' would be smaller than a listed queue step (then just stop - * before reaching the end of the list). 
- */ -static const int queue_steps[] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, - 4096, 8192, 16384, 32768, 65536, NUM_QUEUES}; - -/** - * Test state, - * cache line alignment and padding handled in 'perf_shm_t' - */ -typedef struct { - int queues; - int step; - int samples; - int num_cores; - int reset_flag; - double cpu_mhz; - uint64_t cpu_hz; - uint64_t print_count; - env_atomic64_t ready_count; - /* if using CONST_NUM_EVENTS:*/ - int free_flag; - env_atomic64_t freed_count; -} test_status_t; - -/** - * Performance test statistics (per core) - */ -typedef struct { - uint64_t events; - env_time_t begin_time; - env_time_t end_time; - env_time_t diff_time; - struct { - uint64_t events; - env_time_t sched_ave; - env_time_t sched_max; - env_time_t local_ave; - env_time_t local_max; - } latency; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} core_stat_t; - -COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, - CORE_STAT_SIZE_ERROR); - -/** - * EO context data - */ -typedef struct { - em_eo_t eo_id; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} eo_context_t; - -COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, - EO_CONTEXT_T__SIZE_ERROR); - -/** - * Queue context data - */ -typedef struct { - /** This queue */ - em_queue_t this_queue; - /** Next queue */ - em_queue_t next_queue; - /** Priority of 'this_queue' */ - em_queue_prio_t prio; - /** Type of 'this_queue' */ - em_queue_type_t type; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} queue_context_t; - -COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, - QUEUE_CONTEXT_SIZE_ERROR); - -/** - * Performance test event - */ -typedef struct { - /* Send time stamp */ - env_time_t send_time; - /* Sequence number */ - int seq; - /* Test data */ - uint8_t data[DATA_SIZE]; -} perf_event_t; - -/** - * Test shared memory - */ -typedef struct { - /* Event pool used by this application */ - em_pool_t pool; - - test_status_t test_status ENV_CACHE_LINE_ALIGNED; - - core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; - - eo_context_t eo_context_tbl[NUM_EOS] ENV_CACHE_LINE_ALIGNED; - - queue_context_t queue_context_tbl[NUM_QUEUES] ENV_CACHE_LINE_ALIGNED; - /* EO ID's */ - em_eo_t eo[NUM_EOS] ENV_CACHE_LINE_ALIGNED; -} perf_shm_t; - -COMPILE_TIME_ASSERT(sizeof(perf_shm_t) % ENV_CACHE_LINE_SIZE == 0, - PERF_SHM_T__SIZE_ERROR); - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL perf_shm_t *perf_shm; - -/* EM-core local state */ -static ENV_LOCAL int core_state = CORE_STATE_MEASURE; - -static em_status_t -error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args); - -static void -queue_step(void); - -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -stop(void *eo_context, em_eo_t eo); - -static void -receive_func(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_context); - -static int -update_test_state(em_event_t event); - -static void -create_and_link_queues(int start_queue, int num_queues); - -static void -print_test_statistics(test_status_t *test_status, int print_header, - core_stat_t core_stat[]); - -static inline em_event_t -alloc_free_per_event(em_event_t event); - -static inline void -measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, - env_time_t recv_time); - -/** - * Main function - * - * Call cm_setup() 
to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Test error handler - * - * @param eo Execution object id - * @param error The error code - * @param escope Error scope - * @param args List of arguments (__FILE__, __func__, __LINE__, - * (format), ## __VA_ARGS__) - * - * @return The original error code. - */ -static em_status_t -error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) -{ - if (escope == EM_ESCOPE_QUEUE_CREATE && !EM_ERROR_IS_FATAL(error)) { - APPL_PRINT("\nUnable to create more queues\n\n" - "Test finished\n"); - raise(SIGINT); - return error; - } - - if (appl_shm->exit_flag && EM_ESCOPE(escope) && - !EM_ERROR_IS_FATAL(error)) { - /* Suppress non-fatal EM-error logs during tear-down */ - if (escope == EM_ESCOPE_EO_ADD_QUEUE_SYNC) { - APPL_PRINT("\nExit: suppress queue setup error\n\n"); - return error; - } - } - - return test_error_handler(eo, error, escope, args); -} - -/** - * Init of the Queues performance test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - perf_shm = env_shared_reserve("PerfQueuesSharedMem", - sizeof(perf_shm_t)); - em_register_error_handler(error_handler); - } else { - perf_shm = env_shared_lookup("PerfQueuesSharedMem"); - } - - if (perf_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Perf test queues init failed on EM-core: %u\n", - em_core_id()); - else if (core == 0) - memset(perf_shm, 0, sizeof(perf_shm_t)); -} - -/** - * Startup of the Queues performance test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - eo_context_t *eo_ctx; - em_status_t ret, start_ret = EM_ERROR; - const int q_ctx_size = sizeof(perf_shm->queue_context_tbl); - int i; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - perf_shm->pool = appl_conf->pools[0]; - else - perf_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - " Max. 
NUM_QUEUES: %i\n" - " sizeof queue_context_tbl: %i kB\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - perf_shm->pool, NUM_QUEUES, q_ctx_size / 1024); - - test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - perf_shm->test_status.cpu_hz = env_core_hz(); - perf_shm->test_status.cpu_mhz = (double)perf_shm->test_status.cpu_hz / - 1000000.0; - perf_shm->test_status.num_cores = em_core_count(); - perf_shm->test_status.free_flag = 0; - - env_atomic64_init(&perf_shm->test_status.ready_count); - env_atomic64_init(&perf_shm->test_status.freed_count); - - /* Create EOs */ - for (i = 0; i < NUM_EOS; i++) { - eo_ctx = &perf_shm->eo_context_tbl[i]; - perf_shm->eo[i] = em_eo_create("perf test eo", start, NULL, - stop, NULL, receive_func, - eo_ctx); - test_fatal_if(perf_shm->eo[i] == EM_EO_UNDEF, - "EO create failed:%d", i, NUM_EOS); - } - - APPL_PRINT(" EOs created\n"); - - /* - * Create and link queues - */ - if (CREATE_ALL_QUEUES_AT_STARTUP) /* Create ALL queues at once */ - create_and_link_queues(0, NUM_QUEUES); - else /* Create queues for the first step, then more before each step */ - create_and_link_queues(0, queue_steps[0]); - - /* Start EOs */ - for (i = 0; i < NUM_EOS; i++) { - ret = em_eo_start_sync(perf_shm->eo[i], &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start(%d):%" PRI_STAT " %" PRI_STAT "", - i, ret, start_ret); - } - - queue_step(); -} - -/** - * Stop the test, only run on one core - */ -void -test_stop(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - em_status_t ret; - int i; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, em_core_id()); - - /* Stop EOs */ - for (i = 0; i < NUM_EOS; i++) { - eo = perf_shm->eo[i]; - ret = em_eo_stop_sync(eo); - - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", - eo, ret); - } - - /* Remove and delete all of the EO's queues, then delete the EO */ - for (i = 0; i < NUM_EOS; i++) { - eo = perf_shm->eo[i]; - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE/*delete Qs*/); - - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", - eo, ret); - } -} - -/** - * Terminate the test, only run on one core - */ -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - env_shared_free(perf_shm); - em_unregister_error_handler(); -} - -/** - * Allocate, initialize and send test step events. 
- */ -static void -queue_step(void) -{ - queue_context_t *q_ctx; - em_event_t event; - perf_event_t *perf_event; - em_status_t ret; - const int first = perf_shm->test_status.queues; - const int step = perf_shm->test_status.step; - const int queue_count = queue_steps[step]; - int i, j, idx, qidx; - - /* Allocate and send test events for the queues in the first step */ - if (CONST_NUM_EVENTS) { - for (i = 0; i < CONST_NUM_EVENTS; i++) { - event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "EM alloc failed (%i)", i); - perf_event = em_event_pointer(event); - perf_event->seq = i; - perf_event->send_time = env_time_global(); - - /* Allocate events evenly to the queues */ - qidx = i % queue_count; - idx = (qidx / NUM_EOS) * NUM_EOS; - idx = idx + qidx % NUM_SCHED_QUEUES; - - q_ctx = &perf_shm->queue_context_tbl[idx]; - test_fatal_if(q_ctx->type == EM_QUEUE_TYPE_LOCAL, - "Illegal QueueType:%" PRI_QTYPE "", - q_ctx->this_queue); - - ret = em_send(event, q_ctx->this_queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT "\n" - "Queue:%" PRI_QUEUE "", - ret, q_ctx->this_queue); - em_free(event); - return; - } - } - } else { - for (i = first; i < queue_count; i++) { - qidx = i % queue_count; - idx = (qidx / NUM_EOS) * NUM_EOS; - idx = idx + qidx % NUM_SCHED_QUEUES; - q_ctx = &perf_shm->queue_context_tbl[idx]; - - test_fatal_if(q_ctx->type == EM_QUEUE_TYPE_LOCAL, - "Illegal QueueType:%" PRI_QTYPE "", - q_ctx->this_queue); - - for (j = 0; j < NUM_EVENTS; j++) { - event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "EM alloc failed (%i)", i); - - perf_event = em_event_pointer(event); - perf_event->seq = i * NUM_EVENTS + j; - perf_event->send_time = env_time_global(); - - ret = em_send(event, q_ctx->this_queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT "\n" - "Queue:%" PRI_QUEUE "", - ret, q_ctx->this_queue); - em_free(event); - return; - } - } - } - } - - perf_shm->test_status.queues = queue_count; - perf_shm->test_status.step++; - - APPL_PRINT("\nNumber of queues: %d - scheduled:%d + local:%d\n", - perf_shm->test_status.queues, - (perf_shm->test_status.queues * NUM_SCHED_QUEUES) / NUM_EOS, - (perf_shm->test_status.queues * NUM_LOCAL_QUEUES) / NUM_EOS); - if (CONST_NUM_EVENTS) - APPL_PRINT("Number of events: %d\n", CONST_NUM_EVENTS); - else - APPL_PRINT("Number of events: %d\n", - perf_shm->test_status.queues * NUM_EVENTS); -} - -/** - * @private - * - * EO start function. - * - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - eo_ctx->eo_id = eo; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t -stop(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - return EM_OK; -} - -/** - * @private - * - * EO receive function. - * - * Loops back events and calculates the event rate. 
- */ -static void -receive_func(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_context) -{ - env_time_t recv_time; - perf_event_t *perf_event; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - if (MEASURE_LATENCY) { - recv_time = env_time_global(); - perf_event = em_event_pointer(event); - } - - queue_context_t *q_ctx; - em_queue_t dst_queue; - em_status_t ret; - int do_return; - - (void)eo_context; - (void)type; - - q_ctx = q_context; - - /* - * Helper: Update the test state, count recv events, - * calc & print stats, prepare for next step - */ - do_return = update_test_state(event); - if (unlikely(do_return)) - return; - - if (ALLOC_FREE_PER_EVENT) - event = alloc_free_per_event(event); - - dst_queue = q_ctx->next_queue; - test_fatal_if(queue != q_ctx->this_queue, "Queue config error"); - - if (MEASURE_LATENCY) { - measure_latency(perf_event, q_ctx, recv_time); - perf_event->send_time = env_time_global(); - } - /* Send the event to the next queue */ - ret = em_send(event, dst_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, dst_queue); - } -} - -/** - * Receive function helper: Update the test state - * - * Calculates the number of received events, maintains & prints test statistics - * and restarts/reconfigures the test for the next queue/event-setup - * - * @return '1' if the caller receive function should immediately return, - * '0' otherwise - */ -static inline int -update_test_state(em_event_t event) -{ - uint64_t events; - uint64_t freed_count; - uint64_t ready_count; - const int core = em_core_id(); - test_status_t *const tstat = &perf_shm->test_status; - core_stat_t *const cstat = &perf_shm->core_stat[core]; - - events = cstat->events; - events++; - - if (unlikely(tstat->reset_flag)) { - events = 0; - if (CONST_NUM_EVENTS) { - /* Free all old events before allocating new ones. */ - if (unlikely(tstat->free_flag)) { - em_free(event); - freed_count = - env_atomic64_add_return(&tstat->freed_count, 1); - if (freed_count == CONST_NUM_EVENTS) { - /* Last event */ - env_atomic64_set(&tstat->freed_count, - 0); - tstat->reset_flag = 0; - tstat->free_flag = 0; - queue_step(); - } - /* Req caller receive-func to return */ - return 1; - } - } - - if (unlikely(core_state != CORE_STATE_IDLE)) { - core_state = CORE_STATE_IDLE; - cstat->begin_time = ENV_TIME_NULL; - - ready_count = - env_atomic64_add_return(&tstat->ready_count, 1); - - if (ready_count == (uint64_t)tstat->num_cores) { - env_atomic64_set(&tstat->ready_count, 0); - - if (CONST_NUM_EVENTS) { - int sample = tstat->samples; - int queues = tstat->queues; - - if (sample == 0 && queues < NUM_QUEUES) - tstat->free_flag = 1; - else - tstat->reset_flag = 0; - } else { - tstat->reset_flag = 0; - } - } - } - } else if (unlikely(events == 1)) { - cstat->begin_time = env_time_global(); - cstat->latency.events = 0; - cstat->latency.sched_ave = ENV_TIME_NULL; - cstat->latency.sched_max = ENV_TIME_NULL; - cstat->latency.local_ave = ENV_TIME_NULL; - cstat->latency.local_max = ENV_TIME_NULL; - - core_state = CORE_STATE_MEASURE; - } else if (unlikely(events == EVENTS_PER_SAMPLE)) { - /* - * Measurements done for this step. Store results and continue - * receiving events until all cores are done. 
- */ - env_time_t begin_time, end_time; - - cstat->end_time = env_time_global(); - - end_time = cstat->end_time; - begin_time = cstat->begin_time; - cstat->diff_time = env_time_diff(end_time, begin_time); - - ready_count = env_atomic64_add_return(&tstat->ready_count, 1); - - /* - * Check whether all cores are done with the step, - * and if done proceed to the next step - */ - if (unlikely((int)ready_count == tstat->num_cores)) { - /* No real need for atomicity here, ran on last core*/ - env_atomic64_set(&tstat->ready_count, 0); - - tstat->reset_flag = 1; - tstat->samples++; - - /* - * Print statistics. - * Omit prints for the first sample round to allow the - * test to stabilize after setups and teardowns. - */ - if (tstat->samples > 1) { - int print_header = tstat->samples == 2 ? 1 : 0; - - print_test_statistics(tstat, print_header, - perf_shm->core_stat); - } - - /* - * Start next test step - setup new queues - */ - if (tstat->samples == NUM_SAMPLES && - tstat->queues < NUM_QUEUES) { - if (!CREATE_ALL_QUEUES_AT_STARTUP) { - int step = tstat->step; - int first_q = tstat->queues; - int num_qs = queue_steps[step] - - queue_steps[step - 1]; - - create_and_link_queues(first_q, num_qs); - } - - if (!CONST_NUM_EVENTS) - queue_step(); - - tstat->samples = 0; - } - } - } - - cstat->events = events; - - return 0; -} - -/** - * Creates a number of EM queues, associates them with EOs, and links them. - */ -static void -create_and_link_queues(int start_queue, int num_queues) -{ - int i, j; - em_queue_t queue, prev_queue; - em_queue_prio_t prio; - em_queue_type_t type; - em_queue_group_t group; - queue_context_t *q_ctx; - em_status_t ret; - - APPL_PRINT("\nCreate new queues: %d - scheduled:%d + local:%d\n", - num_queues, - (num_queues * NUM_SCHED_QUEUES) / NUM_EOS, - (num_queues * NUM_LOCAL_QUEUES) / NUM_EOS); - - if (num_queues % NUM_EOS != 0) { - APPL_PRINT("%s() 'num_queues'=%d not multiple of NUM_EOS=%d\n", - __func__, num_queues, NUM_EOS); - return; - } - - for (i = start_queue; i < (start_queue + num_queues); i += NUM_EOS) { - prev_queue = EM_QUEUE_UNDEF; - - for (j = 0; j < NUM_EOS; j++) { - prio = EM_QUEUE_PRIO_NORMAL; - - if (j < NUM_SCHED_QUEUES) { - type = QUEUE_TYPE; - group = EM_QUEUE_GROUP_DEFAULT; - } else { - type = EM_QUEUE_TYPE_LOCAL; - group = EM_QUEUE_GROUP_UNDEF; - } - - queue = em_queue_create("queue", type, prio, group, - NULL); - if (queue == EM_QUEUE_UNDEF) { - APPL_PRINT("Max nbr of supported queues: %d\n", - i); - return; - } - - q_ctx = &perf_shm->queue_context_tbl[i + j]; - - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "em_queue_set_context():%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, perf_shm->eo[j], queue); - - ret = em_eo_add_queue_sync(perf_shm->eo[j], queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "em_eo_add_queue_sync():%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, perf_shm->eo[j], queue); - em_queue_delete(queue); - return; - } - /* Link queues */ - q_ctx->this_queue = queue; - q_ctx->next_queue = prev_queue; - q_ctx->prio = prio; - q_ctx->type = type; - prev_queue = queue; - } - - /* Connect first queue to the last */ - q_ctx = &perf_shm->queue_context_tbl[i + 0]; - q_ctx->next_queue = prev_queue; - } - - APPL_PRINT("New Qs created:%d First:%" PRI_QUEUE " Last:%" PRI_QUEUE "\n", - num_queues, - perf_shm->queue_context_tbl[start_queue].this_queue, - perf_shm->queue_context_tbl[start_queue + - num_queues - 1].this_queue); -} - -/** - * Print test statistics - */ -static 
void -print_test_statistics(test_status_t *test_status, int print_header, - core_stat_t core_stat[]) -{ - const int num_cores = test_status->num_cores; - const uint64_t cpu_hz = test_status->cpu_hz; - const double cpu_mhz = test_status->cpu_mhz; - const uint64_t total_events = (uint64_t)num_cores * EVENTS_PER_SAMPLE; - const uint64_t print_count = test_status->print_count++; - env_time_t total_time = ENV_TIME_NULL; - - for (int i = 0; i < num_cores; i++) - total_time = env_time_sum(total_time, core_stat[i].diff_time); - - double cycles_per_event = 0.0; - double events_per_sec = 0.0; - - if (likely(total_events > 0)) - cycles_per_event = env_time_to_cycles(total_time, cpu_hz) / - (double)total_events; - if (likely(cycles_per_event > 0)) /* Million events/s: */ - events_per_sec = cpu_mhz * num_cores / cycles_per_event; - - /* - * Print without latency statistics - */ - if (!MEASURE_LATENCY) { - if (print_header) - APPL_PRINT(RESULT_PRINTF_HDR); - APPL_PRINT(RESULT_PRINTF_FMT, - cycles_per_event, events_per_sec, - cpu_mhz, print_count); - return; - } - - /* - * Print with latency statistics - */ - uint64_t latency_events = 0; - env_time_t latency_hi_ave = ENV_TIME_NULL; - env_time_t latency_hi_max = ENV_TIME_NULL; - env_time_t latency_lo_ave = ENV_TIME_NULL; - env_time_t latency_lo_max = ENV_TIME_NULL; - - for (int i = 0; i < num_cores; i++) { - latency_events += core_stat[i].latency.events; - - latency_hi_ave = env_time_sum(latency_hi_ave, - core_stat[i].latency.sched_ave); - latency_lo_ave = env_time_sum(latency_lo_ave, - core_stat[i].latency.local_ave); - - if (env_time_cmp(core_stat[i].latency.sched_max, - latency_hi_max) > 0) { - latency_hi_max = core_stat[i].latency.sched_max; - } - if (env_time_cmp(core_stat[i].latency.local_max, - latency_lo_max) > 0) { - latency_lo_max = core_stat[i].latency.local_max; - } - } - - double lat_per_hi_ave = 0.0; - double lat_per_lo_ave = 0.0; - - if (likely(latency_events > 0)) { - lat_per_hi_ave = env_time_to_cycles(latency_hi_ave, cpu_hz) / - (double)latency_events; - lat_per_lo_ave = env_time_to_cycles(latency_lo_ave, cpu_hz) / - (double)latency_events; - } - - if (print_header) - APPL_PRINT(RESULT_PRINTF_LATENCY_HDR); - APPL_PRINT(RESULT_PRINTF_LATENCY_FMT, - cycles_per_event, events_per_sec, lat_per_hi_ave, - env_time_to_cycles(latency_hi_max, cpu_hz), - lat_per_lo_ave, - env_time_to_cycles(latency_lo_max, cpu_hz), - cpu_mhz, print_count); -} - -/** - * Free the input event and allocate a new one instead - */ -static inline em_event_t -alloc_free_per_event(em_event_t event) -{ - perf_event_t *perf_event = em_event_pointer(event); - env_time_t send_time = perf_event->send_time; - int seq = perf_event->seq; - size_t event_size = em_event_get_size(event); - - em_free(event); - - event = em_alloc(event_size, EM_EVENT_TYPE_SW, perf_shm->pool); - - perf_event = em_event_pointer(event); - - perf_event->send_time = send_time; - perf_event->seq = seq; - - return event; -} - -/** - * Measure the scheduling latency per event - */ -static inline void -measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, - env_time_t recv_time) -{ - const int core = em_core_id(); - core_stat_t *const cstat = &perf_shm->core_stat[core]; - const env_time_t send_time = perf_event->send_time; - env_time_t latency; - - if (perf_shm->test_status.reset_flag || - cstat->events == 0 || cstat->events >= EVENTS_PER_SAMPLE) - return; - - cstat->latency.events++; - - latency = env_time_diff(recv_time, send_time); - - if (q_ctx->type != EM_QUEUE_TYPE_LOCAL) { - 
cstat->latency.sched_ave =
- env_time_sum(cstat->latency.sched_ave, latency);
- if (env_time_cmp(latency, cstat->latency.sched_max) > 0)
- cstat->latency.sched_max = latency;
- } else {
- cstat->latency.local_ave =
- env_time_sum(cstat->latency.local_ave, latency);
-
- if (env_time_cmp(latency, cstat->latency.local_max) > 0)
- cstat->latency.local_max = latency;
- }
-}
+/*
+ * Copyright (c) 2012, Nokia Siemens Networks
+ * Copyright (c) 2014, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine performance test.
+ *
+ * Measures the average cycles consumed during an event send-sched-receive loop
+ * for a certain number of queues and events in the system. The test increases
+ * the number of queues[+events] for each measurement round and prints the
+ * results. The test stops once the maximum number of queues supported by the
+ * system is reached.
+ *
+ * The test is derived from the programs/performance/queues.c test, but
+ * additionally uses local queues between the processing EO's.
+ *
+ * Plot the cycles/event to get an idea of how the system scales with an
+ * increasing number of queues.
+ */
+
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+
+#include <event_machine.h>
+#include <event_machine/platform/env/environment.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+/*
+ * Test options:
+ */
+
+/* Alloc and free per event */
+#define ALLOC_FREE_PER_EVENT 0 /* false=0 or true=1 */
+
+/*
+ * Create all EM queues at startup or create the queues during
+ * the test in steps.
+ */
+#define CREATE_ALL_QUEUES_AT_STARTUP 0 /* false=0 or true=1 */
+
+/*
+ * Measure the send-enqueue-schedule-receive latency. Measured separately for
+ * the scheduled and the local queues.
+ */
+#define MEASURE_LATENCY 1 /* false=0 or true=1 */
+
+/*
+ * Keep the number of events constant while increasing the number of queues.
+ * Should be divisible by, or a factor of, the queue step.
+ */ +#define CONST_NUM_EVENTS 4096 /* true>0 or false=0 */ + +/* + * Test configuration: + */ + +#define MAX_CORES 64 + +/* Number of EO's and queues in a loop */ +#define NUM_EOS 4 + +#define NUM_SCHED_QUEUES (1) +#define NUM_LOCAL_QUEUES (NUM_EOS - NUM_SCHED_QUEUES) +COMPILE_TIME_ASSERT(NUM_SCHED_QUEUES + NUM_LOCAL_QUEUES == NUM_EOS, + INVALID_NUM_QUEUES_IN_LOOP); + +/* Number of events per queue */ +#define NUM_EVENTS 4 + +#if CONST_NUM_EVENTS > 0 +/* + * Total number of queues when using a constant number of events. + * Make sure that all queues get 'NUM_EVENTS' events per queue. + */ +#define NUM_QUEUES (CONST_NUM_EVENTS / NUM_EVENTS) +#else +/* + * Total number of queues when increasing the total event count for each queue + * step. + */ +#define NUM_QUEUES (NUM_EOS * 16 * 1024) +#endif + +/* Number of data bytes in an event */ +#define DATA_SIZE 128 + +/* Samples before adding more queues */ +#define NUM_SAMPLES (1 + 8) /* setup(1) + measure(N) */ + +/* Num events a core processes between samples */ +#define EVENTS_PER_SAMPLE 0x100000 + +/* EM queue type */ +#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC + +/* Core states during test. */ +#define CORE_STATE_MEASURE 0 +#define CORE_STATE_IDLE 1 + +/* Result APPL_PRINT() format string */ +#define RESULT_PRINTF_HDR "Cycles/Event Events/s cpu-freq\n" +#define RESULT_PRINTF_FMT "%12.0f %7.0f M %5.0f MHz %" PRIu64 "\n" + +/* Result APPL_PRINT() format string when MEASURE_LATENCY is used */ +#define RESULT_PRINTF_LATENCY_HDR \ +"Cycles/ Events/ Latency:\n" \ +" Event Sec sched-ave sched-max local-ave local-max cpu-freq\n" +#define RESULT_PRINTF_LATENCY_FMT \ +"%6.0f %7.2f M %11.0f %10" PRIu64 " %10.0f %10" PRIu64 " %5.0f MHz %" PRIu64 "\n" + +/* + * The number of scheduled queues to use in each test step. + * + * NOTE: The max queue step is always 'NUM_QUEUES', even if the value of + * 'NUM_QUEUES' would be smaller than a listed queue step (then just stop + * before reaching the end of the list). 
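+ *
+ * Example with the defaults above: CONST_NUM_EVENTS=4096 and NUM_EVENTS=4
+ * give NUM_QUEUES = 4096 / 4 = 1024, so the stepping ends at the 1024
+ * entry of the list below.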
+ */ +static const int queue_steps[] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, + 4096, 8192, 16384, 32768, 65536, NUM_QUEUES}; + +/** + * Test state, + * cache line alignment and padding handled in 'perf_shm_t' + */ +typedef struct { + int queues; + int step; + int samples; + int num_cores; + int reset_flag; + double cpu_mhz; + uint64_t cpu_hz; + uint64_t print_count; + env_atomic64_t ready_count; + /* if using CONST_NUM_EVENTS:*/ + int free_flag; + env_atomic64_t freed_count; +} test_status_t; + +/** + * Performance test statistics (per core) + */ +typedef struct { + uint64_t events; + env_time_t begin_time; + env_time_t end_time; + env_time_t diff_time; + struct { + uint64_t events; + env_time_t sched_ave; + env_time_t sched_max; + env_time_t local_ave; + env_time_t local_max; + } latency; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} core_stat_t; + +COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, + CORE_STAT_SIZE_ERROR); + +/** + * EO context data + */ +typedef struct { + em_eo_t eo_id; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} eo_context_t; + +COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, + EO_CONTEXT_T__SIZE_ERROR); + +/** + * Queue context data + */ +typedef struct { + /** This queue */ + em_queue_t this_queue; + /** Next queue */ + em_queue_t next_queue; + /** Priority of 'this_queue' */ + em_queue_prio_t prio; + /** Type of 'this_queue' */ + em_queue_type_t type; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} queue_context_t; + +COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, + QUEUE_CONTEXT_SIZE_ERROR); + +/** + * Performance test event + */ +typedef struct { + /* Send time stamp */ + env_time_t send_time; + /* Sequence number */ + int seq; + /* Test data */ + uint8_t data[DATA_SIZE]; +} perf_event_t; + +/** + * Test shared memory + */ +typedef struct { + /* Event pool used by this application */ + em_pool_t pool; + + test_status_t test_status ENV_CACHE_LINE_ALIGNED; + + core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; + + eo_context_t eo_context_tbl[NUM_EOS] ENV_CACHE_LINE_ALIGNED; + + queue_context_t queue_context_tbl[NUM_QUEUES] ENV_CACHE_LINE_ALIGNED; + /* EO ID's */ + em_eo_t eo[NUM_EOS] ENV_CACHE_LINE_ALIGNED; +} perf_shm_t; + +COMPILE_TIME_ASSERT(sizeof(perf_shm_t) % ENV_CACHE_LINE_SIZE == 0, + PERF_SHM_T__SIZE_ERROR); + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL perf_shm_t *perf_shm; + +/* EM-core local state */ +static ENV_LOCAL int core_state = CORE_STATE_MEASURE; + +static em_status_t +error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args); + +static void +queue_step(void); + +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +stop(void *eo_context, em_eo_t eo); + +static void +receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context); + +static int +update_test_state(em_event_t event); + +static void +create_and_link_queues(int start_queue, int num_queues); + +static void +print_test_statistics(test_status_t *test_status, int print_header, + core_stat_t core_stat[]); + +static inline em_event_t +alloc_free_per_event(em_event_t event); + +static inline void +measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, + env_time_t recv_time); + +/** + * Main function + * + * Call cm_setup() 
to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Test error handler + * + * @param eo Execution object id + * @param error The error code + * @param escope Error scope + * @param args List of arguments (__FILE__, __func__, __LINE__, + * (format), ## __VA_ARGS__) + * + * @return The original error code. + */ +static em_status_t +error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) +{ + if (escope == EM_ESCOPE_QUEUE_CREATE && !EM_ERROR_IS_FATAL(error)) { + APPL_PRINT("\nUnable to create more queues\n\n" + "Test finished\n"); + raise(SIGINT); + return error; + } + + if (appl_shm->exit_flag && EM_ESCOPE(escope) && + !EM_ERROR_IS_FATAL(error)) { + /* Suppress non-fatal EM-error logs during tear-down */ + if (escope == EM_ESCOPE_EO_ADD_QUEUE_SYNC) { + APPL_PRINT("\nExit: suppress queue setup error\n\n"); + return error; + } + } + + return test_error_handler(eo, error, escope, args); +} + +/** + * Init of the Queues performance test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfQueuesSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(error_handler); + } else { + perf_shm = env_shared_lookup("PerfQueuesSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf test queues init failed on EM-core: %u\n", + em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Queues performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + eo_context_t *eo_ctx; + em_status_t ret, start_ret = EM_ERROR; + const int q_ctx_size = sizeof(perf_shm->queue_context_tbl); + int i; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + perf_shm->pool = appl_conf->pools[0]; + else + perf_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + " Max. 
NUM_QUEUES: %i\n"
+ " sizeof queue_context_tbl: %i kB\n"
+ "***********************************************************\n"
+ "\n",
+ appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(),
+ em_core_count(),
+ appl_conf->num_procs, appl_conf->num_threads,
+ perf_shm->pool, NUM_QUEUES, q_ctx_size / 1024);
+
+ test_fatal_if(perf_shm->pool == EM_POOL_UNDEF,
+ "Undefined application event pool!");
+
+ perf_shm->test_status.cpu_hz = env_core_hz();
+ perf_shm->test_status.cpu_mhz = (double)perf_shm->test_status.cpu_hz /
+ 1000000.0;
+ perf_shm->test_status.num_cores = em_core_count();
+ perf_shm->test_status.free_flag = 0;
+
+ env_atomic64_init(&perf_shm->test_status.ready_count);
+ env_atomic64_init(&perf_shm->test_status.freed_count);
+
+ /* Create EOs */
+ for (i = 0; i < NUM_EOS; i++) {
+ eo_ctx = &perf_shm->eo_context_tbl[i];
+ perf_shm->eo[i] = em_eo_create("perf test eo", start, NULL,
+ stop, NULL, receive_func,
+ eo_ctx);
+ test_fatal_if(perf_shm->eo[i] == EM_EO_UNDEF,
+ "EO create failed: %d of %d", i, NUM_EOS);
+ }
+
+ APPL_PRINT(" EOs created\n");
+
+ /*
+ * Create and link queues
+ */
+ if (CREATE_ALL_QUEUES_AT_STARTUP) /* Create ALL queues at once */
+ create_and_link_queues(0, NUM_QUEUES);
+ else /* Create queues for the first step, then more before each step */
+ create_and_link_queues(0, queue_steps[0]);
+
+ /* Start EOs */
+ for (i = 0; i < NUM_EOS; i++) {
+ ret = em_eo_start_sync(perf_shm->eo[i], &start_ret, NULL);
+ test_fatal_if(ret != EM_OK || start_ret != EM_OK,
+ "EO start(%d):%" PRI_STAT " %" PRI_STAT "",
+ i, ret, start_ret);
+ }
+
+ queue_step();
+}
+
+/**
+ * Stop the test, only run on one core
+ */
+void
+test_stop(appl_conf_t *const appl_conf)
+{
+ em_eo_t eo;
+ em_status_t ret;
+ int i;
+
+ (void)appl_conf;
+
+ APPL_PRINT("%s() on EM-core %d\n", __func__, em_core_id());
+
+ /* Stop EOs */
+ for (i = 0; i < NUM_EOS; i++) {
+ eo = perf_shm->eo[i];
+ ret = em_eo_stop_sync(eo);
+
+ test_fatal_if(ret != EM_OK,
+ "EO:%" PRI_EO " stop:%" PRI_STAT "",
+ eo, ret);
+ }
+
+ /* Remove and delete all of the EO's queues, then delete the EO */
+ for (i = 0; i < NUM_EOS; i++) {
+ eo = perf_shm->eo[i];
+ ret = em_eo_remove_queue_all_sync(eo, EM_TRUE/*delete Qs*/);
+
+ test_fatal_if(ret != EM_OK,
+ "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "",
+ ret, eo);
+
+ ret = em_eo_delete(eo);
+ test_fatal_if(ret != EM_OK,
+ "EO:%" PRI_EO " delete:%" PRI_STAT "",
+ eo, ret);
+ }
+}
+
+/**
+ * Terminate the test, only run on one core
+ */
+void
+test_term(void)
+{
+ int core = em_core_id();
+
+ APPL_PRINT("%s() on EM-core %d\n", __func__, core);
+
+ env_shared_free(perf_shm);
+ em_unregister_error_handler();
+}
+
+/**
+ * Allocate, initialize and send test step events.
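+ *
+ * Note on the index arithmetic below: events are seeded only into the
+ * scheduled queues. With qidx = i % queue_count, the table index
+ *   idx = (qidx / NUM_EOS) * NUM_EOS + qidx % NUM_SCHED_QUEUES
+ * always lands on a scheduled queue of an EO-loop (e.g. indices
+ * 0, 4, 8, ... with the default NUM_EOS=4 and NUM_SCHED_QUEUES=1);
+ * the local queues only receive events forwarded at run time.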
+ */ +static void +queue_step(void) +{ + queue_context_t *q_ctx; + em_event_t event; + perf_event_t *perf_event; + em_status_t ret; + const int first = perf_shm->test_status.queues; + const int step = perf_shm->test_status.step; + const int queue_count = queue_steps[step]; + int i, j, idx, qidx; + + /* Allocate and send test events for the queues in the first step */ + if (CONST_NUM_EVENTS) { + for (i = 0; i < CONST_NUM_EVENTS; i++) { + event = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, + "EM alloc failed (%i)", i); + perf_event = em_event_pointer(event); + perf_event->seq = i; + perf_event->send_time = env_time_global(); + + /* Allocate events evenly to the queues */ + qidx = i % queue_count; + idx = (qidx / NUM_EOS) * NUM_EOS; + idx = idx + qidx % NUM_SCHED_QUEUES; + + q_ctx = &perf_shm->queue_context_tbl[idx]; + test_fatal_if(q_ctx->type == EM_QUEUE_TYPE_LOCAL, + "Illegal QueueType:%" PRI_QTYPE "", + q_ctx->this_queue); + + ret = em_send(event, q_ctx->this_queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT "\n" + "Queue:%" PRI_QUEUE "", + ret, q_ctx->this_queue); + em_free(event); + return; + } + } + } else { + for (i = first; i < queue_count; i++) { + qidx = i % queue_count; + idx = (qidx / NUM_EOS) * NUM_EOS; + idx = idx + qidx % NUM_SCHED_QUEUES; + q_ctx = &perf_shm->queue_context_tbl[idx]; + + test_fatal_if(q_ctx->type == EM_QUEUE_TYPE_LOCAL, + "Illegal QueueType:%" PRI_QTYPE "", + q_ctx->this_queue); + + for (j = 0; j < NUM_EVENTS; j++) { + event = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, + "EM alloc failed (%i)", i); + + perf_event = em_event_pointer(event); + perf_event->seq = i * NUM_EVENTS + j; + perf_event->send_time = env_time_global(); + + ret = em_send(event, q_ctx->this_queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT "\n" + "Queue:%" PRI_QUEUE "", + ret, q_ctx->this_queue); + em_free(event); + return; + } + } + } + } + + perf_shm->test_status.queues = queue_count; + perf_shm->test_status.step++; + + APPL_PRINT("\nNumber of queues: %d - scheduled:%d + local:%d\n", + perf_shm->test_status.queues, + (perf_shm->test_status.queues * NUM_SCHED_QUEUES) / NUM_EOS, + (perf_shm->test_status.queues * NUM_LOCAL_QUEUES) / NUM_EOS); + if (CONST_NUM_EVENTS) + APPL_PRINT("Number of events: %d\n", CONST_NUM_EVENTS); + else + APPL_PRINT("Number of events: %d\n", + perf_shm->test_status.queues * NUM_EVENTS); +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + eo_context_t *eo_ctx = eo_context; + + (void)conf; + + APPL_PRINT("EO %" PRI_EO " starting.\n", eo); + + eo_ctx->eo_id = eo; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t +stop(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + + APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); + + return EM_OK; +} + +/** + * @private + * + * EO receive function. + * + * Loops back events and calculates the event rate. 
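+ *
+ * Per received event: take a receive timestamp (if MEASURE_LATENCY),
+ * update the per-core test state, optionally free + reallocate the
+ * event (if ALLOC_FREE_PER_EVENT), record the latency and finally
+ * forward the event to 'q_ctx->next_queue' with a fresh send timestamp.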
+ */ +static void +receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context) +{ + env_time_t recv_time; + perf_event_t *perf_event; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + if (MEASURE_LATENCY) { + recv_time = env_time_global(); + perf_event = em_event_pointer(event); + } + + queue_context_t *q_ctx; + em_queue_t dst_queue; + em_status_t ret; + int do_return; + + (void)eo_context; + (void)type; + + q_ctx = q_context; + + /* + * Helper: Update the test state, count recv events, + * calc & print stats, prepare for next step + */ + do_return = update_test_state(event); + if (unlikely(do_return)) + return; + + if (ALLOC_FREE_PER_EVENT) + event = alloc_free_per_event(event); + + dst_queue = q_ctx->next_queue; + test_fatal_if(queue != q_ctx->this_queue, "Queue config error"); + + if (MEASURE_LATENCY) { + measure_latency(perf_event, q_ctx, recv_time); + perf_event->send_time = env_time_global(); + } + /* Send the event to the next queue */ + ret = em_send(event, dst_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, dst_queue); + } +} + +/** + * Receive function helper: Update the test state + * + * Calculates the number of received events, maintains & prints test statistics + * and restarts/reconfigures the test for the next queue/event-setup + * + * @return '1' if the caller receive function should immediately return, + * '0' otherwise + */ +static inline int +update_test_state(em_event_t event) +{ + uint64_t events; + uint64_t freed_count; + uint64_t ready_count; + const int core = em_core_id(); + test_status_t *const tstat = &perf_shm->test_status; + core_stat_t *const cstat = &perf_shm->core_stat[core]; + + events = cstat->events; + events++; + + if (unlikely(tstat->reset_flag)) { + events = 0; + if (CONST_NUM_EVENTS) { + /* Free all old events before allocating new ones. */ + if (unlikely(tstat->free_flag)) { + em_free(event); + freed_count = + env_atomic64_add_return(&tstat->freed_count, 1); + if (freed_count == CONST_NUM_EVENTS) { + /* Last event */ + env_atomic64_set(&tstat->freed_count, + 0); + tstat->reset_flag = 0; + tstat->free_flag = 0; + queue_step(); + } + /* Req caller receive-func to return */ + return 1; + } + } + + if (unlikely(core_state != CORE_STATE_IDLE)) { + core_state = CORE_STATE_IDLE; + cstat->begin_time = ENV_TIME_NULL; + + ready_count = + env_atomic64_add_return(&tstat->ready_count, 1); + + if (ready_count == (uint64_t)tstat->num_cores) { + env_atomic64_set(&tstat->ready_count, 0); + + if (CONST_NUM_EVENTS) { + int sample = tstat->samples; + int queues = tstat->queues; + + if (sample == 0 && queues < NUM_QUEUES) + tstat->free_flag = 1; + else + tstat->reset_flag = 0; + } else { + tstat->reset_flag = 0; + } + } + } + } else if (unlikely(events == 1)) { + cstat->begin_time = env_time_global(); + cstat->latency.events = 0; + cstat->latency.sched_ave = ENV_TIME_NULL; + cstat->latency.sched_max = ENV_TIME_NULL; + cstat->latency.local_ave = ENV_TIME_NULL; + cstat->latency.local_max = ENV_TIME_NULL; + + core_state = CORE_STATE_MEASURE; + } else if (unlikely(events == EVENTS_PER_SAMPLE)) { + /* + * Measurements done for this step. Store results and continue + * receiving events until all cores are done. 
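+ *
+ * 'ready_count' works as a barrier: the last core to reach
+ * EVENTS_PER_SAMPLE resets it, sets 'reset_flag' and, once NUM_SAMPLES
+ * rounds are complete, creates the queues for the next test step.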
+ */ + env_time_t begin_time, end_time; + + cstat->end_time = env_time_global(); + + end_time = cstat->end_time; + begin_time = cstat->begin_time; + cstat->diff_time = env_time_diff(end_time, begin_time); + + ready_count = env_atomic64_add_return(&tstat->ready_count, 1); + + /* + * Check whether all cores are done with the step, + * and if done proceed to the next step + */ + if (unlikely((int)ready_count == tstat->num_cores)) { + /* No real need for atomicity here, ran on last core*/ + env_atomic64_set(&tstat->ready_count, 0); + + tstat->reset_flag = 1; + tstat->samples++; + + /* + * Print statistics. + * Omit prints for the first sample round to allow the + * test to stabilize after setups and teardowns. + */ + if (tstat->samples > 1) { + int print_header = tstat->samples == 2 ? 1 : 0; + + print_test_statistics(tstat, print_header, + perf_shm->core_stat); + } + + /* + * Start next test step - setup new queues + */ + if (tstat->samples == NUM_SAMPLES && + tstat->queues < NUM_QUEUES) { + if (!CREATE_ALL_QUEUES_AT_STARTUP) { + int step = tstat->step; + int first_q = tstat->queues; + int num_qs = queue_steps[step] - + queue_steps[step - 1]; + + create_and_link_queues(first_q, num_qs); + } + + if (!CONST_NUM_EVENTS) + queue_step(); + + tstat->samples = 0; + } + } + } + + cstat->events = events; + + return 0; +} + +/** + * Creates a number of EM queues, associates them with EOs, and links them. + */ +static void +create_and_link_queues(int start_queue, int num_queues) +{ + int i, j; + em_queue_t queue, prev_queue; + em_queue_prio_t prio; + em_queue_type_t type; + em_queue_group_t group; + queue_context_t *q_ctx; + em_status_t ret; + + APPL_PRINT("\nCreate new queues: %d - scheduled:%d + local:%d\n", + num_queues, + (num_queues * NUM_SCHED_QUEUES) / NUM_EOS, + (num_queues * NUM_LOCAL_QUEUES) / NUM_EOS); + + if (num_queues % NUM_EOS != 0) { + APPL_PRINT("%s() 'num_queues'=%d not multiple of NUM_EOS=%d\n", + __func__, num_queues, NUM_EOS); + return; + } + + for (i = start_queue; i < (start_queue + num_queues); i += NUM_EOS) { + prev_queue = EM_QUEUE_UNDEF; + + for (j = 0; j < NUM_EOS; j++) { + prio = EM_QUEUE_PRIO_NORMAL; + + if (j < NUM_SCHED_QUEUES) { + type = QUEUE_TYPE; + group = EM_QUEUE_GROUP_DEFAULT; + } else { + type = EM_QUEUE_TYPE_LOCAL; + group = EM_QUEUE_GROUP_UNDEF; + } + + queue = em_queue_create("queue", type, prio, group, + NULL); + if (queue == EM_QUEUE_UNDEF) { + APPL_PRINT("Max nbr of supported queues: %d\n", + i); + return; + } + + q_ctx = &perf_shm->queue_context_tbl[i + j]; + + ret = em_queue_set_context(queue, q_ctx); + test_fatal_if(ret != EM_OK, + "em_queue_set_context():%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, perf_shm->eo[j], queue); + + ret = em_eo_add_queue_sync(perf_shm->eo[j], queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "em_eo_add_queue_sync():%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, perf_shm->eo[j], queue); + em_queue_delete(queue); + return; + } + /* Link queues */ + q_ctx->this_queue = queue; + q_ctx->next_queue = prev_queue; + q_ctx->prio = prio; + q_ctx->type = type; + prev_queue = queue; + } + + /* Connect first queue to the last */ + q_ctx = &perf_shm->queue_context_tbl[i + 0]; + q_ctx->next_queue = prev_queue; + } + + APPL_PRINT("New Qs created:%d First:%" PRI_QUEUE " Last:%" PRI_QUEUE "\n", + num_queues, + perf_shm->queue_context_tbl[start_queue].this_queue, + perf_shm->queue_context_tbl[start_queue + + num_queues - 1].this_queue); +} + +/** + * Print test statistics + */ +static 
void +print_test_statistics(test_status_t *test_status, int print_header, + core_stat_t core_stat[]) +{ + const int num_cores = test_status->num_cores; + const uint64_t cpu_hz = test_status->cpu_hz; + const double cpu_mhz = test_status->cpu_mhz; + const uint64_t total_events = (uint64_t)num_cores * EVENTS_PER_SAMPLE; + const uint64_t print_count = test_status->print_count++; + env_time_t total_time = ENV_TIME_NULL; + + for (int i = 0; i < num_cores; i++) + total_time = env_time_sum(total_time, core_stat[i].diff_time); + + double cycles_per_event = 0.0; + double events_per_sec = 0.0; + + if (likely(total_events > 0)) + cycles_per_event = env_time_to_cycles(total_time, cpu_hz) / + (double)total_events; + if (likely(cycles_per_event > 0)) /* Million events/s: */ + events_per_sec = cpu_mhz * num_cores / cycles_per_event; + + /* + * Print without latency statistics + */ + if (!MEASURE_LATENCY) { + if (print_header) + APPL_PRINT(RESULT_PRINTF_HDR); + APPL_PRINT(RESULT_PRINTF_FMT, + cycles_per_event, events_per_sec, + cpu_mhz, print_count); + return; + } + + /* + * Print with latency statistics + */ + uint64_t latency_events = 0; + env_time_t latency_hi_ave = ENV_TIME_NULL; + env_time_t latency_hi_max = ENV_TIME_NULL; + env_time_t latency_lo_ave = ENV_TIME_NULL; + env_time_t latency_lo_max = ENV_TIME_NULL; + + for (int i = 0; i < num_cores; i++) { + latency_events += core_stat[i].latency.events; + + latency_hi_ave = env_time_sum(latency_hi_ave, + core_stat[i].latency.sched_ave); + latency_lo_ave = env_time_sum(latency_lo_ave, + core_stat[i].latency.local_ave); + + if (env_time_cmp(core_stat[i].latency.sched_max, + latency_hi_max) > 0) { + latency_hi_max = core_stat[i].latency.sched_max; + } + if (env_time_cmp(core_stat[i].latency.local_max, + latency_lo_max) > 0) { + latency_lo_max = core_stat[i].latency.local_max; + } + } + + double lat_per_hi_ave = 0.0; + double lat_per_lo_ave = 0.0; + + if (likely(latency_events > 0)) { + lat_per_hi_ave = env_time_to_cycles(latency_hi_ave, cpu_hz) / + (double)latency_events; + lat_per_lo_ave = env_time_to_cycles(latency_lo_ave, cpu_hz) / + (double)latency_events; + } + + if (print_header) + APPL_PRINT(RESULT_PRINTF_LATENCY_HDR); + APPL_PRINT(RESULT_PRINTF_LATENCY_FMT, + cycles_per_event, events_per_sec, lat_per_hi_ave, + env_time_to_cycles(latency_hi_max, cpu_hz), + lat_per_lo_ave, + env_time_to_cycles(latency_lo_max, cpu_hz), + cpu_mhz, print_count); +} + +/** + * Free the input event and allocate a new one instead + */ +static inline em_event_t +alloc_free_per_event(em_event_t event) +{ + perf_event_t *perf_event = em_event_pointer(event); + env_time_t send_time = perf_event->send_time; + int seq = perf_event->seq; + uint32_t event_size = em_event_get_size(event); + + em_free(event); + + event = em_alloc(event_size, EM_EVENT_TYPE_SW, perf_shm->pool); + + perf_event = em_event_pointer(event); + + perf_event->send_time = send_time; + perf_event->seq = seq; + + return event; +} + +/** + * Measure the scheduling latency per event + */ +static inline void +measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, + env_time_t recv_time) +{ + const int core = em_core_id(); + core_stat_t *const cstat = &perf_shm->core_stat[core]; + const env_time_t send_time = perf_event->send_time; + env_time_t latency; + + if (perf_shm->test_status.reset_flag || + cstat->events == 0 || cstat->events >= EVENTS_PER_SAMPLE) + return; + + cstat->latency.events++; + + latency = env_time_diff(recv_time, send_time); + + if (q_ctx->type != EM_QUEUE_TYPE_LOCAL) { + 
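+ /* Scheduled queue: accumulate the sum for the average, track the max */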
cstat->latency.sched_ave =
+ env_time_sum(cstat->latency.sched_ave, latency);
+ if (env_time_cmp(latency, cstat->latency.sched_max) > 0)
+ cstat->latency.sched_max = latency;
+ } else {
+ cstat->latency.local_ave =
+ env_time_sum(cstat->latency.local_ave, latency);
+
+ if (env_time_cmp(latency, cstat->latency.local_max) > 0)
+ cstat->latency.local_max = latency;
+ }
+}
diff --git a/programs/performance/queues_unscheduled.c b/programs/performance/queues_unscheduled.c
index 3caf752e..b2c8903a 100644
--- a/programs/performance/queues_unscheduled.c
+++ b/programs/performance/queues_unscheduled.c
@@ -1,1206 +1,1206 @@
-/*
- * Copyright (c) 2012, Nokia Siemens Networks
- * Copyright (c) 2014, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- *
- * Event Machine performance test
- * (based on the queues.c test, extended to also use unscheduled queues)
- *
- * Measures the average cycles consumed during an event send-sched-receive loop
- * for a certain number of queues and events in the system. The test increases
- * the number of queues[+events] for each measurement round and prints the
- * results. The test stops once the maximum number of queues supported by the
- * system is reached.
- *
- * Each normal scheduled queue is accompanied by an unscheduled queue that is
- * dequeued from at each event receive. Both the received event and the
- * dequeued event are sent to the next queue at the end of the receive function.
- *
- * The measured cycles contain the scheduled event send-sched-receive cycles as
- * well as the unscheduled event dequeue.
- *
- * Plot the cycles/event to get an idea of how the system scales with an
- * increasing number of queues.
- */
-
-#include <inttypes.h>
-#include <string.h>
-#include <stdio.h>
-
-#include <event_machine.h>
-#include <event_machine/platform/env/environment.h>
-
-#include "cm_setup.h"
-#include "cm_error_handler.h"
-
-/*
- * Test options:
- */
-
-/* Alloc and free per event */
-#define ALLOC_FREE_PER_EVENT 0 /* false=0 or true=1 */
-
-/*
- * Create all EM queues at startup or create the queues during
- * the test in steps.
- */
-#define CREATE_ALL_QUEUES_AT_STARTUP 0 /* false=0 or true=1 */
-
-/*
- * Measure the send-enqueue-schedule-receive latency. Measured separately for
- * 'high priority' and 'low priority' queues (ratio 1:4).
- */
-#define MEASURE_LATENCY 1 /* false=0 or true=1 */
-
-/*
- * Keep the number of events constant while increasing the number of queues.
- * Should be divisible by, or a factor of, the queue step.
- */
-#define CONST_NUM_EVENTS 4096 /* true>0 or false=0 */
-
-/*
- * Test configuration:
- */
-
-#define MAX_CORES 64
-
-/* Number of EO's and scheduled queues in a loop */
-#define NUM_EOS 4
-
-/* Number of events per queue */
-#define NUM_EVENTS 4
-
-#if CONST_NUM_EVENTS > 0
-/*
- * Total number of queues when using a constant number of events.
- * Make sure that all queues, both scheduled and unscheduled (hence /2),
- * get 'NUM_EVENTS' events per queue.
- */
-#define NUM_QUEUES ((CONST_NUM_EVENTS / NUM_EVENTS) / 2)
-#else
-/*
- * Total number of queues when increasing the total event count for each queue
- * step.
- */
-#define NUM_QUEUES (NUM_EOS * 16 * 1024)
-#endif
-
-/* Number of data bytes in an event */
-#define DATA_SIZE 128
-
-/* Samples before adding more queues */
-#define NUM_SAMPLES (1 + 8) /* setup(1) + measure(N) */
-
-/* Num events a core processes between samples */
-#define EVENTS_PER_SAMPLE 0x400000
-
-/* EM queue type */
-#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC
-
-/* Core states during test. */
-#define CORE_STATE_MEASURE 0
-#define CORE_STATE_IDLE 1
-
-/* Result APPL_PRINT() format string */
-#define RESULT_PRINTF_HDR "Cycles/Event Events/s cpu-freq\n"
-#define RESULT_PRINTF_FMT "%12.0f %7.0f M %5.0f MHz %" PRIu64 "\n"
-
-/* Result APPL_PRINT() format string when MEASURE_LATENCY is used */
-#define RESULT_PRINTF_LATENCY_HDR \
-"Cycles/ Events/ Latency:\n" \
-" Event Sec hi-ave hi-max lo-ave lo-max cpu-freq\n"
-#define RESULT_PRINTF_LATENCY_FMT \
-"%6.0f %7.2f M %8.0f %7" PRIu64 " %7.0f %7" PRIu64 " %5.0f MHz %" PRIu64 "\n"
-
-/*
- * The number of scheduled queues to use in each test step.
- * Additional unscheduled queues are also created for each step.
- *
- * NOTE: The max queue step is always 'NUM_QUEUES', even if the value of
- * 'NUM_QUEUES' would be smaller than a listed queue step (then just stop
- * before reaching the end of the list).
- */ -static const int queue_steps[] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, - 4096, 8192, 16384, 32768, 65536, NUM_QUEUES}; - -/** - * Test state, - * cache line alignment and padding handled in 'perf_shm_t' - */ -typedef struct { - int queues; - int step; - int samples; - int num_cores; - int reset_flag; - double cpu_mhz; - uint64_t cpu_hz; - uint64_t print_count; - env_atomic64_t ready_count; - /* if using CONST_NUM_EVENTS:*/ - int free_flag; - env_atomic64_t freed_count; -} test_status_t; - -/** - * Performance test statistics (per core) - */ -typedef struct { - uint64_t events; - env_time_t begin_time; - env_time_t end_time; - env_time_t diff_time; - struct { - uint64_t events; - env_time_t hi_prio_ave; - env_time_t hi_prio_max; - env_time_t lo_prio_ave; - env_time_t lo_prio_max; - } latency; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} core_stat_t; - -COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, - CORE_STAT_SIZE_ERROR); - -/** - * EO context data - */ -typedef struct { - em_eo_t eo_id; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} eo_context_t; - -COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, - EO_CONTEXT_T__SIZE_ERROR); - -/** - * Queue context data - * Keep the scheduled queue context and the associated unscheduled queue - * context data in the same cache line for faster access - also eliminates - * the need to call em_queue_get_context() for the unscheduled queues. - */ -typedef struct { - struct scheduled_queue_context { - /** This queue */ - em_queue_t this_queue; - /** Next queue */ - em_queue_t next_queue; - /** Priority of 'this_queue' */ - em_queue_prio_t prio; - } sch_q; - - struct unscheduled_queue_context { - /** This unscheduled queue */ - em_queue_t this_queue; - /** Next unscheduled queue */ - em_queue_t next_queue; - } unsch_q; - - /** Pad to multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} queue_context_t; - -COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, - QUEUE_CONTEXT_SIZE_ERROR); - -/** - * Performance test event - */ -typedef struct { - /* Send time stamp */ - env_time_t send_time; - /* Sequence number */ - int seq; - /* Test data */ - uint8_t data[DATA_SIZE]; -} perf_event_t; - -/** - * Test shared memory - */ -typedef struct { - /* Event pool used by this application */ - em_pool_t pool; - - test_status_t test_status ENV_CACHE_LINE_ALIGNED; - - core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; - - eo_context_t eo_context_tbl[NUM_EOS] ENV_CACHE_LINE_ALIGNED; - - queue_context_t queue_context_tbl[NUM_QUEUES] ENV_CACHE_LINE_ALIGNED; - /* EO ID's */ - em_eo_t eo[NUM_EOS] ENV_CACHE_LINE_ALIGNED; -} perf_shm_t; - -COMPILE_TIME_ASSERT(sizeof(perf_shm_t) % ENV_CACHE_LINE_SIZE == 0, - PERF_SHM_T__SIZE_ERROR); - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL perf_shm_t *perf_shm; - -/* EM-core local state */ -static ENV_LOCAL int core_state = CORE_STATE_MEASURE; - -static em_status_t -error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args); - -static void -queue_step(void); - -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -stop(void *eo_context, em_eo_t eo); - -static void -receive_func(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_context); - -static int -update_test_state(em_event_t event, em_event_t unsch_event); - -static void 
-create_and_link_queues(int start_queue, int num_queues); - -static void -print_test_statistics(test_status_t *test_status, int print_header, - core_stat_t core_stat[]); - -static inline em_event_t -alloc_free_per_event(em_event_t event); - -static inline void -measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, - env_time_t recv_time); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Test error handler - * - * @param eo Execution object id - * @param error The error code - * @param escope Error scope - * @param args List of arguments (__FILE__, __func__, __LINE__, - * (format), ## __VA_ARGS__) - * - * @return The original error code. - */ -static em_status_t -error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) -{ - if (escope == EM_ESCOPE_QUEUE_CREATE && !EM_ERROR_IS_FATAL(error)) { - APPL_PRINT("\nUnable to create more queues\n\n" - "Test finished\n"); - raise(SIGINT); - return error; - } - - if (appl_shm->exit_flag && EM_ESCOPE(escope) && - !EM_ERROR_IS_FATAL(error)) { - /* Suppress non-fatal EM-error logs during tear-down */ - if (escope == EM_ESCOPE_EO_ADD_QUEUE_SYNC) { - APPL_PRINT("\nExit: suppress queue setup error\n\n"); - return error; - } - } - - return test_error_handler(eo, error, escope, args); -} - -/** - * Init of the Queues performance test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - perf_shm = env_shared_reserve("PerfQueuesSharedMem", - sizeof(perf_shm_t)); - em_register_error_handler(error_handler); - } else { - perf_shm = env_shared_lookup("PerfQueuesSharedMem"); - } - - if (perf_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Perf test queues init failed on EM-core: %u\n", - em_core_id()); - else if (core == 0) - memset(perf_shm, 0, sizeof(perf_shm_t)); -} - -/** - * Startup of the Queues performance test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - eo_context_t *eo_ctx; - em_status_t ret, start_ret = EM_ERROR; - const int q_ctx_size = sizeof(perf_shm->queue_context_tbl); - int i; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - perf_shm->pool = appl_conf->pools[0]; - else - perf_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - " Max. 
NUM_QUEUES: %i\n"
- " sizeof queue_context_tbl: %i kB\n"
- "***********************************************************\n"
- "\n",
- appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(),
- em_core_count(),
- appl_conf->num_procs, appl_conf->num_threads,
- perf_shm->pool, NUM_QUEUES, q_ctx_size / 1024);
-
- test_fatal_if(perf_shm->pool == EM_POOL_UNDEF,
- "Undefined application event pool!");
-
- perf_shm->test_status.cpu_hz = env_core_hz();
- perf_shm->test_status.cpu_mhz = (double)perf_shm->test_status.cpu_hz /
- 1000000.0;
- perf_shm->test_status.reset_flag = 0;
- perf_shm->test_status.num_cores = em_core_count();
- perf_shm->test_status.free_flag = 0;
-
- env_atomic64_init(&perf_shm->test_status.ready_count);
- env_atomic64_init(&perf_shm->test_status.freed_count);
-
- /* Create EOs */
- for (i = 0; i < NUM_EOS; i++) {
- eo_ctx = &perf_shm->eo_context_tbl[i];
- perf_shm->eo[i] = em_eo_create("perf test eo", start, NULL,
- stop, NULL, receive_func,
- eo_ctx);
- test_fatal_if(perf_shm->eo[i] == EM_EO_UNDEF,
- "EO create failed: %d of %d", i, NUM_EOS);
- }
-
- APPL_PRINT(" EOs created\n");
-
- /*
- * Create and link queues
- */
- if (CREATE_ALL_QUEUES_AT_STARTUP) /* Create ALL queues at once */
- create_and_link_queues(0, NUM_QUEUES);
- else /* Create queues for the first step, then more before each step */
- create_and_link_queues(0, queue_steps[0]);
-
- /* Start EOs */
- for (i = 0; i < NUM_EOS; i++) {
- ret = em_eo_start_sync(perf_shm->eo[i], &start_ret, NULL);
- test_fatal_if(ret != EM_OK || start_ret != EM_OK,
- "EO start(%d):%" PRI_STAT " %" PRI_STAT "",
- i, ret, start_ret);
- }
-
- queue_step();
-}
-
-/**
- * Stop the test, only run on one core
- */
-void
-test_stop(appl_conf_t *const appl_conf)
-{
- const int core = em_core_id();
- em_eo_t eo;
- em_status_t ret;
- int i;
-
- (void)appl_conf;
-
- APPL_PRINT("%s() on EM-core %d\n", __func__, core);
-
- /* Stop EOs */
- for (i = 0; i < NUM_EOS; i++) {
- eo = perf_shm->eo[i];
- ret = em_eo_stop_sync(eo);
- test_fatal_if(ret != EM_OK,
- "EO:%" PRI_EO " stop:%" PRI_STAT "",
- eo, ret);
- }
-
- /* Remove and delete all of the EO's queues, then delete the EO */
- for (i = 0; i < NUM_EOS; i++) {
- eo = perf_shm->eo[i];
- ret = em_eo_remove_queue_all_sync(eo, EM_TRUE/*delete Qs*/);
- test_fatal_if(ret != EM_OK,
- "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "",
- ret, eo);
- ret = em_eo_delete(eo);
- test_fatal_if(ret != EM_OK,
- "EO:%" PRI_EO " delete:%" PRI_STAT "",
- eo, ret);
- }
-
- for (i = 0; i < NUM_QUEUES; i++) {
- queue_context_t *q_ctx = &perf_shm->queue_context_tbl[i];
- em_queue_t unsch_queue = q_ctx->unsch_q.this_queue;
- em_event_t unsch_event;
-
- if (unsch_queue == EM_QUEUE_UNDEF)
- continue;
-
- for (;;) {
- unsch_event = em_queue_dequeue(unsch_queue);
- if (unsch_event == EM_EVENT_UNDEF)
- break;
- em_free(unsch_event);
- }
- ret = em_queue_delete(unsch_queue);
- test_fatal_if(ret != EM_OK,
- "Unsch-Queue:%" PRI_QUEUE " delete:%" PRI_STAT "",
- unsch_queue, ret);
- }
-}
-
-/**
- * Terminate the test, only run on one core
- */
-void
-test_term(void)
-{
- int core = em_core_id();
-
- APPL_PRINT("%s() on EM-core %d\n", __func__, core);
-
- env_shared_free(perf_shm);
- em_unregister_error_handler();
-}
-
-/**
- * Allocate, initialize and send test step events.
- */ -static void -queue_step(void) -{ - queue_context_t *q_ctx; - perf_event_t *perf_event; - em_status_t ret; - const int first = perf_shm->test_status.queues; - const int step = perf_shm->test_status.step; - const int queue_count = queue_steps[step]; - int i, j; - - /* Allocate and send test events for the queues */ - if (CONST_NUM_EVENTS) { - for (i = 0; i < CONST_NUM_EVENTS / 2; i++) { - em_event_t unsch_event; - - unsch_event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(unsch_event == EM_EVENT_UNDEF, - "EM alloc failed (%i)", i); - - /* Allocate events evenly to the queues */ - q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; - - ret = em_send(unsch_event, q_ctx->unsch_q.this_queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT "\n" - "Unsched-Q:%" PRI_QUEUE "", - ret, q_ctx->unsch_q.this_queue); - em_free(unsch_event); - return; - } - } - for (i = 0; i < CONST_NUM_EVENTS / 2; i++) { - em_event_t event; - - event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF || - sizeof(perf_event_t) != - em_event_get_size(event), - "EM alloc failed (%i)", i); - - perf_event = em_event_pointer(event); - perf_event->seq = i; - perf_event->send_time = env_time_global(); - - /* Allocate events evenly to the queues */ - q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; - - ret = em_send(event, q_ctx->sch_q.this_queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT "\n" - "Queue:%" PRI_QUEUE "", - ret, q_ctx->sch_q.this_queue); - em_free(event); - return; - } - } - } else { - for (i = first; i < queue_count; i++) { - em_event_t unsch_events[NUM_EVENTS]; - int num; - - q_ctx = &perf_shm->queue_context_tbl[i]; - - for (j = 0; j < NUM_EVENTS; j++) { - unsch_events[j] = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(unsch_events[j] == EM_EVENT_UNDEF, - "EM alloc failed (%d, %d)", i, j); - } - num = em_send_multi(unsch_events, NUM_EVENTS, - q_ctx->unsch_q.this_queue); - if (unlikely(num != NUM_EVENTS)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send multi:%d\n" - "Unsched-Q:%" PRI_QUEUE "", - num, q_ctx->unsch_q.this_queue); - em_free_multi(&unsch_events[num], NUM_EVENTS - num); - return; - } - } - for (i = first; i < queue_count; i++) { - em_event_t events[NUM_EVENTS]; - int num; - - q_ctx = &perf_shm->queue_context_tbl[i]; - - for (j = 0; j < NUM_EVENTS; j++) { - events[j] = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(events[j] == EM_EVENT_UNDEF || - sizeof(perf_event_t) != - em_event_get_size(events[j]), - "EM alloc failed (%d,%d)", i, j); - - perf_event = em_event_pointer(events[j]); - perf_event->seq = i * NUM_EVENTS + j; - perf_event->send_time = env_time_global(); - } - num = em_send_multi(events, NUM_EVENTS, - q_ctx->sch_q.this_queue); - if (unlikely(num != NUM_EVENTS)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send multi:%d\n" - "Queue:%" PRI_QUEUE "", - num, q_ctx->sch_q.this_queue); - em_free_multi(&events[num], NUM_EVENTS - num); - return; - } - } - } - - perf_shm->test_status.queues = queue_count; - perf_shm->test_status.step++; - - APPL_PRINT("\nNumber of queues: %6.0d + %d\n", - queue_count, queue_count); - if (CONST_NUM_EVENTS) - APPL_PRINT("Number of events: %6.0d + %d\n", - CONST_NUM_EVENTS / 2, CONST_NUM_EVENTS / 2); - else - APPL_PRINT("Number of events: %6.0d + %d\n", - queue_count * 
NUM_EVENTS, queue_count * NUM_EVENTS); -} - -/** - * @private - * - * EO start function. - * - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - eo_ctx->eo_id = eo; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t -stop(void *eo_context, em_eo_t eo) -{ - (void)eo_context; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - return EM_OK; -} - -/** - * @private - * - * EO receive function. - * - * Loops back events and calculates the event rate. - */ -static void -receive_func(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_context) -{ - env_time_t recv_time; - perf_event_t *perf_event; - - if (MEASURE_LATENCY) { - recv_time = env_time_global(); - perf_event = em_event_pointer(event); - } - - queue_context_t *q_ctx; - em_queue_t dst_queue; - em_queue_t src_unsch_queue; - em_queue_t dst_unsch_queue; - em_event_t unsch_event; - em_status_t ret; - int do_return; - - (void)eo_context; - (void)type; - - q_ctx = q_context; - src_unsch_queue = q_ctx->unsch_q.this_queue; - - /* - * Dequeue an unscheduled event for every received scheduled event - */ - unsch_event = em_queue_dequeue(src_unsch_queue); - test_fatal_if(unsch_event == EM_EVENT_UNDEF && !appl_shm->exit_flag, - "em_queue_dequeue() error"); - - /* Free all events if the exit-flag is set (program termination) */ - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - em_free(unsch_event); - return; - } - - /* - * Helper: Update the test state, count recv events, - * calc & print stats, prepare for next step - */ - do_return = update_test_state(event, unsch_event); - if (unlikely(do_return)) - return; - - if (ALLOC_FREE_PER_EVENT) - event = alloc_free_per_event(event); - - dst_queue = q_ctx->sch_q.next_queue; - dst_unsch_queue = q_ctx->unsch_q.next_queue; - test_fatal_if(queue != q_ctx->sch_q.this_queue, "Queue config error"); - - if (MEASURE_LATENCY) - measure_latency(perf_event, q_ctx, recv_time); - - /* Enqueue the unscheduled event to the next unscheduled queue */ - ret = em_send(unsch_event, dst_unsch_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT " Unsched-Q: %" PRI_QUEUE "", - ret, dst_unsch_queue); - } - - /* Send the scheduled event to the next scheduled queue */ - if (MEASURE_LATENCY) - perf_event->send_time = env_time_global(); - ret = em_send(event, dst_queue); - if (unlikely(ret != EM_OK)) { - em_free(event); - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT " Queue:%" PRI_QUEUE "", - ret, dst_queue); - } -} - -/** - * Receive function helper: Update the test state - * - * Calculates the number of received events, maintains & prints test statistics - * and restarts/reconfigures the test for the next queue/event-setup - * - * @return '1' if the caller receive function should immediately return, - * '0' otherwise - */ -static inline int -update_test_state(em_event_t event, em_event_t unsch_event) -{ - uint64_t events; - uint64_t freed_count; - uint64_t ready_count; - const int core = em_core_id(); - test_status_t *const tstat = &perf_shm->test_status; - core_stat_t *const cstat = &perf_shm->core_stat[core]; - - events = cstat->events; - /* one scheduled and one unscheduled event received */ - events += 2; - - if (unlikely(tstat->reset_flag)) { - events = 0; - if (CONST_NUM_EVENTS) { - /* Free all old events before 
allocating new ones. */ - if (unlikely(tstat->free_flag)) { - em_free(event); - em_free(unsch_event); - freed_count = - env_atomic64_add_return(&tstat->freed_count, 2); - if (freed_count == CONST_NUM_EVENTS) { - /* Last event */ - env_atomic64_set(&tstat->freed_count, - 0); - tstat->reset_flag = 0; - tstat->free_flag = 0; - queue_step(); - } - /* Req caller receive-func to return */ - return 1; - } - } - - if (unlikely(core_state != CORE_STATE_IDLE)) { - core_state = CORE_STATE_IDLE; - cstat->begin_time = ENV_TIME_NULL; - - ready_count = - env_atomic64_add_return(&tstat->ready_count, 1); - - if (ready_count == (uint64_t)tstat->num_cores) { - env_atomic64_set(&tstat->ready_count, 0); - - if (CONST_NUM_EVENTS) { - int sample = tstat->samples; - int queues = tstat->queues; - - if (sample == 0 && queues < NUM_QUEUES) - tstat->free_flag = 1; - else - tstat->reset_flag = 0; - } else { - tstat->reset_flag = 0; - } - } - } - } else if (unlikely(events == 2)) { - cstat->begin_time = env_time_global(); - cstat->latency.events = 0; - cstat->latency.hi_prio_ave = ENV_TIME_NULL; - cstat->latency.hi_prio_max = ENV_TIME_NULL; - cstat->latency.lo_prio_ave = ENV_TIME_NULL; - cstat->latency.lo_prio_max = ENV_TIME_NULL; - - core_state = CORE_STATE_MEASURE; - } else if (unlikely(events == EVENTS_PER_SAMPLE)) { - /* - * Measurements done for this step. Store results and continue - * receiving events until all cores are done. - */ - env_time_t begin_time, end_time; - - cstat->end_time = env_time_global(); - - end_time = cstat->end_time; - begin_time = cstat->begin_time; - cstat->diff_time = env_time_diff(end_time, begin_time); - - ready_count = env_atomic64_add_return(&tstat->ready_count, 1); - - /* - * Check whether all cores are done with the step, - * and if done proceed to the next step - */ - if (unlikely((int)ready_count == tstat->num_cores)) { - /* No real need for atomicity here, ran on last core*/ - env_atomic64_set(&tstat->ready_count, 0); - - tstat->reset_flag = 1; - tstat->samples++; - - /* - * Print statistics. - * Omit prints for the first sample round to allow the - * test to stabilize after setups and teardowns. - */ - if (tstat->samples > 1) { - int print_header = tstat->samples == 2 ? 1 : 0; - - print_test_statistics(tstat, print_header, - perf_shm->core_stat); - } - - /* - * Start next test step - setup new queues - */ - if (tstat->samples == NUM_SAMPLES && - tstat->queues < NUM_QUEUES) { - if (!CREATE_ALL_QUEUES_AT_STARTUP) { - int step = tstat->step; - int first_q = tstat->queues; - int num_qs = queue_steps[step] - - queue_steps[step - 1]; - - create_and_link_queues(first_q, num_qs); - } - - if (!CONST_NUM_EVENTS) - queue_step(); - - tstat->samples = 0; - } - } - } - - cstat->events = events; - - return 0; -} - -/** - * Creates a number of EM queues, associates them with EOs, and links them. 
- */ -static void -create_and_link_queues(int start_queue, int num_queues) -{ - int i, j; - em_queue_t queue, next_queue; - em_queue_t queue_unscheduled, next_unscheduled; - em_queue_conf_t unsch_conf; - em_queue_prio_t prio; - em_status_t ret; - queue_context_t *q_ctx; - - APPL_PRINT("\nCreate new queues - scheduled:%d + unscheduled:%d\n", - num_queues, num_queues); - - if (num_queues % NUM_EOS != 0) { - APPL_PRINT("%s() arg 'num_queues'=%d not multiple of NUM_EOS=%d\n", - __func__, num_queues, NUM_EOS); - return; - } - - memset(&unsch_conf, 0, sizeof(unsch_conf)); - if (QUEUE_TYPE == EM_QUEUE_TYPE_ATOMIC) { - /* - * If the EO receives are running with an atomic context then - * unsched queue enq/deq can be multithread unsafe to possibly - * boost perf. - */ - unsch_conf.flags |= EM_QUEUE_FLAG_ENQ_NOT_MTSAFE; - unsch_conf.flags |= EM_QUEUE_FLAG_DEQ_NOT_MTSAFE; - } - - for (i = start_queue; i < (start_queue + num_queues); i += NUM_EOS) { - next_queue = EM_QUEUE_UNDEF; - next_unscheduled = EM_QUEUE_UNDEF; - - for (j = 0; j < NUM_EOS; j++) { - prio = EM_QUEUE_PRIO_NORMAL; - - if (MEASURE_LATENCY) { - if (j == 0) - prio = EM_QUEUE_PRIO_HIGH; - } - - q_ctx = &perf_shm->queue_context_tbl[i + j]; - - /* - * Create a new scheduled queue - */ - queue = em_queue_create("queue", QUEUE_TYPE, prio, - EM_QUEUE_GROUP_DEFAULT, NULL); - if (queue == EM_QUEUE_UNDEF) { - APPL_PRINT("Max nbr of supported queues: %d\n", - 2 * i); - return; - } - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "em_queue_set_context:%" PRI_STAT "\n" - "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", - perf_shm->eo[j], queue); - /* Add the scheduled queue to an EO and enable it */ - ret = em_eo_add_queue_sync(perf_shm->eo[j], queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "em_eo_add_queue_sync():%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, perf_shm->eo[j], queue); - em_queue_delete(queue); - return; - } - /* Link scheduled queues */ - q_ctx->sch_q.this_queue = queue; - q_ctx->sch_q.next_queue = next_queue; - q_ctx->sch_q.prio = prio; - - /* - * Create a new unscheduled queue - */ - queue_unscheduled = - em_queue_create("unscheduled_queue", - EM_QUEUE_TYPE_UNSCHEDULED, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, - &unsch_conf); - if (queue_unscheduled == EM_QUEUE_UNDEF) { - APPL_PRINT("Max nbr of supported queues: %d\n", - 2 * i + 1); - return; - } - - /* Link unscheduled queues */ - q_ctx->unsch_q.this_queue = queue_unscheduled; - q_ctx->unsch_q.next_queue = next_unscheduled; - - /* - * Set the same top level queue context for both the - * scheduled and the unscheduled queue, access queue - * specific context by using q_ctx->sch_q.* or - * q_ctx->unsch_q.* - this eliminates the need to call - * em_queue_get_context() for each event for the - * unscheduled queues - */ - ret = em_queue_set_context(queue_unscheduled, q_ctx); - test_fatal_if(ret != EM_OK, - "em_queue_set_context:%" PRI_STAT "\n" - "Unsched-Q:%" PRI_QUEUE "", - ret, queue_unscheduled); - /* Sanity check */ - test_fatal_if(em_queue_get_context(queue) != - em_queue_get_context(queue_unscheduled), - "em_queue_get_context failed."); - - next_queue = queue; - next_unscheduled = queue_unscheduled; - } - - /* Connect first scheduled queue to the last */ - q_ctx = &perf_shm->queue_context_tbl[i + 0]; - q_ctx->sch_q.next_queue = next_queue; - q_ctx->unsch_q.next_queue = next_unscheduled; - } -} - -/** - * Print test statistics - */ -static void -print_test_statistics(test_status_t *test_status, int 
print_header, - core_stat_t core_stat[]) -{ - const int num_cores = test_status->num_cores; - const uint64_t cpu_hz = test_status->cpu_hz; - const double cpu_mhz = test_status->cpu_mhz; - const uint64_t total_events = (uint64_t)num_cores * EVENTS_PER_SAMPLE; - const uint64_t print_count = test_status->print_count++; - env_time_t total_time = ENV_TIME_NULL; - - for (int i = 0; i < num_cores; i++) - total_time = env_time_sum(total_time, core_stat[i].diff_time); - - double cycles_per_event = 0.0; - double events_per_sec = 0.0; - - if (likely(total_events > 0)) - cycles_per_event = env_time_to_cycles(total_time, cpu_hz) / - (double)total_events; - if (likely(cycles_per_event > 0)) /* Million events/s: */ - events_per_sec = cpu_mhz * num_cores / cycles_per_event; - - /* - * Print without latency statistics - */ - if (!MEASURE_LATENCY) { - if (print_header) - APPL_PRINT(RESULT_PRINTF_HDR); - APPL_PRINT(RESULT_PRINTF_FMT, - cycles_per_event, events_per_sec, - cpu_mhz, print_count); - return; - } - - /* - * Print with latency statistics - */ - uint64_t latency_events = 0; - env_time_t latency_hi_ave = ENV_TIME_NULL; - env_time_t latency_hi_max = ENV_TIME_NULL; - env_time_t latency_lo_ave = ENV_TIME_NULL; - env_time_t latency_lo_max = ENV_TIME_NULL; - - for (int i = 0; i < num_cores; i++) { - latency_events += core_stat[i].latency.events; - - latency_hi_ave = env_time_sum(latency_hi_ave, - core_stat[i].latency.hi_prio_ave); - latency_lo_ave = env_time_sum(latency_lo_ave, - core_stat[i].latency.lo_prio_ave); - - if (env_time_cmp(core_stat[i].latency.hi_prio_max, - latency_hi_max) > 0) { - latency_hi_max = core_stat[i].latency.hi_prio_max; - } - if (env_time_cmp(core_stat[i].latency.lo_prio_max, - latency_lo_max) > 0) { - latency_lo_max = core_stat[i].latency.lo_prio_max; - } - } - - double lat_per_hi_ave = 0.0; - double lat_per_lo_ave = 0.0; - - if (likely(latency_events > 0)) { - lat_per_hi_ave = env_time_to_cycles(latency_hi_ave, cpu_hz) / - (double)latency_events; - lat_per_lo_ave = env_time_to_cycles(latency_lo_ave, cpu_hz) / - (double)latency_events; - } - - if (print_header) - APPL_PRINT(RESULT_PRINTF_LATENCY_HDR); - APPL_PRINT(RESULT_PRINTF_LATENCY_FMT, - cycles_per_event, events_per_sec, lat_per_hi_ave, - env_time_to_cycles(latency_hi_max, cpu_hz), - lat_per_lo_ave, - env_time_to_cycles(latency_lo_max, cpu_hz), - cpu_mhz, print_count); -} - -/** - * Free the input event and allocate a new one instead - */ -static inline em_event_t -alloc_free_per_event(em_event_t event) -{ - perf_event_t *perf_event = em_event_pointer(event); - env_time_t send_time = perf_event->send_time; - int seq = perf_event->seq; - size_t event_size = em_event_get_size(event); - - em_free(event); - - event = em_alloc(event_size, EM_EVENT_TYPE_SW, perf_shm->pool); - - perf_event = em_event_pointer(event); - - perf_event->send_time = send_time; - perf_event->seq = seq; - - return event; -} - -/** - * Measure the scheduling latency per event - */ -static inline void -measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, - env_time_t recv_time) -{ - const int core = em_core_id(); - core_stat_t *const cstat = &perf_shm->core_stat[core]; - const env_time_t send_time = perf_event->send_time; - env_time_t latency; - - if (perf_shm->test_status.reset_flag || - cstat->events == 0 || cstat->events >= EVENTS_PER_SAMPLE) - return; - - cstat->latency.events++; - - latency = env_time_diff(recv_time, send_time); - - if (q_ctx->sch_q.prio == EM_QUEUE_PRIO_HIGH) { - cstat->latency.hi_prio_ave = - 
env_time_sum(cstat->latency.hi_prio_ave, latency); - if (env_time_cmp(latency, cstat->latency.hi_prio_max) > 0) - cstat->latency.hi_prio_max = latency; - } else { - cstat->latency.lo_prio_ave = - env_time_sum(cstat->latency.lo_prio_ave, latency); - if (env_time_cmp(latency, cstat->latency.lo_prio_max) > 0) - cstat->latency.lo_prio_max = latency; - } -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine performance test + * (based on the queues.c test, extended to also use unscheduled + * queues) + * + * Measures the average cycles consumed during an event send-sched-receive loop + * for a certain number of queues and events in the system. The test increases + * the number of queues[+events] for each measurement round and prints the + * results. The test stops when the maximum number of queues supported by the + * system is reached. + * + * Each normal scheduled queue is accompanied by an unscheduled queue that is + * dequeued from at each event receive. Both the received event and the + * dequeued event are sent to the next queue at the end of the receive function. + * + * The measured cycles contain the scheduled event send-sched-receive cycles as + * well as the unscheduled event dequeue. + * + * Plot the cycles/event to get an idea of how the system scales with an + * increasing number of queues. + */ + +#include <inttypes.h> +#include <string.h> +#include <stdio.h> + +#include <event_machine.h> +#include <event_machine/platform/env/environment.h> + +#include "cm_setup.h" +#include "cm_error_handler.h" + +/* + * Test options: + */ + +/* Alloc and free per event */ +#define ALLOC_FREE_PER_EVENT 0 /* false=0 or true=1 */ + +/* + * Create all EM queues at startup or create the queues during + * the test in steps. + */ +#define CREATE_ALL_QUEUES_AT_STARTUP 0 /* false=0 or true=1 */ + +/* + * Measure the send-enqueue-schedule-receive latency. Measured separately for + * 'high priority' and 'low priority' queues (ratio 1:4).
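To make the scheduled/unscheduled pairing described in the file docs above concrete: an unscheduled queue is never dispatched by the EM scheduler, so its events must be fetched explicitly with em_queue_dequeue(). A minimal sketch of the pattern, using the same EM calls as the test code below (queue and variable names here are illustrative only):

	em_queue_t unsch_q = em_queue_create("unsch-Q", EM_QUEUE_TYPE_UNSCHEDULED,
					     EM_QUEUE_PRIO_UNDEF,
					     EM_QUEUE_GROUP_UNDEF, NULL);
	/* ...inside an EO receive function, for every scheduled event: */
	em_event_t ev = em_queue_dequeue(unsch_q); /* explicit dequeue, no scheduling */
	if (ev != EM_EVENT_UNDEF)
		(void)em_send(ev, next_unsch_q);   /* forward alongside the sched event */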
+ */ +#define MEASURE_LATENCY 1 /* false=0 or true=1 */ + +/* + * Keep the number of events constant while increasing the number of queues. + * Should be divisible by, or a factor of, the queue step. + */ +#define CONST_NUM_EVENTS 4096 /* true>0 or false=0 */ + +/* + * Test configuration: + */ + +#define MAX_CORES 64 + +/* Number of EOs and scheduled queues in a loop */ +#define NUM_EOS 4 + +/* Number of events per queue */ +#define NUM_EVENTS 4 + +#if CONST_NUM_EVENTS > 0 +/* + * Total number of queues when using a constant number of events. + * Make sure that all queues, both scheduled and unscheduled (hence /2), + * get 'NUM_EVENTS' events per queue. + */ +#define NUM_QUEUES ((CONST_NUM_EVENTS / NUM_EVENTS) / 2) +#else +/* + * Total number of queues when increasing the total event count for each queue + * step. + */ +#define NUM_QUEUES (NUM_EOS * 16 * 1024) +#endif + +/* Number of data bytes in an event */ +#define DATA_SIZE 128 + +/* Samples before adding more queues */ +#define NUM_SAMPLES (1 + 8) /* setup(1) + measure(N) */ + +/* Num events a core processes between samples */ +#define EVENTS_PER_SAMPLE 0x400000 + +/* EM queue type */ +#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC + +/* Core states during test. */ +#define CORE_STATE_MEASURE 0 +#define CORE_STATE_IDLE 1 + +/* Result APPL_PRINT() format string */ +#define RESULT_PRINTF_HDR "Cycles/Event Events/s cpu-freq\n" +#define RESULT_PRINTF_FMT "%12.0f %7.0f M %5.0f MHz %" PRIu64 "\n" + +/* Result APPL_PRINT() format string when MEASURE_LATENCY is used */ +#define RESULT_PRINTF_LATENCY_HDR \ +"Cycles/ Events/ Latency:\n" \ +" Event Sec hi-ave hi-max lo-ave lo-max cpu-freq\n" +#define RESULT_PRINTF_LATENCY_FMT \ +"%6.0f %7.2f M %8.0f %7" PRIu64 " %7.0f %7" PRIu64 " %5.0f MHz %" PRIu64 "\n" + +/* + * The number of scheduled queues to use in each test step. + * Additional unscheduled queues are also created for each step. + * + * NOTE: The max queue step is always 'NUM_QUEUES', even if the value of + * 'NUM_QUEUES' would be smaller than a listed queue step (then just stop + * before reaching the end of the list).
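As a worked check of the dimensioning above (valid only in the CONST_NUM_EVENTS > 0 branch): NUM_QUEUES = (4096 / 4) / 2 = 512, i.e. 512 scheduled plus 512 unscheduled queues with NUM_EVENTS = 4 events each, and 512 * 4 * 2 = 4096 events in total. The relation could be checked at build time with the COMPILE_TIME_ASSERT() macro this file already uses elsewhere (a sketch, not part of the original code):

	#if CONST_NUM_EVENTS > 0
	COMPILE_TIME_ASSERT(NUM_QUEUES * NUM_EVENTS * 2 == CONST_NUM_EVENTS,
			    NUM_QUEUES_VS_CONST_NUM_EVENTS_MISMATCH);
	#endif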
+ */ +static const int queue_steps[] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, + 4096, 8192, 16384, 32768, 65536, NUM_QUEUES}; + +/** + * Test state, + * cache line alignment and padding handled in 'perf_shm_t' + */ +typedef struct { + int queues; + int step; + int samples; + int num_cores; + int reset_flag; + double cpu_mhz; + uint64_t cpu_hz; + uint64_t print_count; + env_atomic64_t ready_count; + /* if using CONST_NUM_EVENTS:*/ + int free_flag; + env_atomic64_t freed_count; +} test_status_t; + +/** + * Performance test statistics (per core) + */ +typedef struct { + uint64_t events; + env_time_t begin_time; + env_time_t end_time; + env_time_t diff_time; + struct { + uint64_t events; + env_time_t hi_prio_ave; + env_time_t hi_prio_max; + env_time_t lo_prio_ave; + env_time_t lo_prio_max; + } latency; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} core_stat_t; + +COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, + CORE_STAT_SIZE_ERROR); + +/** + * EO context data + */ +typedef struct { + em_eo_t eo_id; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} eo_context_t; + +COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, + EO_CONTEXT_T__SIZE_ERROR); + +/** + * Queue context data + * Keep the scheduled queue context and the associated unscheduled queue + * context data in the same cache line for faster access - also eliminates + * the need to call em_queue_get_context() for the unscheduled queues. + */ +typedef struct { + struct scheduled_queue_context { + /** This queue */ + em_queue_t this_queue; + /** Next queue */ + em_queue_t next_queue; + /** Priority of 'this_queue' */ + em_queue_prio_t prio; + } sch_q; + + struct unscheduled_queue_context { + /** This unscheduled queue */ + em_queue_t this_queue; + /** Next unscheduled queue */ + em_queue_t next_queue; + } unsch_q; + + /** Pad to multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} queue_context_t; + +COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, + QUEUE_CONTEXT_SIZE_ERROR); + +/** + * Performance test event + */ +typedef struct { + /* Send time stamp */ + env_time_t send_time; + /* Sequence number */ + int seq; + /* Test data */ + uint8_t data[DATA_SIZE]; +} perf_event_t; + +/** + * Test shared memory + */ +typedef struct { + /* Event pool used by this application */ + em_pool_t pool; + + test_status_t test_status ENV_CACHE_LINE_ALIGNED; + + core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; + + eo_context_t eo_context_tbl[NUM_EOS] ENV_CACHE_LINE_ALIGNED; + + queue_context_t queue_context_tbl[NUM_QUEUES] ENV_CACHE_LINE_ALIGNED; + /* EO ID's */ + em_eo_t eo[NUM_EOS] ENV_CACHE_LINE_ALIGNED; +} perf_shm_t; + +COMPILE_TIME_ASSERT(sizeof(perf_shm_t) % ENV_CACHE_LINE_SIZE == 0, + PERF_SHM_T__SIZE_ERROR); + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL perf_shm_t *perf_shm; + +/* EM-core local state */ +static ENV_LOCAL int core_state = CORE_STATE_MEASURE; + +static em_status_t +error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args); + +static void +queue_step(void); + +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +stop(void *eo_context, em_eo_t eo); + +static void +receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context); + +static int +update_test_state(em_event_t event, em_event_t unsch_event); + +static void 
+create_and_link_queues(int start_queue, int num_queues); + +static void +print_test_statistics(test_status_t *test_status, int print_header, + core_stat_t core_stat[]); + +static inline em_event_t +alloc_free_per_event(em_event_t event); + +static inline void +measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, + env_time_t recv_time); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Test error handler + * + * @param eo Execution object id + * @param error The error code + * @param escope Error scope + * @param args List of arguments (__FILE__, __func__, __LINE__, + * (format), ## __VA_ARGS__) + * + * @return The original error code. + */ +static em_status_t +error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) +{ + if (escope == EM_ESCOPE_QUEUE_CREATE && !EM_ERROR_IS_FATAL(error)) { + APPL_PRINT("\nUnable to create more queues\n\n" + "Test finished\n"); + raise(SIGINT); + return error; + } + + if (appl_shm->exit_flag && EM_ESCOPE(escope) && + !EM_ERROR_IS_FATAL(error)) { + /* Suppress non-fatal EM-error logs during tear-down */ + if (escope == EM_ESCOPE_EO_ADD_QUEUE_SYNC) { + APPL_PRINT("\nExit: suppress queue setup error\n\n"); + return error; + } + } + + return test_error_handler(eo, error, escope, args); +} + +/** + * Init of the Queues performance test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfQueuesSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(error_handler); + } else { + perf_shm = env_shared_lookup("PerfQueuesSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf test queues init failed on EM-core: %u\n", + em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Queues performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + eo_context_t *eo_ctx; + em_status_t ret, start_ret = EM_ERROR; + const int q_ctx_size = sizeof(perf_shm->queue_context_tbl); + int i; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + perf_shm->pool = appl_conf->pools[0]; + else + perf_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + " Max. 
NUM_QUEUES: %i\n" + " sizeof queue_context_tbl: %i kB\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + perf_shm->pool, NUM_QUEUES, q_ctx_size / 1024); + + test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + perf_shm->test_status.cpu_hz = env_core_hz(); + perf_shm->test_status.cpu_mhz = (double)perf_shm->test_status.cpu_hz / + 1000000.0; + perf_shm->test_status.reset_flag = 0; + perf_shm->test_status.num_cores = em_core_count(); + perf_shm->test_status.free_flag = 0; + + env_atomic64_init(&perf_shm->test_status.ready_count); + env_atomic64_init(&perf_shm->test_status.freed_count); + + /* Create EOs */ + for (i = 0; i < NUM_EOS; i++) { + eo_ctx = &perf_shm->eo_context_tbl[i]; + perf_shm->eo[i] = em_eo_create("perf test eo", start, NULL, + stop, NULL, receive_func, + eo_ctx); + test_fatal_if(perf_shm->eo[i] == EM_EO_UNDEF, + "EO create failed (%d of %d)", i, NUM_EOS); + } + + APPL_PRINT(" EOs created\n"); + + /* + * Create and link queues + */ + if (CREATE_ALL_QUEUES_AT_STARTUP) /* Create ALL queues at once */ + create_and_link_queues(0, NUM_QUEUES); + else /* Create queues for the first step, then more before each step */ + create_and_link_queues(0, queue_steps[0]); + + /* Start EOs */ + for (i = 0; i < NUM_EOS; i++) { + ret = em_eo_start_sync(perf_shm->eo[i], &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start(%d):%" PRI_STAT " %" PRI_STAT "", + i, ret, start_ret); + } + + queue_step(); +} + +/** + * Stop the test, only run on one core + */ +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + int i; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + /* Stop EOs */ + for (i = 0; i < NUM_EOS; i++) { + eo = perf_shm->eo[i]; + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", + eo, ret); + } + + /* Remove and delete all of the EO's queues, then delete the EO */ + for (i = 0; i < NUM_EOS; i++) { + eo = perf_shm->eo[i]; + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE/*delete Qs*/); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", + eo, ret); + } + + for (i = 0; i < NUM_QUEUES; i++) { + queue_context_t *q_ctx = &perf_shm->queue_context_tbl[i]; + em_queue_t unsch_queue = q_ctx->unsch_q.this_queue; + em_event_t unsch_event; + + if (unsch_queue == EM_QUEUE_UNDEF) + continue; + + for (;;) { + unsch_event = em_queue_dequeue(unsch_queue); + if (unsch_event == EM_EVENT_UNDEF) + break; + em_free(unsch_event); + } + ret = em_queue_delete(unsch_queue); + test_fatal_if(ret != EM_OK, + "Unsch-Queue:%" PRI_QUEUE " delete:%" PRI_STAT "", + unsch_queue, ret); + } +} + +/** + * Terminate the test, only run on one core + */ +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + env_shared_free(perf_shm); + em_unregister_error_handler(); +} + +/** + * Allocate, initialize and send test step events.
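One convention worth spelling out before the allocation loops below: a successful em_send() transfers event ownership to EM, while a failed send leaves ownership with the sender, who must then free (or retry) the event. The error legs in this file follow that rule; condensed to its core (illustrative names, test_fatal_if() wrapping omitted):

	em_event_t ev = em_alloc(sizeof(perf_event_t), EM_EVENT_TYPE_SW, pool);
	if (ev == EM_EVENT_UNDEF)
		return;              /* allocation failed */
	if (em_send(ev, dst_queue) != EM_OK)
		em_free(ev);         /* send failed: the caller still owns 'ev' */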
+ */ +static void +queue_step(void) +{ + queue_context_t *q_ctx; + perf_event_t *perf_event; + em_status_t ret; + const int first = perf_shm->test_status.queues; + const int step = perf_shm->test_status.step; + const int queue_count = queue_steps[step]; + int i, j; + + /* Allocate and send test events for the queues */ + if (CONST_NUM_EVENTS) { + for (i = 0; i < CONST_NUM_EVENTS / 2; i++) { + em_event_t unsch_event; + + unsch_event = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(unsch_event == EM_EVENT_UNDEF, + "EM alloc failed (%i)", i); + + /* Allocate events evenly to the queues */ + q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; + + ret = em_send(unsch_event, q_ctx->unsch_q.this_queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT "\n" + "Unsched-Q:%" PRI_QUEUE "", + ret, q_ctx->unsch_q.this_queue); + em_free(unsch_event); + return; + } + } + for (i = 0; i < CONST_NUM_EVENTS / 2; i++) { + em_event_t event; + + event = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF || + sizeof(perf_event_t) != + em_event_get_size(event), + "EM alloc failed (%i)", i); + + perf_event = em_event_pointer(event); + perf_event->seq = i; + perf_event->send_time = env_time_global(); + + /* Allocate events evenly to the queues */ + q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; + + ret = em_send(event, q_ctx->sch_q.this_queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT "\n" + "Queue:%" PRI_QUEUE "", + ret, q_ctx->sch_q.this_queue); + em_free(event); + return; + } + } + } else { + for (i = first; i < queue_count; i++) { + em_event_t unsch_events[NUM_EVENTS]; + int num; + + q_ctx = &perf_shm->queue_context_tbl[i]; + + for (j = 0; j < NUM_EVENTS; j++) { + unsch_events[j] = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(unsch_events[j] == EM_EVENT_UNDEF, + "EM alloc failed (%d, %d)", i, j); + } + num = em_send_multi(unsch_events, NUM_EVENTS, + q_ctx->unsch_q.this_queue); + if (unlikely(num != NUM_EVENTS)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send multi:%d\n" + "Unsched-Q:%" PRI_QUEUE "", + num, q_ctx->unsch_q.this_queue); + em_free_multi(&unsch_events[num], NUM_EVENTS - num); + return; + } + } + for (i = first; i < queue_count; i++) { + em_event_t events[NUM_EVENTS]; + int num; + + q_ctx = &perf_shm->queue_context_tbl[i]; + + for (j = 0; j < NUM_EVENTS; j++) { + events[j] = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(events[j] == EM_EVENT_UNDEF || + sizeof(perf_event_t) != + em_event_get_size(events[j]), + "EM alloc failed (%d,%d)", i, j); + + perf_event = em_event_pointer(events[j]); + perf_event->seq = i * NUM_EVENTS + j; + perf_event->send_time = env_time_global(); + } + num = em_send_multi(events, NUM_EVENTS, + q_ctx->sch_q.this_queue); + if (unlikely(num != NUM_EVENTS)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send multi:%d\n" + "Queue:%" PRI_QUEUE "", + num, q_ctx->sch_q.this_queue); + em_free_multi(&events[num], NUM_EVENTS - num); + return; + } + } + } + + perf_shm->test_status.queues = queue_count; + perf_shm->test_status.step++; + + APPL_PRINT("\nNumber of queues: %6.0d + %d\n", + queue_count, queue_count); + if (CONST_NUM_EVENTS) + APPL_PRINT("Number of events: %6.0d + %d\n", + CONST_NUM_EVENTS / 2, CONST_NUM_EVENTS / 2); + else + APPL_PRINT("Number of events: %6.0d + %d\n", + queue_count * 
NUM_EVENTS, queue_count * NUM_EVENTS); +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + eo_context_t *eo_ctx = eo_context; + + (void)conf; + + APPL_PRINT("EO %" PRI_EO " starting.\n", eo); + + eo_ctx->eo_id = eo; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t +stop(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + + APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); + + return EM_OK; +} + +/** + * @private + * + * EO receive function. + * + * Loops back events and calculates the event rate. + */ +static void +receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context) +{ + env_time_t recv_time; + perf_event_t *perf_event; + + if (MEASURE_LATENCY) { + recv_time = env_time_global(); + perf_event = em_event_pointer(event); + } + + queue_context_t *q_ctx; + em_queue_t dst_queue; + em_queue_t src_unsch_queue; + em_queue_t dst_unsch_queue; + em_event_t unsch_event; + em_status_t ret; + int do_return; + + (void)eo_context; + (void)type; + + q_ctx = q_context; + src_unsch_queue = q_ctx->unsch_q.this_queue; + + /* + * Dequeue an unscheduled event for every received scheduled event + */ + unsch_event = em_queue_dequeue(src_unsch_queue); + test_fatal_if(unsch_event == EM_EVENT_UNDEF && !appl_shm->exit_flag, + "em_queue_dequeue() error"); + + /* Free all events if the exit-flag is set (program termination) */ + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + em_free(unsch_event); + return; + } + + /* + * Helper: Update the test state, count recv events, + * calc & print stats, prepare for next step + */ + do_return = update_test_state(event, unsch_event); + if (unlikely(do_return)) + return; + + if (ALLOC_FREE_PER_EVENT) + event = alloc_free_per_event(event); + + dst_queue = q_ctx->sch_q.next_queue; + dst_unsch_queue = q_ctx->unsch_q.next_queue; + test_fatal_if(queue != q_ctx->sch_q.this_queue, "Queue config error"); + + if (MEASURE_LATENCY) + measure_latency(perf_event, q_ctx, recv_time); + + /* Enqueue the unscheduled event to the next unscheduled queue */ + ret = em_send(unsch_event, dst_unsch_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT " Unsched-Q: %" PRI_QUEUE "", + ret, dst_unsch_queue); + } + + /* Send the scheduled event to the next scheduled queue */ + if (MEASURE_LATENCY) + perf_event->send_time = env_time_global(); + ret = em_send(event, dst_queue); + if (unlikely(ret != EM_OK)) { + em_free(event); + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT " Queue:%" PRI_QUEUE "", + ret, dst_queue); + } +} + +/** + * Receive function helper: Update the test state + * + * Calculates the number of received events, maintains & prints test statistics + * and restarts/reconfigures the test for the next queue/event-setup + * + * @return '1' if the caller receive function should immediately return, + * '0' otherwise + */ +static inline int +update_test_state(em_event_t event, em_event_t unsch_event) +{ + uint64_t events; + uint64_t freed_count; + uint64_t ready_count; + const int core = em_core_id(); + test_status_t *const tstat = &perf_shm->test_status; + core_stat_t *const cstat = &perf_shm->core_stat[core]; + + events = cstat->events; + /* one scheduled and one unscheduled event received */ + events += 2; + + if (unlikely(tstat->reset_flag)) { + events = 0; + if (CONST_NUM_EVENTS) { + /* Free all old events before 
allocating new ones. */ + if (unlikely(tstat->free_flag)) { + em_free(event); + em_free(unsch_event); + freed_count = + env_atomic64_add_return(&tstat->freed_count, 2); + if (freed_count == CONST_NUM_EVENTS) { + /* Last event */ + env_atomic64_set(&tstat->freed_count, + 0); + tstat->reset_flag = 0; + tstat->free_flag = 0; + queue_step(); + } + /* Req caller receive-func to return */ + return 1; + } + } + + if (unlikely(core_state != CORE_STATE_IDLE)) { + core_state = CORE_STATE_IDLE; + cstat->begin_time = ENV_TIME_NULL; + + ready_count = + env_atomic64_add_return(&tstat->ready_count, 1); + + if (ready_count == (uint64_t)tstat->num_cores) { + env_atomic64_set(&tstat->ready_count, 0); + + if (CONST_NUM_EVENTS) { + int sample = tstat->samples; + int queues = tstat->queues; + + if (sample == 0 && queues < NUM_QUEUES) + tstat->free_flag = 1; + else + tstat->reset_flag = 0; + } else { + tstat->reset_flag = 0; + } + } + } + } else if (unlikely(events == 2)) { + cstat->begin_time = env_time_global(); + cstat->latency.events = 0; + cstat->latency.hi_prio_ave = ENV_TIME_NULL; + cstat->latency.hi_prio_max = ENV_TIME_NULL; + cstat->latency.lo_prio_ave = ENV_TIME_NULL; + cstat->latency.lo_prio_max = ENV_TIME_NULL; + + core_state = CORE_STATE_MEASURE; + } else if (unlikely(events == EVENTS_PER_SAMPLE)) { + /* + * Measurements done for this step. Store results and continue + * receiving events until all cores are done. + */ + env_time_t begin_time, end_time; + + cstat->end_time = env_time_global(); + + end_time = cstat->end_time; + begin_time = cstat->begin_time; + cstat->diff_time = env_time_diff(end_time, begin_time); + + ready_count = env_atomic64_add_return(&tstat->ready_count, 1); + + /* + * Check whether all cores are done with the step, + * and if done proceed to the next step + */ + if (unlikely((int)ready_count == tstat->num_cores)) { + /* No real need for atomicity here, ran on last core*/ + env_atomic64_set(&tstat->ready_count, 0); + + tstat->reset_flag = 1; + tstat->samples++; + + /* + * Print statistics. + * Omit prints for the first sample round to allow the + * test to stabilize after setups and teardowns. + */ + if (tstat->samples > 1) { + int print_header = tstat->samples == 2 ? 1 : 0; + + print_test_statistics(tstat, print_header, + perf_shm->core_stat); + } + + /* + * Start next test step - setup new queues + */ + if (tstat->samples == NUM_SAMPLES && + tstat->queues < NUM_QUEUES) { + if (!CREATE_ALL_QUEUES_AT_STARTUP) { + int step = tstat->step; + int first_q = tstat->queues; + int num_qs = queue_steps[step] - + queue_steps[step - 1]; + + create_and_link_queues(first_q, num_qs); + } + + if (!CONST_NUM_EVENTS) + queue_step(); + + tstat->samples = 0; + } + } + } + + cstat->events = events; + + return 0; +} + +/** + * Creates a number of EM queues, associates them with EOs, and links them. 
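A non-obvious consequence of the linking order in the function below: within each group of NUM_EOS queues, every queue's next_queue points at the previously created queue, and the first queue of the group is finally linked back to the last one. With NUM_EOS = 4, the events in a group therefore circulate in a ring (the accompanying unscheduled queues form an identical ring):

	q0 -> q3 -> q2 -> q1 -> q0 -> ...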
+ */ +static void +create_and_link_queues(int start_queue, int num_queues) +{ + int i, j; + em_queue_t queue, next_queue; + em_queue_t queue_unscheduled, next_unscheduled; + em_queue_conf_t unsch_conf; + em_queue_prio_t prio; + em_status_t ret; + queue_context_t *q_ctx; + + APPL_PRINT("\nCreate new queues - scheduled:%d + unscheduled:%d\n", + num_queues, num_queues); + + if (num_queues % NUM_EOS != 0) { + APPL_PRINT("%s() arg 'num_queues'=%d not multiple of NUM_EOS=%d\n", + __func__, num_queues, NUM_EOS); + return; + } + + memset(&unsch_conf, 0, sizeof(unsch_conf)); + if (QUEUE_TYPE == EM_QUEUE_TYPE_ATOMIC) { + /* + * If the EO receive functions run with an atomic context, the + * unsched queue enq/deq operations can be flagged as not + * multithread-safe to possibly boost performance. + */ + unsch_conf.flags |= EM_QUEUE_FLAG_ENQ_NOT_MTSAFE; + unsch_conf.flags |= EM_QUEUE_FLAG_DEQ_NOT_MTSAFE; + } + + for (i = start_queue; i < (start_queue + num_queues); i += NUM_EOS) { + next_queue = EM_QUEUE_UNDEF; + next_unscheduled = EM_QUEUE_UNDEF; + + for (j = 0; j < NUM_EOS; j++) { + prio = EM_QUEUE_PRIO_NORMAL; + + if (MEASURE_LATENCY) { + if (j == 0) + prio = EM_QUEUE_PRIO_HIGH; + } + + q_ctx = &perf_shm->queue_context_tbl[i + j]; + + /* + * Create a new scheduled queue + */ + queue = em_queue_create("queue", QUEUE_TYPE, prio, + EM_QUEUE_GROUP_DEFAULT, NULL); + if (queue == EM_QUEUE_UNDEF) { + APPL_PRINT("Max nbr of supported queues: %d\n", + 2 * i); + return; + } + ret = em_queue_set_context(queue, q_ctx); + test_fatal_if(ret != EM_OK, + "em_queue_set_context:%" PRI_STAT "\n" + "EO:%" PRI_EO " Queue:%" PRI_QUEUE "", + ret, perf_shm->eo[j], queue); + /* Add the scheduled queue to an EO and enable it */ + ret = em_eo_add_queue_sync(perf_shm->eo[j], queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "em_eo_add_queue_sync():%" PRI_STAT "\n" + "EO:%" PRI_EO " Q:%" PRI_QUEUE "", + ret, perf_shm->eo[j], queue); + em_queue_delete(queue); + return; + } + /* Link scheduled queues */ + q_ctx->sch_q.this_queue = queue; + q_ctx->sch_q.next_queue = next_queue; + q_ctx->sch_q.prio = prio; + + /* + * Create a new unscheduled queue + */ + queue_unscheduled = + em_queue_create("unscheduled_queue", + EM_QUEUE_TYPE_UNSCHEDULED, + EM_QUEUE_PRIO_UNDEF, + EM_QUEUE_GROUP_UNDEF, + &unsch_conf); + if (queue_unscheduled == EM_QUEUE_UNDEF) { + APPL_PRINT("Max nbr of supported queues: %d\n", + 2 * i + 1); + return; + } + + /* Link unscheduled queues */ + q_ctx->unsch_q.this_queue = queue_unscheduled; + q_ctx->unsch_q.next_queue = next_unscheduled; + + /* + * Set the same top level queue context for both the + * scheduled and the unscheduled queue, access queue + * specific context by using q_ctx->sch_q.* or + * q_ctx->unsch_q.* - this eliminates the need to call + * em_queue_get_context() for each event for the + * unscheduled queues + */ + ret = em_queue_set_context(queue_unscheduled, q_ctx); + test_fatal_if(ret != EM_OK, + "em_queue_set_context:%" PRI_STAT "\n" + "Unsched-Q:%" PRI_QUEUE "", + ret, queue_unscheduled); + /* Sanity check */ + test_fatal_if(em_queue_get_context(queue) != + em_queue_get_context(queue_unscheduled), + "em_queue_get_context failed."); + + next_queue = queue; + next_unscheduled = queue_unscheduled; + } + + /* Connect first scheduled queue to the last */ + q_ctx = &perf_shm->queue_context_tbl[i + 0]; + q_ctx->sch_q.next_queue = next_queue; + q_ctx->unsch_q.next_queue = next_unscheduled; + } +} + +/** + * Print test statistics + */ +static void +print_test_statistics(test_status_t *test_status, int
print_header, + core_stat_t core_stat[]) +{ + const int num_cores = test_status->num_cores; + const uint64_t cpu_hz = test_status->cpu_hz; + const double cpu_mhz = test_status->cpu_mhz; + const uint64_t total_events = (uint64_t)num_cores * EVENTS_PER_SAMPLE; + const uint64_t print_count = test_status->print_count++; + env_time_t total_time = ENV_TIME_NULL; + + for (int i = 0; i < num_cores; i++) + total_time = env_time_sum(total_time, core_stat[i].diff_time); + + double cycles_per_event = 0.0; + double events_per_sec = 0.0; + + if (likely(total_events > 0)) + cycles_per_event = env_time_to_cycles(total_time, cpu_hz) / + (double)total_events; + if (likely(cycles_per_event > 0)) /* Million events/s: */ + events_per_sec = cpu_mhz * num_cores / cycles_per_event; + + /* + * Print without latency statistics + */ + if (!MEASURE_LATENCY) { + if (print_header) + APPL_PRINT(RESULT_PRINTF_HDR); + APPL_PRINT(RESULT_PRINTF_FMT, + cycles_per_event, events_per_sec, + cpu_mhz, print_count); + return; + } + + /* + * Print with latency statistics + */ + uint64_t latency_events = 0; + env_time_t latency_hi_ave = ENV_TIME_NULL; + env_time_t latency_hi_max = ENV_TIME_NULL; + env_time_t latency_lo_ave = ENV_TIME_NULL; + env_time_t latency_lo_max = ENV_TIME_NULL; + + for (int i = 0; i < num_cores; i++) { + latency_events += core_stat[i].latency.events; + + latency_hi_ave = env_time_sum(latency_hi_ave, + core_stat[i].latency.hi_prio_ave); + latency_lo_ave = env_time_sum(latency_lo_ave, + core_stat[i].latency.lo_prio_ave); + + if (env_time_cmp(core_stat[i].latency.hi_prio_max, + latency_hi_max) > 0) { + latency_hi_max = core_stat[i].latency.hi_prio_max; + } + if (env_time_cmp(core_stat[i].latency.lo_prio_max, + latency_lo_max) > 0) { + latency_lo_max = core_stat[i].latency.lo_prio_max; + } + } + + double lat_per_hi_ave = 0.0; + double lat_per_lo_ave = 0.0; + + if (likely(latency_events > 0)) { + lat_per_hi_ave = env_time_to_cycles(latency_hi_ave, cpu_hz) / + (double)latency_events; + lat_per_lo_ave = env_time_to_cycles(latency_lo_ave, cpu_hz) / + (double)latency_events; + } + + if (print_header) + APPL_PRINT(RESULT_PRINTF_LATENCY_HDR); + APPL_PRINT(RESULT_PRINTF_LATENCY_FMT, + cycles_per_event, events_per_sec, lat_per_hi_ave, + env_time_to_cycles(latency_hi_max, cpu_hz), + lat_per_lo_ave, + env_time_to_cycles(latency_lo_max, cpu_hz), + cpu_mhz, print_count); +} + +/** + * Free the input event and allocate a new one instead + */ +static inline em_event_t +alloc_free_per_event(em_event_t event) +{ + perf_event_t *perf_event = em_event_pointer(event); + env_time_t send_time = perf_event->send_time; + int seq = perf_event->seq; + uint32_t event_size = em_event_get_size(event); + + em_free(event); + + event = em_alloc(event_size, EM_EVENT_TYPE_SW, perf_shm->pool); + + perf_event = em_event_pointer(event); + + perf_event->send_time = send_time; + perf_event->seq = seq; + + return event; +} + +/** + * Measure the scheduling latency per event + */ +static inline void +measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, + env_time_t recv_time) +{ + const int core = em_core_id(); + core_stat_t *const cstat = &perf_shm->core_stat[core]; + const env_time_t send_time = perf_event->send_time; + env_time_t latency; + + if (perf_shm->test_status.reset_flag || + cstat->events == 0 || cstat->events >= EVENTS_PER_SAMPLE) + return; + + cstat->latency.events++; + + latency = env_time_diff(recv_time, send_time); + + if (q_ctx->sch_q.prio == EM_QUEUE_PRIO_HIGH) { + cstat->latency.hi_prio_ave = + 
env_time_sum(cstat->latency.hi_prio_ave, latency); + if (env_time_cmp(latency, cstat->latency.hi_prio_max) > 0) + cstat->latency.hi_prio_max = latency; + } else { + cstat->latency.lo_prio_ave = + env_time_sum(cstat->latency.lo_prio_ave, latency); + if (env_time_cmp(latency, cstat->latency.lo_prio_max) > 0) + cstat->latency.lo_prio_max = latency; + } +} diff --git a/programs/performance/scheduling_latency.c b/programs/performance/scheduling_latency.c new file mode 100644 index 00000000..de0ba309 --- /dev/null +++ b/programs/performance/scheduling_latency.c @@ -0,0 +1,607 @@ +/* + * Copyright (c) 2022, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine performance test for scheduling + * + * Measures the time consumed during an event send-receive loop and/or the + * achieved throughput. + * Command line arguments can be used to try different setups.
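As a usage sketch (the '-c' core mask is handled by the common cm_setup() launcher and the options after '--' by this application's parse_args(); the exact values here are illustrative only):

	./scheduling_latency -c 0xe -- -q 1 -e 32 -w 2000 -l 10

This would use an atomic hi-prio timing queue (-q 1, the default), add 32 background events (-e) of ~2000 ns work each (-w) and run 10 reporting loops (-l).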
+ */ +#include <stdio.h> +#include <string.h> +#include <stdlib.h> +#include <signal.h> +#include <sys/mman.h> + +#include <event_machine.h> +#include <odp_api.h> + +#include "cm_setup.h" +#include "cm_error_handler.h" +#include "scheduling_latency.h" +#include "event_machine/helper/event_machine_debug.h" + +static em_status_t error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args); +static em_status_t start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t stop(void *eo_context, em_eo_t eo); +static void receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context); + +static void update_stats(int64_t ts, int64_t diff, uint64_t count); +static void print_stats(int64_t start_time, int64_t loop_start_time, uint64_t count); +static int parse_args(int first, int argc, char *argv[]); +static void usage(void); +static void do_work(test_msg *msg); +static int64_t mask_from_str(const char *hex, em_core_mask_t *cmask); +static const char *queue_type_str(em_queue_type_t type); +static void entry_hook(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx); +static void exit_hook(em_eo_t eo); +static uint64_t try_timestamp_overhead(void); + +/* data */ +static perf_shm_t *perf_shm; +static __thread uint64_t entry_ts; /* core local */ +static __thread int64_t max_eo_time; /* core local */ + +config_data g_options = { + .loops = 1, + .queue_type = EM_QUEUE_TYPE_ATOMIC, + .lo_events = 0, + .work_ns = 2000, + .atomic_end = false, + .eo_receive = false +}; + +/*************************************************************************/ +void entry_hook(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx) +{ + (void)eo; + (void)eo_ctx; + (void)events; + (void)num; + (void)queue; + (void)q_ctx; + + entry_ts = odp_time_global_strict_ns(); +} + +void exit_hook(em_eo_t eo) +{ + (void)eo; + + int64_t diff = odp_time_global_strict_ns() - entry_ts; + + if (max_eo_time < diff) + max_eo_time = diff; +} + +const char *queue_type_str(em_queue_type_t type) +{ + switch (type) { + case EM_QUEUE_TYPE_PARALLEL: + return "PARALLEL"; + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + return "ORDERED"; + case EM_QUEUE_TYPE_ATOMIC: + return "ATOMIC"; + default: + break; + } + return ""; +} + +int64_t mask_from_str(const char *hex, em_core_mask_t *cmask) +{ + uint64_t mask; + + if (hex == NULL) + return 0; + if (sscanf(hex, "%lx", &mask) != 1) + return 0; + + em_core_mask_set_bits(&mask, 1, cmask); + return em_core_mask_count(cmask); +} + +void update_stats(int64_t now, int64_t diff, uint64_t count) +{ + /* dispatch time */ + int64_t dtime = now - em_debug_timestamp(EM_DEBUG_TSP_SCHED_RETURN) - perf_shm->ts_overhead; + + /* send-receive */ + if (diff < perf_shm->times.mint) { + perf_shm->times.mint = diff; + perf_shm->times.minnum = count; + } else if (diff > perf_shm->times.maxt) { + perf_shm->times.maxt = diff; + perf_shm->times.maxnum = count; + perf_shm->times.maxdisp = dtime; + } + perf_shm->times.sum += diff; + + /* dispatch overhead min/max */ + if (dtime < perf_shm->times.disp_min) + perf_shm->times.disp_min = dtime; + else if (dtime > perf_shm->times.disp_max) + perf_shm->times.disp_max = dtime; + + /* EO receive time.
Only includes the timing event processing */ + if (g_options.eo_receive && perf_shm->times.max_eo_time < max_eo_time) + perf_shm->times.max_eo_time = max_eo_time; +} + +void do_work(test_msg *msg) +{ + msg->count++; + if (msg->work == 0) + return; + + uint64_t t1 = odp_time_global_strict_ns(); + + /* just loop for the given time */ + while ((odp_time_global_ns() - t1) < msg->work) + ; +} + +void print_stats(int64_t start_time, int64_t loop_start_time, uint64_t count) +{ + double period = (double)odp_time_global_ns() - loop_start_time; + double runtime = (double)odp_time_global_ns() - start_time; + + period /= 1000000000; /* sec */ + runtime /= 1000000000; + + double rate = ((count - perf_shm->stat_mcount) / period) / 1000000; /* M/sec */ + uint64_t average = perf_shm->times.sum / (count - START_EVENTS); + + if (em_debug_timestamp(EM_DEBUG_TSP_SCHED_RETURN) == 0) + perf_shm->times.maxdisp = 0; + + APPL_PRINT(": time(h) cores events(M) rate(M/s) min[ns] max[ns] avg[ns] min ev# max ev# max_do[ns] max_eo[ns]\n"); + APPL_PRINT(": %-7.3f %-5d %-9lu %-9.3f %-7lu %-7lu %-7lu %-11lu %-11lu %-10lu %lu\n", + runtime / (60 * 60), em_core_count(), count / 1000000, rate, + perf_shm->times.mint, perf_shm->times.maxt, average, + perf_shm->times.minnum, perf_shm->times.maxnum, + perf_shm->times.maxdisp, perf_shm->times.max_eo_time); + + if (g_options.lo_events) { + double lrate = ((perf_shm->num_lo - perf_shm->stat_lcount) / period) / 1000000; + + APPL_PRINT(": bg events(M) rate(M/s)\n"); + APPL_PRINT(": %-12.3f %.3f\n", ((double)perf_shm->num_lo) / 1000000, lrate); + perf_shm->stat_lcount = perf_shm->num_lo; + } + perf_shm->stat_mcount = count; +} + +void usage(void) +{ + APPL_PRINT("scheduling_latency %s\n\n%s", VERSION, instructions); + + for (int i = 0; ; i++) { + if (longopts[i].name == NULL) + break; + APPL_PRINT("-%c or --%-16s %s\n", longopts[i].val, longopts[i].name, descopts[i]); + } + APPL_PRINT("\n"); +} + +int parse_args(int first, int argc, char *argv[]) +{ + em_core_mask_zero(&g_options.hgroup); + em_core_mask_zero(&g_options.lgroup); + + optind = first + 1; /* skip '--' */ + while (1) { + int opt; + int long_index; + char *endptr; + int64_t num; + + opt = getopt_long(argc, argv, shortopts, longopts, &long_index); + + if (opt == -1) + break; /* No more options */ + + switch (opt) { + case 'a': { + g_options.atomic_end = true; + } + break; + case 'r': { + g_options.eo_receive = true; + } + break; + case 'l': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.loops = (uint64_t)num; + } + break; + case 'q': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + switch (num) { + case 0: + g_options.queue_type = EM_QUEUE_TYPE_PARALLEL; + break; + case 1: + g_options.queue_type = EM_QUEUE_TYPE_ATOMIC; + break; + case 2: + g_options.queue_type = EM_QUEUE_TYPE_PARALLEL_ORDERED; + break; + + default: return 0; + } + } + break; + case 'e': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.lo_events = (uint64_t)num; + } + break; + case 'w': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.work_ns = (uint64_t)num; + } + break; + case 'g': { /* low-prio grp */ + num = mask_from_str(optarg, &g_options.lgroup); + if (!num) + return 0; + } + break; + case 't': { /* hi-prio grp */ + num = mask_from_str(optarg, &g_options.hgroup); + if (!num) + return 0; + } + break; + case 'h': + default: + opterr = 0; + usage(); + return 0; + } + } + + optind =
1; /* cm_setup() to parse again */ + return 1; +} + +em_status_t error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) +{ + return test_error_handler(eo, error, escope, args); +} + +void test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfSharedMem", sizeof(perf_shm_t)); + em_register_error_handler(error_handler); + mlockall(MCL_FUTURE); /* make sure all memory is mapped at start */ + } else { + perf_shm = env_shared_lookup("PerfSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Test init failed on EM-core: %u\n", + core); + else if (core == 0) { + memset(perf_shm, 0, sizeof(perf_shm_t)); + APPL_PRINT("%luB Shared memory initialized\n", sizeof(perf_shm_t)); + } +} + +uint64_t try_timestamp_overhead(void) +{ +#define NUM_TS_TRY 5 + uint64_t oh = UINT64_MAX; + + /* measure time stamping overhead, take min */ + for (int i = 0; i < NUM_TS_TRY; i++) { + uint64_t t1 = odp_time_global_ns(); + uint64_t t2 = odp_time_global_ns(); + uint64_t t3 = odp_time_global_ns(); + + if (t2 - t1 < oh) + oh = t2 - t1; + if (t3 - t2 < oh) + oh = t3 - t2; + } + + return oh; +} + +void test_start(appl_conf_t *const appl_conf) +{ + (void)appl_conf; + + perf_shm->ts_overhead = (int64_t)try_timestamp_overhead(); + APPL_PRINT("odp_time_global_ns pair overhead seems to be %lu ns\n", perf_shm->ts_overhead); + + /* Create EO */ + perf_shm->eo = em_eo_create("perf test eo", start, NULL, stop, NULL, receive_func, + &perf_shm->eo_ctx); + test_fatal_if(perf_shm->eo == EM_EO_UNDEF, "EO create failed"); + + /* Queues */ + em_queue_group_t grp = EM_QUEUE_GROUP_DEFAULT; + char buf[32]; + + APPL_PRINT("Using queue type %d (%s) for timing\n", + (int)g_options.queue_type, queue_type_str(g_options.queue_type)); + + if (em_core_mask_count(&g_options.hgroup)) { /* separate queue group for timing */ + grp = em_queue_group_create_sync("HGRP", &g_options.hgroup); + test_fatal_if(grp == EM_QUEUE_GROUP_UNDEF, "Can't create hi-prio queue group!"); + em_core_mask_tostr(buf, 32, &g_options.hgroup); + APPL_PRINT("Coremask for hi-prio events: %s (%d cores)\n", + buf, em_core_mask_count(&g_options.hgroup)); + } else { + APPL_PRINT("Using default queue group for hi-prio\n"); + } + + em_queue_t q = em_queue_create("testQ", g_options.queue_type, + EM_QUEUE_PRIO_HIGHEST, grp, NULL); + + test_fatal_if(q == EM_QUEUE_UNDEF, "Q create fail"); + em_eo_add_queue_sync(perf_shm->eo, q); + + /* Low priority background work queue */ + grp = EM_QUEUE_GROUP_DEFAULT; + if (em_core_mask_count(&g_options.lgroup)) { /* separate queue group for background */ + grp = em_queue_group_create_sync("LGRP", &g_options.lgroup); + test_fatal_if(grp == EM_QUEUE_GROUP_UNDEF, "Can't create lower-prio queue group!"); + em_core_mask_tostr(buf, 32, &g_options.lgroup); + APPL_PRINT("Coremask for background events: %s (%d cores)\n", + buf, em_core_mask_count(&g_options.lgroup)); + } else { + APPL_PRINT("Using default queue group for background events\n"); + } + + em_core_mask_t mask; + + em_core_mask_and(&mask, &g_options.lgroup, &g_options.hgroup); + APPL_PRINT("Queue groups are %soverlapping\n", em_core_mask_count(&mask) ?
"" : "not "); + + em_queue_t q2 = em_queue_create("testQlo", EM_QUEUE_TYPE_PARALLEL, EM_QUEUE_PRIO_NORMAL, + grp, NULL); + + test_fatal_if(q2 == EM_QUEUE_UNDEF, "Q create fail"); + em_eo_add_queue_sync(perf_shm->eo, q2); + + if (g_options.lo_events) + APPL_PRINT("Backround work: %lu normal priority events with %.2fus work\n", + g_options.lo_events, g_options.work_ns / 1000.0); + + if (g_options.atomic_end) + APPL_PRINT("Using atomic_processing_end()\n"); + + if (g_options.eo_receive) { + em_status_t stat = em_dispatch_register_enter_cb(entry_hook); + + test_fatal_if(stat != EM_OK, "entry_hook() register failed!"); + stat = em_dispatch_register_exit_cb(exit_hook); + test_fatal_if(stat != EM_OK, "exit_hook() register failed!"); + APPL_PRINT("entry/exit hooks registered (expect a bit more latency)\n"); + } + + /* Start EOs */ + em_status_t start_ret = EM_ERR, ret; + + perf_shm->eo_ctx.test_q = q; + perf_shm->eo_ctx.loprio_q = q2; + perf_shm->eo_ctx.stopping = false; + + ret = em_eo_start_sync(perf_shm->eo, &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start:%" PRI_STAT " %" PRI_STAT "", ret, start_ret); + APPL_PRINT("Starting %lu loops\n", g_options.loops); +} + +void test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + /* Stop EOs */ + eo = perf_shm->eo; + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + + /* Remove and delete all of the EO's queues, then delete the EO */ + eo = perf_shm->eo; + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); +} + +void test_term(void) +{ + APPL_PRINT("%s() on EM-core %d\n", __func__, em_core_id()); + em_unregister_error_handler(); + env_shared_free(perf_shm); +} + +em_status_t start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + (void)conf; + + APPL_PRINT("EO %" PRI_EO " starting\n", eo); + + if (em_debug_timestamp(EM_DEBUG_TSP_SCHED_RETURN) == 0) /* could be disabled */ + APPL_PRINT("Dispatch timestamps (max_do) NOT available\n"); + + for (uint64_t i = 0; i < g_options.lo_events; i++) { + em_event_t ev = em_alloc(sizeof(test_msg), EM_EVENT_TYPE_SW, EM_POOL_DEFAULT); + + test_fatal_if(ev == EM_EVENT_UNDEF, "Event alloc fail"); + test_msg *msg = em_event_pointer(ev); + + msg->count = 0; + msg->work = g_options.work_ns; + msg->magic = BACKGROUND_MAGIC; + em_status_t ret = em_send(ev, ((app_eo_ctx *)eo_context)->loprio_q); + + test_fatal_if(ret != EM_OK, "Send fail"); + } + APPL_PRINT("Sent %lu bg events\n", g_options.lo_events); + + perf_shm->times.mint = INT64_MAX; + perf_shm->times.disp_min = INT64_MAX; + + em_event_t ev = em_alloc(sizeof(test_msg), EM_EVENT_TYPE_SW, EM_POOL_DEFAULT); + + test_fatal_if(ev == EM_EVENT_UNDEF, "Event alloc fail"); + test_msg *msg = em_event_pointer(ev); + + msg->count = 0; + msg->magic = TIMING_MAGIC; + msg->ts = odp_time_global_ns(); + return em_send(ev, ((app_eo_ctx *)eo_context)->test_q); +} + +em_status_t stop(void *eo_context, em_eo_t eo) +{ + (void)eo_context; + + APPL_PRINT("EO %" PRI_EO " stopping\n", eo); + return EM_OK; +} + +void receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context) +{ + uint64_t ts_ns = 
odp_time_global_strict_ns(); /* first thing to do */ + app_eo_ctx *ctx = (app_eo_ctx *)eo_context; + test_msg *msg = em_event_pointer(event); + em_status_t ret; + + (void)type; + (void)q_context; + + /* shutdown? */ + if (unlikely(ctx->stopping)) { + em_free(event); + return; + } + + /* background work? */ + if (unlikely(queue == ctx->loprio_q)) { + do_work(msg); + perf_shm->num_lo++; /* atomic_uint */ + ret = em_send(event, ctx->loprio_q); + test_fatal_if(ret != EM_OK, "Event send fail, ret=%u, #=%lu", + (unsigned int)ret, msg->count); + return; + } + + test_fatal_if(msg->magic != TIMING_MAGIC, "Unexpected event, magic fail (%lx/%lx)", + msg->magic, TIMING_MAGIC); + test_fatal_if(queue != ctx->test_q, "Timing event from wrong Q??"); + + /* timing event, maintain min/max/avg latency */ + int64_t diff = ts_ns - msg->ts - perf_shm->ts_overhead; + + if (unlikely(msg->count < START_EVENTS)) { /* ignore first ones */ + perf_shm->start_time = odp_time_global_ns(); + perf_shm->loop_start_time = perf_shm->start_time; + } else { + update_stats(ts_ns, diff, msg->count); + + if (g_options.atomic_end) + em_atomic_processing_end(); + + /* reporting period */ + if (unlikely(!(msg->count % REPORT_PERIOD) && msg->count)) { + print_stats(perf_shm->start_time, perf_shm->loop_start_time, msg->count); + perf_shm->loopcount++; + if (perf_shm->loopcount >= g_options.loops && g_options.loops) { + ctx->stopping = true; + em_free(event); + raise(SIGINT); + return; + } + perf_shm->loop_start_time = odp_time_global_ns(); + } + } + + msg->count++; + msg->ts = odp_time_global_strict_ns(); + ret = em_send(event, ctx->test_q); + test_fatal_if(ret != EM_OK, "Event send fail"); +} + +int main(int argc, char *argv[]) +{ + /* pick app-specific arguments after '--' */ + int i; + + for (i = 1; i < argc; i++) { + if (!strcmp(argv[i], "--")) + break; + } + if (i < argc) { + if (!parse_args(i, argc, argv)) { + APPL_PRINT("Invalid application arguments\n"); + return 1; + } + } + + return cm_setup(argc, argv); +} diff --git a/programs/performance/scheduling_latency.h b/programs/performance/scheduling_latency.h new file mode 100644 index 00000000..5c0b5cbf --- /dev/null +++ b/programs/performance/scheduling_latency.h @@ -0,0 +1,119 @@ +#include +#include +#include + +#define VERSION "v0.1 WIP" +#define START_EVENTS 100000 /* ignore first events for startup prints to finish */ +#define REPORT_PERIOD 10000000 + +#define TIMING_MAGIC 0xCAFEBEEF0000CAFE +#define BACKGROUND_MAGIC 0xBEEFBEEFCAFECAFE + +void test_init(void); + +/* EO context */ +typedef struct app_eo_ctx { + em_queue_t test_q; + em_queue_t loprio_q; + bool stopping; +} app_eo_ctx; + +/* cmdline options */ +typedef struct config_data { + uint64_t loops; + em_queue_type_t queue_type; + uint64_t lo_events; + uint64_t work_ns; + em_core_mask_t lgroup; + em_core_mask_t hgroup; + bool atomic_end; + bool eo_receive; + +} config_data; + +/* shared memory data */ +typedef struct perf_shm_t { + atomic_uint_fast64_t num_lo ODP_ALIGNED_CACHE; + + struct { + int64_t mint; /* min ns */ + uint64_t minnum; /* event # */ + int64_t maxt; /* max ns */ + uint64_t maxnum; /* event # */ + int64_t maxdisp; /* max dispatch overhead ns */ + int64_t sum; /* for average */ + int64_t disp_min; /* min dispatch ns, odp->eo */ + int64_t disp_max; /* max ns */ + int64_t max_eo_time; /* max EO receive */ + } times ODP_ALIGNED_CACHE; + + em_eo_t eo; + app_eo_ctx eo_ctx; + int64_t ts_overhead; /* stored timestamp overhead */ + + uint64_t loopcount ODP_ALIGNED_CACHE; + int64_t start_time; + int64_t
loop_start_time; + uint64_t stat_mcount; + uint64_t stat_lcount; + +} perf_shm_t; + +typedef struct test_msg { + uint64_t count; + uint64_t ts; + uint64_t work; + uint64_t magic; +} test_msg; + +const struct option longopts[] = { + {"loops", required_argument, NULL, 'l'}, + {"levents", required_argument, NULL, 'e'}, + {"work", required_argument, NULL, 'w'}, + {"lgroup", required_argument, NULL, 'g'}, + {"hgroup", required_argument, NULL, 't'}, + {"atomic-end", no_argument, NULL, 'a'}, + {"eo-receive", no_argument, NULL, 'r'}, + {"queue-type", required_argument, NULL, 'q'}, + + {"help", no_argument, NULL, 'h'}, + {NULL, 0, NULL, 0} +}; + +const char *shortopts = "l:e:hw:g:t:arq:"; +/* descriptions for above options, keep in sync! */ +const char *descopts[] = { + "Number of measurement cycles (10M events each)", + "Number of lower priority background events (default 0)", + "Amount of time spent for each background event (ns, default 2000)", + "Coremask for background events, default all (hex, EM core ids)", + "Coremask for hi-prio timing events, default all (hex, EM core ids)", + "Use atomic_processing_end (default no)", + "Include measuring EO receive time (causes extra latency)", + "Queue type for hi-priority. 0=parallel, 1=atomic (default), 2=ordered", + "Print usage and exit", + NULL +}; + +const char *instructions = +"Simple scheduling latency test. Creates one hi-priority queue and sends one pre-allocated\n" +"event to it. Time from em_send to EO receive is measured and then the event is sent again.\n" +"The reported event rate is the latency-limited rate, but also includes test application\n" +"overhead (use -r to check EO time).\n" +"Optionally, background event load can be added to another parallel queue (-e).\n" +"By default all cores are load balanced with both queues.\n" +"Note that when running multiple loops (-l), the EO receive time (-r) in particular\n" +"may be affected by the printing that happens at the end of each loop cycle.\n" +"\n" +"time(h) test runtime\n" +"cores # cores\n" +"events(M) # test events\n" +"rate(M/s) test events per sec\n" +"min[ns] minimum latency\n" +"max[ns] maximum latency\n" +"avg[ns] average latency\n" +"min ev# event sequence number of min latency\n" +"max ev# event sequence number of max latency\n" +"max_do[ns] maximum dispatcher overhead (odp sched -> EO). 0 means debug is disabled\n" +"max_eo[ns] maximum EO receive time. 0 means disabled\n" +"\n"; diff --git a/programs/performance/send_multi.c b/programs/performance/send_multi.c index 9cd16af5..825868d1 100644 --- a/programs/performance/send_multi.c +++ b/programs/performance/send_multi.c @@ -1,1301 +1,1301 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission.
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine performance test for burst sending of events. - * (based on the queues_unscheduled.c test and extends it to use burst - * sending of events into the next queue, see em_send_multi() & - * em_queue_dequeue_multi()) - * - * Measures the average cycles consumed during an event send-sched-receive loop - * for a certain number of queues and events in the system. The test increases - * the number of queues[+events] for each measurement round and prints the - * results. The test will stop if the maximum number of supported queues by the - * system is reached. - * - * Each normal scheduled queue is accompanied by an unscheduled queue. Received - * events are stored until a suitable length event burst is available, then the - * whole burst is forwarded to the next queue in the chain using - * em_send_multi(). Each stored burst is accompanied by another burst taken - * from the associated unscheduled queue. - * Both the received scheduled events and the unscheduled dequeued events are - * sent as bursts to the next queue at the end of the receive function. - * - * The measured cycles contain the scheduled event send_multi-sched-receive - * cycles as well as the unscheduled event multi_dequeue. - * - * Plot the cycles/event to get an idea of how the system scales with an - * increasing number of queues. - */ - -#include -#include -#include - -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -/* - * Test options: - */ - -/* Alloc and free per event */ -#define ALLOC_FREE_PER_EVENT 0 /* false=0 or true=1 */ - -/* - * Create all EM queues at startup or create the queues during - * the test in steps. - */ -#define CREATE_ALL_QUEUES_AT_STARTUP 0 /* false=0 or true=1 */ - -/* - * Measure the send-enqueue-schedule-receive latency. Measured separately for - * 'high priority and 'low priority' queues (ratio 1:4). - */ -#define MEASURE_LATENCY 1 /* false=0 or true=1 */ - -/* - * Keep the number of events constant while increasing the number of queues. - * Should be dividable by or factor of queue_step. - */ -#define CONST_NUM_EVENTS 4096 /* true>0 or false=0 */ - -/* - * Test configuration: - */ - -#define MAX_CORES 64 - -/* Number of EO's and scheduled queues in a loop */ -#define NUM_EOS 4 - -/* Number of events per queue */ -#define NUM_EVENTS 4 - -#if CONST_NUM_EVENTS > 0 -/* - * Total number of queues when using a constant number of events. - * Make sure that all queues, both scheduled and unscheduled (hence /2), - * get 'NUM_EVENTS' events per queue. - */ -#define NUM_QUEUES ((CONST_NUM_EVENTS / NUM_EVENTS) / 2) -#else -/* - * Total number of queues when increasing the total event count for each queue - * step. 
- */ -#define NUM_QUEUES (NUM_EOS * 16 * 1024) -#endif - -/* Number of events to collect before sending out with send-multi */ -#define NUM_STORAGE 4 - -/* Number of data bytes in an event */ -#define DATA_SIZE 128 - -/* Samples before adding more queues */ -#define NUM_SAMPLES (1 + 8) /* setup(1) + measure(N) */ - -/* Num events a core processes between samples */ -#define EVENTS_PER_SAMPLE 0x400000 - -/* EM queue type */ -#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC - -/* Core states during test. */ -#define CORE_STATE_MEASURE 0 -#define CORE_STATE_IDLE 1 - -/* Result APPL_PRINT() format string */ -#define RESULT_PRINTF_HDR "Cycles/Event Events/s cpu-freq\n" -#define RESULT_PRINTF_FMT "%12.0f %7.0f M %5.0f MHz %" PRIu64 "\n" - -/* Result APPL_PRINT() format string when MEASURE_LATENCY is used */ -#define RESULT_PRINTF_LATENCY_HDR \ -"Cycles/ Events/ Latency:\n" \ -" Event Sec hi-ave hi-max lo-ave lo-max cpu-freq\n" -#define RESULT_PRINTF_LATENCY_FMT \ -"%6.0f %7.2f M %8.0f %7" PRIu64 " %7.0f %7" PRIu64 " %5.0f MHz %" PRIu64 "\n" - -/* - * The number of scheduled queues to use in each test step. - * Additional unscheduled queues are also created for each step. - * - * NOTE: The max queue step is always 'NUM_QUEUES', even if the value of - * 'NUM_QUEUES' would be smaller than a listed queue step (then just stop - * before reaching the end of the list). - */ -static const int queue_steps[] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, - 4096, 8192, 16384, 32768, 65536, NUM_QUEUES}; - -/** - * Test state, - * cache line alignment and padding handled in 'perf_shm_t' - */ -typedef struct { - int queues; - int step; - int samples; - int num_cores; - int reset_flag; - double cpu_mhz; - uint64_t cpu_hz; - uint64_t print_count; - env_atomic64_t ready_count; - /* if using CONST_NUM_EVENTS:*/ - int free_flag; - env_atomic64_t freed_count; -} test_status_t; - -/** - * Performance test statistics (per core) - */ -typedef struct { - uint64_t events; - env_time_t begin_time; - env_time_t end_time; - env_time_t diff_time; - struct { - uint64_t events; - env_time_t hi_prio_ave; - env_time_t hi_prio_max; - env_time_t lo_prio_ave; - env_time_t lo_prio_max; - } latency; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} core_stat_t; - -COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, - CORE_STAT_SIZE_ERROR); - -/** - * EO context data - */ -typedef struct { - em_eo_t eo_id; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} eo_context_t; - -COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, - EO_CONTEXT_T__SIZE_ERROR); - -/** - * Queue context data - * Keep the scheduled queue context and the associated unscheduled queue - * context data in the same cache line for faster access - also eliminates - * the need to call em_queue_get_context() for the unscheduled queues. 
- */ -typedef struct { - struct scheduled_queue_context { - /** This queue */ - em_queue_t this_queue; - /** Event storage */ - em_queue_t storage; - /** Next queue */ - em_queue_t next_queue; - /** Priority of 'this_queue' */ - em_queue_prio_t prio; - /** Number of events enqueued in 'storage' */ - int stored_count; - } sch_q; - - struct unscheduled_queue_context { - /** This unscheduled queue */ - em_queue_t this_queue; - /** Next unscheduled queue */ - em_queue_t next_queue; - } unsch_q; - - /** Pad to multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} queue_context_t; - -COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, - QUEUE_CONTEXT_SIZE_ERROR); - -/** - * Performance test event - */ -typedef struct { - /* Send time stamp */ - env_time_t send_time; - /* Sequence number */ - int seq; - /* Test data */ - uint8_t data[DATA_SIZE]; -} perf_event_t; - -/** - * Test shared memory - */ -typedef struct { - /* Event pool used by this application */ - em_pool_t pool; - - test_status_t test_status ENV_CACHE_LINE_ALIGNED; - - core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; - - eo_context_t eo_context_tbl[NUM_EOS] ENV_CACHE_LINE_ALIGNED; - - queue_context_t queue_context_tbl[NUM_QUEUES] ENV_CACHE_LINE_ALIGNED; - /* EO ID's */ - em_eo_t eo[NUM_EOS] ENV_CACHE_LINE_ALIGNED; -} perf_shm_t; - -COMPILE_TIME_ASSERT(sizeof(perf_shm_t) % ENV_CACHE_LINE_SIZE == 0, - PERF_SHM_T__SIZE_ERROR); - -/* EM-core local pointer to shared memory */ -static ENV_LOCAL perf_shm_t *perf_shm; - -/* EM-core local state */ -static ENV_LOCAL int core_state = CORE_STATE_MEASURE; - -static em_status_t -error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args); - -static void -queue_step(void); - -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); - -static em_status_t -stop(void *eo_context, em_eo_t eo); - -static void -receive_func(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_context); - -static int -update_test_state(em_event_t event, em_event_t unsch_event); - -static void -create_and_link_queues(int start_queue, int num_queues); - -static void -print_test_statistics(test_status_t *test_status, int print_header, - core_stat_t core_stat[]); - -static inline em_event_t -alloc_free_per_event(em_event_t event); - -static inline void -measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, - env_time_t recv_time); - -/** - * Main function - * - * Call cm_setup() to perform test & EM setup common for all the - * test applications. - * - * cm_setup() will call test_init() and test_start() and launch - * the EM dispatch loop on every EM-core. - */ -int main(int argc, char *argv[]) -{ - return cm_setup(argc, argv); -} - -/** - * Test error handler - * - * @param eo Execution object id - * @param error The error code - * @param escope Error scope - * @param args List of arguments (__FILE__, __func__, __LINE__, - * (format), ## __VA_ARGS__) - * - * @return The original error code. 
- */ -static em_status_t -error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) -{ - if (escope == EM_ESCOPE_QUEUE_CREATE && !EM_ERROR_IS_FATAL(error)) { - APPL_PRINT("\nUnable to create more queues\n\n" - "Test finished\n"); - raise(SIGINT); - return error; - } - - if (appl_shm->exit_flag && EM_ESCOPE(escope) && - !EM_ERROR_IS_FATAL(error)) { - /* Suppress non-fatal EM-error logs during tear-down */ - if (escope == EM_ESCOPE_EO_ADD_QUEUE_SYNC) { - APPL_PRINT("\nExit: suppress queue setup error\n\n"); - return error; - } - } - - return test_error_handler(eo, error, escope, args); -} - -/** - * Init of the Queues performance test application. - * - * @attention Run on all cores. - * - * @see cm_setup() for setup and dispatch. - */ -void -test_init(void) -{ - int core = em_core_id(); - - if (core == 0) { - perf_shm = env_shared_reserve("PerfQueuesSharedMem", - sizeof(perf_shm_t)); - em_register_error_handler(error_handler); - } else { - perf_shm = env_shared_lookup("PerfQueuesSharedMem"); - } - - if (perf_shm == NULL) - test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, - "Perf test queues init failed on EM-core: %u\n", - em_core_id()); - else if (core == 0) - memset(perf_shm, 0, sizeof(perf_shm_t)); -} - -/** - * Startup of the Queues performance test application. - * - * @attention Run only on EM core 0. - * - * @param appl_conf Application configuration - * - * @see cm_setup() for setup and dispatch. - */ -void -test_start(appl_conf_t *const appl_conf) -{ - eo_context_t *eo_ctx; - em_status_t ret, start_ret = EM_ERROR; - const int q_ctx_size = sizeof(perf_shm->queue_context_tbl); - int i; - - /* - * Store the event pool to use, use the EM default pool if no other - * pool is provided through the appl_conf. - */ - if (appl_conf->num_pools >= 1) - perf_shm->pool = appl_conf->pools[0]; - else - perf_shm->pool = EM_POOL_DEFAULT; - - APPL_PRINT("\n" - "***********************************************************\n" - "EM APPLICATION: '%s' initializing:\n" - " %s: %s() - EM-core:%i\n" - " Application running on %d EM-cores (procs:%d, threads:%d)\n" - " using event pool:%" PRI_POOL "\n" - " Max. 
NUM_QUEUES: %i\n" - " sizeof queue_context_tbl: %i kB\n" - "***********************************************************\n" - "\n", - appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), - em_core_count(), - appl_conf->num_procs, appl_conf->num_threads, - perf_shm->pool, NUM_QUEUES, q_ctx_size / 1024); - - test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, - "Undefined application event pool!"); - - perf_shm->test_status.cpu_hz = env_core_hz(); - perf_shm->test_status.cpu_mhz = (double)perf_shm->test_status.cpu_hz / - 1000000.0; - perf_shm->test_status.reset_flag = 0; - perf_shm->test_status.num_cores = em_core_count(); - perf_shm->test_status.free_flag = 0; - - env_atomic64_init(&perf_shm->test_status.ready_count); - env_atomic64_init(&perf_shm->test_status.freed_count); - - /* Create EOs */ - for (i = 0; i < NUM_EOS; i++) { - eo_ctx = &perf_shm->eo_context_tbl[i]; - perf_shm->eo[i] = em_eo_create("perf test eo", start, NULL, - stop, NULL, receive_func, - eo_ctx); - test_fatal_if(perf_shm->eo[i] == EM_EO_UNDEF, - "EO create failed:%d", i, NUM_EOS); - } - - APPL_PRINT(" EOs created\n"); - - /* - * Create and link queues - */ - if (CREATE_ALL_QUEUES_AT_STARTUP) /* Create ALL queues at once */ - create_and_link_queues(0, NUM_QUEUES); - else /* Create queues for the first step, then more before each step */ - create_and_link_queues(0, queue_steps[0]); - - /* Start EOs */ - for (i = 0; i < NUM_EOS; i++) { - ret = em_eo_start_sync(perf_shm->eo[i], &start_ret, NULL); - test_fatal_if(ret != EM_OK || start_ret != EM_OK, - "EO start(%d):%" PRI_STAT " %" PRI_STAT "", - i, ret, start_ret); - } - - queue_step(); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_eo_t eo; - em_status_t ret; - int i; - - (void)appl_conf; - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - /* Stop & delete EOs */ - for (i = 0; i < NUM_EOS; i++) { - eo = perf_shm->eo[i]; - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " stop:%" PRI_STAT "", - eo, ret); - - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, - "EO:%" PRI_EO " delete:%" PRI_STAT "", - eo, ret); - } - - /* Delete the unscheduled queues */ - for (i = 0; i < NUM_QUEUES; i++) { - queue_context_t *q_ctx = &perf_shm->queue_context_tbl[i]; - em_queue_t unsch_queue = q_ctx->unsch_q.this_queue; - em_event_t unsch_event; - - if (unsch_queue == EM_QUEUE_UNDEF) - continue; - - for (;;) { - unsch_event = em_queue_dequeue(unsch_queue); - if (unsch_event == EM_EVENT_UNDEF) - break; - em_free(unsch_event); - } - ret = em_queue_delete(unsch_queue); - test_fatal_if(ret != EM_OK, - "Unsch-Queue:%" PRI_QUEUE " delete:%" PRI_STAT "", - unsch_queue, ret); - } - /* Delete the unscheduled 'storage' queues */ - for (i = 0; i < NUM_QUEUES; i++) { - queue_context_t *q_ctx = &perf_shm->queue_context_tbl[i]; - em_queue_t unsch_queue = q_ctx->sch_q.storage; - em_event_t unsch_event; - - if (unsch_queue == EM_QUEUE_UNDEF) - continue; - - for (;;) { - unsch_event = em_queue_dequeue(unsch_queue); - if (unsch_event == EM_EVENT_UNDEF) - break; - em_free(unsch_event); - } - ret = em_queue_delete(unsch_queue); - test_fatal_if(ret != EM_OK, - "Unsch-Queue:%" PRI_QUEUE " delete:%" PRI_STAT "", - unsch_queue, ret); - } -} - -void -test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (core == 0) { - env_shared_free(perf_shm); - em_unregister_error_handler(); - } -} - -/** - * Allocate, initialize and send test step events. 
- */ -static void -queue_step(void) -{ - queue_context_t *q_ctx; - perf_event_t *perf_event; - em_status_t ret; - const int first = perf_shm->test_status.queues; - const int step = perf_shm->test_status.step; - const int queue_count = queue_steps[step]; - int i, j; - - /* Allocate and send test events for the queues */ - if (CONST_NUM_EVENTS) { - for (i = 0; i < CONST_NUM_EVENTS / 2; i++) { - em_event_t unsch_event; - - unsch_event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(unsch_event == EM_EVENT_UNDEF, - "EM alloc failed (%i)", i); - - /* Allocate events evenly to the queues */ - q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; - - ret = em_send(unsch_event, q_ctx->unsch_q.this_queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT "\n" - "Unsched-Q:%" PRI_QUEUE "", - ret, q_ctx->unsch_q.this_queue); - em_free(unsch_event); - return; - } - } - for (i = 0; i < CONST_NUM_EVENTS / 2; i++) { - em_event_t event; - - event = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, perf_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF || - sizeof(perf_event_t) != - em_event_get_size(event), - "EM alloc failed (%i)", i); - - perf_event = em_event_pointer(event); - perf_event->seq = i; - perf_event->send_time = env_time_global(); - - /* Allocate events evenly to the queues */ - q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; - - ret = em_send(event, q_ctx->sch_q.this_queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send:%" PRI_STAT "\n" - "Queue:%" PRI_QUEUE "", - ret, q_ctx->sch_q.this_queue); - em_free(event); - return; - } - } - } else { - for (i = first; i < queue_count; i++) { - em_event_t unsch_events[NUM_EVENTS]; - int num; - - q_ctx = &perf_shm->queue_context_tbl[i]; - - for (j = 0; j < NUM_EVENTS; j++) { - unsch_events[j] = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(unsch_events[j] == EM_EVENT_UNDEF, - "EM alloc failed (%d, %d)", i, j); - } - num = em_send_multi(unsch_events, NUM_EVENTS, - q_ctx->unsch_q.this_queue); - if (unlikely(num != NUM_EVENTS)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send multi:%d\n" - "Unsched-Q:%" PRI_QUEUE "", - num, q_ctx->unsch_q.this_queue); - em_free_multi(&unsch_events[num], NUM_EVENTS - num); - return; - } - } - for (i = first; i < queue_count; i++) { - em_event_t events[NUM_EVENTS]; - int num; - - q_ctx = &perf_shm->queue_context_tbl[i]; - - for (j = 0; j < NUM_EVENTS; j++) { - events[j] = em_alloc(sizeof(perf_event_t), - EM_EVENT_TYPE_SW, - perf_shm->pool); - test_fatal_if(events[j] == EM_EVENT_UNDEF || - sizeof(perf_event_t) != - em_event_get_size(events[j]), - "EM alloc failed (%d,%d)", i, j); - - perf_event = em_event_pointer(events[j]); - perf_event->seq = i * NUM_EVENTS + j; - perf_event->send_time = env_time_global(); - } - num = em_send_multi(events, NUM_EVENTS, - q_ctx->sch_q.this_queue); - if (unlikely(num != NUM_EVENTS)) { - test_fatal_if(!appl_shm->exit_flag, - "EM send multi:%d\n" - "Queue:%" PRI_QUEUE "", - num, q_ctx->sch_q.this_queue); - em_free_multi(&events[num], NUM_EVENTS - num); - return; - } - } - } - - perf_shm->test_status.queues = queue_count; - perf_shm->test_status.step++; - - APPL_PRINT("\nNumber of queues: %6.0d + %d\n", - queue_count, queue_count); - if (CONST_NUM_EVENTS) - APPL_PRINT("Number of events: %6.0d + %d\n", - CONST_NUM_EVENTS / 2, CONST_NUM_EVENTS / 2); - else - APPL_PRINT("Number of events: %6.0d + %d\n", - queue_count * 
NUM_EVENTS, queue_count * NUM_EVENTS); -} - -/** - * @private - * - * EO start function. - * - */ -static em_status_t -start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - eo_context_t *eo_ctx = eo_context; - - (void)conf; - - APPL_PRINT("EO %" PRI_EO " starting.\n", eo); - - eo_ctx->eo_id = eo; - - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t -stop(void *eo_context, em_eo_t eo) -{ - em_status_t ret; - - (void)eo_context; - - APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); - - /* remove and delete all of the EO's queues */ - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", - ret, eo); - - return EM_OK; -} - -/** - * @private - * - * EO receive function. - * - * Loops back events and calculates the event rate. - */ -static void -receive_func(void *eo_context, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_context) -{ - queue_context_t *q_ctx; - em_queue_t dst_queue; - em_queue_t src_unsch_queue; - em_queue_t dst_unsch_queue; - int ret; - int is_freed; - int i; - - (void)eo_context; - (void)type; - - if (unlikely(appl_shm->exit_flag)) { - em_free(event); - return; - } - - q_ctx = q_context; - - /* - * Store up to 'NUM_STORAGE - 1' events before sending them to the next - * queue with em_send_multi(). - */ - if (q_ctx->sch_q.stored_count < NUM_STORAGE - 1 && - !perf_shm->test_status.free_flag) { - em_status_t stat; - - q_ctx->sch_q.stored_count++; - stat = em_send(event, q_ctx->sch_q.storage); - test_fatal_if(stat != EM_OK, - "EM send:%" PRI_STAT " storage-Q:%" PRI_QUEUE "", - stat, q_ctx->sch_q.storage); - /* Return if the event was stored */ - return; - } - - /* - * 'NUM_STORAGE' events received (or test step ending), now send - * the scheduled events and an equal number of unscheduled events to - * the next queue. 
- */ - em_event_t events[NUM_STORAGE]; - em_event_t unsch_events[NUM_STORAGE]; - perf_event_t *perf_events[NUM_STORAGE]; - int num, num_unsch; - env_time_t recv_time, send_time; - - num = em_queue_dequeue_multi(q_ctx->sch_q.storage, - events, NUM_STORAGE - 1); - q_ctx->sch_q.stored_count -= num; - - /* Store incoming event last */ - events[num] = event; - num++; - - /* - * Dequeue an unscheduled event for every received scheduled event - */ - src_unsch_queue = q_ctx->unsch_q.this_queue; - num_unsch = em_queue_dequeue_multi(src_unsch_queue, - unsch_events, num); - test_fatal_if(num_unsch != num, - "perf_test_queues: %d != %d", num_unsch, num); - - /* - * Helper: Update the test state, count recv events, - * calc & print stats, prepare for next step - */ - if (MEASURE_LATENCY) { - recv_time = env_time_global(); - for (i = 0; i < num; i++) - perf_events[i] = em_event_pointer(events[i]); - for (i = 0; i < num; i++) - measure_latency(perf_events[i], q_ctx, recv_time); - } - - if (ALLOC_FREE_PER_EVENT) - for (i = 0; i < num; i++) - events[i] = alloc_free_per_event(events[i]); - - dst_queue = q_ctx->sch_q.next_queue; - dst_unsch_queue = q_ctx->unsch_q.next_queue; - - is_freed = 0; - for (i = 0; i < num; i++) - if (update_test_state(events[i], unsch_events[i])) - is_freed++; - - num -= is_freed; - if (num == 0) - return; - - test_fatal_if(num < 0 || num > NUM_STORAGE, - "invalid number of events:%d", num); - test_fatal_if(queue != q_ctx->sch_q.this_queue, - "perf_test_queues: Queue config error"); - - /* Enqueue the unscheduled event to the next unscheduled queue */ - ret = em_send_multi(unsch_events, num, dst_unsch_queue); - test_fatal_if(ret != num, - "EM send multi:%d unsched-queue:%" PRI_QUEUE "", - ret, dst_unsch_queue); - - /* Send the scheduled event to the next scheduled queue */ - if (MEASURE_LATENCY) { - if (ALLOC_FREE_PER_EVENT) { - for (i = 0; i < num; i++) - perf_events[i] = em_event_pointer(events[i]); - } - send_time = env_time_global(); - for (i = 0; i < num; i++) - perf_events[i]->send_time = send_time; - } - ret = em_send_multi(events, num, dst_queue); - if (unlikely(ret != num)) { - for (i = ret; i < num; i++) - em_free(events[i]); - test_fatal_if(!appl_shm->exit_flag, - "EM send multi:%d queue:%" PRI_QUEUE "", - ret, dst_queue); - } -} - -/** - * Receive function helper: Update the test state - * - * Calculates the number of received events, maintains & prints test statistics - * and restarts/reconfigures the test for the next queue/event-setup - * - * @return '1' if the caller receive function should immediately return, - * '0' otherwise - */ -static inline int -update_test_state(em_event_t event, em_event_t unsch_event) -{ - uint64_t events; - uint64_t freed_count; - uint64_t ready_count; - const int core = em_core_id(); - test_status_t *const tstat = &perf_shm->test_status; - core_stat_t *const cstat = &perf_shm->core_stat[core]; - - events = cstat->events; - /* one scheduled and one unscheduled event received */ - events += 2; - - if (unlikely(tstat->reset_flag)) { - events = 0; - if (CONST_NUM_EVENTS) { - /* Free all old events before allocating new ones. 
*/ - if (unlikely(tstat->free_flag)) { - em_free(event); - em_free(unsch_event); - freed_count = - env_atomic64_add_return(&tstat->freed_count, 2); - if (freed_count == CONST_NUM_EVENTS) { - /* Last event */ - env_atomic64_set(&tstat->freed_count, - 0); - tstat->reset_flag = 0; - tstat->free_flag = 0; - queue_step(); - } - /* Req caller receive-func to return */ - return 1; - } - } - - if (unlikely(core_state != CORE_STATE_IDLE)) { - core_state = CORE_STATE_IDLE; - cstat->begin_time = ENV_TIME_NULL; - - ready_count = - env_atomic64_add_return(&tstat->ready_count, 1); - - if (ready_count == (uint64_t)tstat->num_cores) { - env_atomic64_set(&tstat->ready_count, 0); - - if (CONST_NUM_EVENTS) { - int sample = tstat->samples; - int queues = tstat->queues; - - if (sample == 0 && queues < NUM_QUEUES) - tstat->free_flag = 1; - else - tstat->reset_flag = 0; - } else { - tstat->reset_flag = 0; - } - } - } - } else if (unlikely(events == 2)) { - cstat->begin_time = env_time_global(); - cstat->latency.events = 0; - cstat->latency.hi_prio_ave = ENV_TIME_NULL; - cstat->latency.hi_prio_max = ENV_TIME_NULL; - cstat->latency.lo_prio_ave = ENV_TIME_NULL; - cstat->latency.lo_prio_max = ENV_TIME_NULL; - - core_state = CORE_STATE_MEASURE; - } else if (unlikely(events == EVENTS_PER_SAMPLE)) { - /* - * Measurements done for this step. Store results and continue - * receiving events until all cores are done. - */ - env_time_t begin_time, end_time; - - cstat->end_time = env_time_global(); - - end_time = cstat->end_time; - begin_time = cstat->begin_time; - cstat->diff_time = env_time_diff(end_time, begin_time); - - ready_count = env_atomic64_add_return(&tstat->ready_count, 1); - - /* - * Check whether all cores are done with the step, - * and if done proceed to the next step - */ - if (unlikely((int)ready_count == tstat->num_cores)) { - /* No real need for atomicity here, ran on last core*/ - env_atomic64_set(&tstat->ready_count, 0); - - tstat->reset_flag = 1; - tstat->samples++; - - /* - * Print statistics. - * Omit prints for the first sample round to allow the - * test to stabilize after setups and teardowns. - */ - if (tstat->samples > 1) { - int print_header = tstat->samples == 2 ? 1 : 0; - - print_test_statistics(tstat, print_header, - perf_shm->core_stat); - } - - /* - * Start next test step - setup new queues - */ - if (tstat->samples == NUM_SAMPLES && - tstat->queues < NUM_QUEUES) { - if (!CREATE_ALL_QUEUES_AT_STARTUP) { - int step = tstat->step; - int first_q = tstat->queues; - int num_qs = queue_steps[step] - - queue_steps[step - 1]; - - create_and_link_queues(first_q, num_qs); - } - - if (!CONST_NUM_EVENTS) - queue_step(); - - tstat->samples = 0; - } - } - } - - cstat->events = events; - - return 0; -} - -/** - * Creates a number of EM queues, associates them with EOs, and links them. 
- */ -static void -create_and_link_queues(int start_queue, int num_queues) -{ - int i, j; - em_queue_t queue, next_queue; - em_queue_t queue_unscheduled, next_unscheduled, storage_unscheduled; - em_queue_conf_t unsch_conf; - em_queue_prio_t prio; - em_status_t ret; - queue_context_t *q_ctx; - - APPL_PRINT("\nCreate new queues - scheduled:%d + unscheduled:%d\n", - num_queues, num_queues); - - if (num_queues % NUM_EOS != 0) { - APPL_PRINT("%s() arg 'num_queues'=%d not multiple of NUM_EOS=%d\n", - __func__, num_queues, NUM_EOS); - return; - } - - memset(&unsch_conf, 0, sizeof(unsch_conf)); - if (QUEUE_TYPE == EM_QUEUE_TYPE_ATOMIC) { - /* - * If the EO receives are running with an atomic context then - * unsched queue enq/deq can be multithread unsafe to possibly - * boost perf. - */ - unsch_conf.flags |= EM_QUEUE_FLAG_ENQ_NOT_MTSAFE; - unsch_conf.flags |= EM_QUEUE_FLAG_DEQ_NOT_MTSAFE; - } - - for (i = start_queue; i < (start_queue + num_queues); i += NUM_EOS) { - next_queue = EM_QUEUE_UNDEF; - next_unscheduled = EM_QUEUE_UNDEF; - - for (j = 0; j < NUM_EOS; j++) { - prio = EM_QUEUE_PRIO_NORMAL; - - if (MEASURE_LATENCY) { - if (j == 0) - prio = EM_QUEUE_PRIO_HIGH; - } - - q_ctx = &perf_shm->queue_context_tbl[i + j]; - - /* - * Create a scheduled queue + associated storage queue - */ - /* storage first... */ - storage_unscheduled = - em_queue_create("unscheduled_storage", - EM_QUEUE_TYPE_UNSCHEDULED, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, - &unsch_conf); - if (storage_unscheduled == EM_QUEUE_UNDEF) { - APPL_PRINT("Max nbr of supported queues: %d\n", - 3 * i + 1); - return; - } - q_ctx->sch_q.storage = storage_unscheduled; - - /* ...then sched queue */ - queue = em_queue_create("sched_queue", QUEUE_TYPE, prio, - EM_QUEUE_GROUP_DEFAULT, NULL); - if (queue == EM_QUEUE_UNDEF) { - APPL_PRINT("Max nbr of supported queues: %d\n", - 3 * i); - return; - } - q_ctx->sch_q.this_queue = queue; - q_ctx->sch_q.prio = prio; - - ret = em_queue_set_context(queue, q_ctx); - test_fatal_if(ret != EM_OK, - "em_queue_set_context:%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, perf_shm->eo[j], queue); - /* Add the scheduled queue to an EO and enable it */ - ret = em_eo_add_queue_sync(perf_shm->eo[j], queue); - if (unlikely(ret != EM_OK)) { - test_fatal_if(!appl_shm->exit_flag, - "em_eo_add_queue_sync():%" PRI_STAT "\n" - "EO:%" PRI_EO " Q:%" PRI_QUEUE "", - ret, perf_shm->eo[j], queue); - em_queue_delete(queue); - return; - } - - /* Link scheduled queues */ - q_ctx->sch_q.next_queue = next_queue; - - /* - * Create a new unscheduled queue - */ - queue_unscheduled = - em_queue_create("unscheduled_queue", - EM_QUEUE_TYPE_UNSCHEDULED, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, - &unsch_conf); - if (queue_unscheduled == EM_QUEUE_UNDEF) { - APPL_PRINT("Max nbr of supported queues: %d\n", - 3 * i + 2); - return; - } - q_ctx->unsch_q.this_queue = queue_unscheduled; - - /* Link unscheduled queues */ - q_ctx->unsch_q.next_queue = next_unscheduled; - - /* - * Set the same top level queue context for both the - * scheduled and the unscheduled queue, access queue - * specific context by using q_ctx->sch_q.* or - * q_ctx->unsch_q.* - this eliminates the need to call - * em_queue_get_context() for each event for the - * unscheduled queues - */ - ret = em_queue_set_context(queue_unscheduled, q_ctx); - test_fatal_if(ret != EM_OK, - "em_queue_set_context:%" PRI_STAT "\n" - "Unscheduled queue:%" PRI_QUEUE "", - ret, queue_unscheduled); - - /* Sanity check */ - test_fatal_if(em_queue_get_context(queue) != - 
em_queue_get_context(queue_unscheduled), - "em_queue_get_context failed."); - - next_queue = queue; - next_unscheduled = queue_unscheduled; - } - - /* Connect first scheduled queue to the last */ - q_ctx = &perf_shm->queue_context_tbl[i + 0]; - q_ctx->sch_q.next_queue = next_queue; - q_ctx->unsch_q.next_queue = next_unscheduled; - } -} - -/** - * Print test statistics - */ -static void -print_test_statistics(test_status_t *test_status, int print_header, - core_stat_t core_stat[]) -{ - const int num_cores = test_status->num_cores; - const uint64_t cpu_hz = test_status->cpu_hz; - const double cpu_mhz = test_status->cpu_mhz; - const uint64_t total_events = (uint64_t)num_cores * EVENTS_PER_SAMPLE; - const uint64_t print_count = test_status->print_count++; - env_time_t total_time = ENV_TIME_NULL; - - for (int i = 0; i < num_cores; i++) - total_time = env_time_sum(total_time, core_stat[i].diff_time); - - double cycles_per_event = 0.0; - double events_per_sec = 0.0; - - if (likely(total_events > 0)) - cycles_per_event = env_time_to_cycles(total_time, cpu_hz) / - (double)total_events; - if (likely(cycles_per_event > 0)) /* Million events/s: */ - events_per_sec = cpu_mhz * num_cores / cycles_per_event; - - /* - * Print without latency statistics - */ - if (!MEASURE_LATENCY) { - if (print_header) - APPL_PRINT(RESULT_PRINTF_HDR); - APPL_PRINT(RESULT_PRINTF_FMT, - cycles_per_event, events_per_sec, - cpu_mhz, print_count); - return; - } - - /* - * Print with latency statistics - */ - uint64_t latency_events = 0; - env_time_t latency_hi_ave = ENV_TIME_NULL; - env_time_t latency_hi_max = ENV_TIME_NULL; - env_time_t latency_lo_ave = ENV_TIME_NULL; - env_time_t latency_lo_max = ENV_TIME_NULL; - - for (int i = 0; i < num_cores; i++) { - latency_events += core_stat[i].latency.events; - - latency_hi_ave = env_time_sum(latency_hi_ave, - core_stat[i].latency.hi_prio_ave); - latency_lo_ave = env_time_sum(latency_lo_ave, - core_stat[i].latency.lo_prio_ave); - - if (env_time_cmp(core_stat[i].latency.hi_prio_max, - latency_hi_max) > 0) { - latency_hi_max = core_stat[i].latency.hi_prio_max; - } - if (env_time_cmp(core_stat[i].latency.lo_prio_max, - latency_lo_max) > 0) { - latency_lo_max = core_stat[i].latency.lo_prio_max; - } - } - - double lat_per_hi_ave = 0.0; - double lat_per_lo_ave = 0.0; - - if (likely(latency_events > 0)) { - lat_per_hi_ave = env_time_to_cycles(latency_hi_ave, cpu_hz) / - (double)latency_events; - lat_per_lo_ave = env_time_to_cycles(latency_lo_ave, cpu_hz) / - (double)latency_events; - } - - if (print_header) - APPL_PRINT(RESULT_PRINTF_LATENCY_HDR); - APPL_PRINT(RESULT_PRINTF_LATENCY_FMT, - cycles_per_event, events_per_sec, lat_per_hi_ave, - env_time_to_cycles(latency_hi_max, cpu_hz), - lat_per_lo_ave, - env_time_to_cycles(latency_lo_max, cpu_hz), - cpu_mhz, print_count); -} - -/** - * Free the input event and allocate a new one instead - */ -static inline em_event_t -alloc_free_per_event(em_event_t event) -{ - perf_event_t *perf_event = em_event_pointer(event); - env_time_t send_time = perf_event->send_time; - int seq = perf_event->seq; - size_t event_size = em_event_get_size(event); - - em_free(event); - - event = em_alloc(event_size, EM_EVENT_TYPE_SW, perf_shm->pool); - - perf_event = em_event_pointer(event); - - perf_event->send_time = send_time; - perf_event->seq = seq; - - return event; -} - -/** - * Measure the scheduling latency per event - */ -static inline void -measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, - env_time_t recv_time) -{ - const int core 
= em_core_id(); - core_stat_t *const cstat = &perf_shm->core_stat[core]; - const env_time_t send_time = perf_event->send_time; - env_time_t latency; - - if (perf_shm->test_status.reset_flag || - cstat->events == 0 || cstat->events >= EVENTS_PER_SAMPLE) - return; - - cstat->latency.events++; - - latency = env_time_diff(recv_time, send_time); - - if (q_ctx->sch_q.prio == EM_QUEUE_PRIO_HIGH) { - cstat->latency.hi_prio_ave = - env_time_sum(cstat->latency.hi_prio_ave, latency); - if (env_time_cmp(latency, cstat->latency.hi_prio_max) > 0) - cstat->latency.hi_prio_max = latency; - } else { - cstat->latency.lo_prio_ave = - env_time_sum(cstat->latency.lo_prio_ave, latency); - if (env_time_cmp(latency, cstat->latency.lo_prio_max) > 0) - cstat->latency.lo_prio_max = latency; - } -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine performance test for burst sending of events. + * (based on the queues_unscheduled.c test, extended to use burst + * sending of events into the next queue; see em_send_multi() & + * em_queue_dequeue_multi()) + * + * Measures the average cycles consumed during an event send-sched-receive loop + * for a certain number of queues and events in the system. The test increases + * the number of queues[+events] for each measurement round and prints the + * results. The test stops when the maximum number of queues supported by the + * system is reached. + * + * Each normal scheduled queue is accompanied by an unscheduled queue. Received + * events are stored until a burst of suitable length is available; then the + * whole burst is forwarded to the next queue in the chain using + * em_send_multi(). Each stored burst is accompanied by another burst taken + * from the associated unscheduled queue. + * Both the received scheduled events and the unscheduled dequeued events are + * sent as bursts to the next queue at the end of the receive function.
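+ * + * Forwarding scheme per receive (a sketch of the flow described above): + * 1) store the incoming event in the per-queue unscheduled 'storage' queue + * until a burst of NUM_STORAGE events has been gathered, + * 2) dequeue the stored burst plus an equally sized burst from the + * associated unscheduled queue with em_queue_dequeue_multi(), + * 3) forward both bursts to the next queues with em_send_multi().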
+ * + * The measured cycles contain the scheduled event send_multi-sched-receive + * cycles as well as the unscheduled event multi_dequeue. + * + * Plot the cycles/event to get an idea of how the system scales with an + * increasing number of queues. + */ + +#include +#include +#include + +#include +#include + +#include "cm_setup.h" +#include "cm_error_handler.h" + +/* + * Test options: + */ + +/* Alloc and free per event */ +#define ALLOC_FREE_PER_EVENT 0 /* false=0 or true=1 */ + +/* + * Create all EM queues at startup or create the queues during + * the test in steps. + */ +#define CREATE_ALL_QUEUES_AT_STARTUP 0 /* false=0 or true=1 */ + +/* + * Measure the send-enqueue-schedule-receive latency. Measured separately for + * 'high priority' and 'low priority' queues (ratio 1:4). + */ +#define MEASURE_LATENCY 1 /* false=0 or true=1 */ + +/* + * Keep the number of events constant while increasing the number of queues. + * Should be divisible by, or a factor of, queue_step. + */ +#define CONST_NUM_EVENTS 4096 /* true>0 or false=0 */ + +/* + * Test configuration: + */ + +#define MAX_CORES 64 + +/* Number of EOs and scheduled queues in a loop */ +#define NUM_EOS 4 + +/* Number of events per queue */ +#define NUM_EVENTS 4 + +#if CONST_NUM_EVENTS > 0 +/* + * Total number of queues when using a constant number of events. + * Make sure that all queues, both scheduled and unscheduled (hence /2), + * get 'NUM_EVENTS' events per queue. + */ +#define NUM_QUEUES ((CONST_NUM_EVENTS / NUM_EVENTS) / 2) +#else +/* + * Total number of queues when increasing the total event count for each queue + * step. + */ +#define NUM_QUEUES (NUM_EOS * 16 * 1024) +#endif + +/* Number of events to collect before sending out with send-multi */ +#define NUM_STORAGE 4 + +/* Number of data bytes in an event */ +#define DATA_SIZE 128 + +/* Samples before adding more queues */ +#define NUM_SAMPLES (1 + 8) /* setup(1) + measure(N) */ + +/* Num events a core processes between samples */ +#define EVENTS_PER_SAMPLE 0x400000 + +/* EM queue type */ +#define QUEUE_TYPE EM_QUEUE_TYPE_ATOMIC + +/* Core states during test. */ +#define CORE_STATE_MEASURE 0 +#define CORE_STATE_IDLE 1 + +/* Result APPL_PRINT() format string */ +#define RESULT_PRINTF_HDR "Cycles/Event Events/s cpu-freq\n" +#define RESULT_PRINTF_FMT "%12.0f %7.0f M %5.0f MHz %" PRIu64 "\n" + +/* Result APPL_PRINT() format string when MEASURE_LATENCY is used */ +#define RESULT_PRINTF_LATENCY_HDR \ +"Cycles/ Events/ Latency:\n" \ +" Event Sec hi-ave hi-max lo-ave lo-max cpu-freq\n" +#define RESULT_PRINTF_LATENCY_FMT \ +"%6.0f %7.2f M %8.0f %7" PRIu64 " %7.0f %7" PRIu64 " %5.0f MHz %" PRIu64 "\n" + +/* + * The number of scheduled queues to use in each test step. + * Additional unscheduled queues are also created for each step. + * + * NOTE: The max queue step is always 'NUM_QUEUES', even if the value of + * 'NUM_QUEUES' would be smaller than a listed queue step (then just stop + * before reaching the end of the list).
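+ * + * For example, with the defaults above (CONST_NUM_EVENTS=4096, NUM_EVENTS=4) + * NUM_QUEUES = (4096 / 4) / 2 = 512, so the queue steps end at 512.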
+ */ +static const int queue_steps[] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048, + 4096, 8192, 16384, 32768, 65536, NUM_QUEUES}; + +/** + * Test state, + * cache line alignment and padding handled in 'perf_shm_t' + */ +typedef struct { + int queues; + int step; + int samples; + int num_cores; + int reset_flag; + double cpu_mhz; + uint64_t cpu_hz; + uint64_t print_count; + env_atomic64_t ready_count; + /* if using CONST_NUM_EVENTS:*/ + int free_flag; + env_atomic64_t freed_count; +} test_status_t; + +/** + * Performance test statistics (per core) + */ +typedef struct { + uint64_t events; + env_time_t begin_time; + env_time_t end_time; + env_time_t diff_time; + struct { + uint64_t events; + env_time_t hi_prio_ave; + env_time_t hi_prio_max; + env_time_t lo_prio_ave; + env_time_t lo_prio_max; + } latency; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} core_stat_t; + +COMPILE_TIME_ASSERT(sizeof(core_stat_t) % ENV_CACHE_LINE_SIZE == 0, + CORE_STAT_SIZE_ERROR); + +/** + * EO context data + */ +typedef struct { + em_eo_t eo_id; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} eo_context_t; + +COMPILE_TIME_ASSERT(sizeof(eo_context_t) % ENV_CACHE_LINE_SIZE == 0, + EO_CONTEXT_T__SIZE_ERROR); + +/** + * Queue context data + * Keep the scheduled queue context and the associated unscheduled queue + * context data in the same cache line for faster access - also eliminates + * the need to call em_queue_get_context() for the unscheduled queues. + */ +typedef struct { + struct scheduled_queue_context { + /** This queue */ + em_queue_t this_queue; + /** Event storage */ + em_queue_t storage; + /** Next queue */ + em_queue_t next_queue; + /** Priority of 'this_queue' */ + em_queue_prio_t prio; + /** Number of events enqueued in 'storage' */ + int stored_count; + } sch_q; + + struct unscheduled_queue_context { + /** This unscheduled queue */ + em_queue_t this_queue; + /** Next unscheduled queue */ + em_queue_t next_queue; + } unsch_q; + + /** Pad to multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} queue_context_t; + +COMPILE_TIME_ASSERT(sizeof(queue_context_t) % ENV_CACHE_LINE_SIZE == 0, + QUEUE_CONTEXT_SIZE_ERROR); + +/** + * Performance test event + */ +typedef struct { + /* Send time stamp */ + env_time_t send_time; + /* Sequence number */ + int seq; + /* Test data */ + uint8_t data[DATA_SIZE]; +} perf_event_t; + +/** + * Test shared memory + */ +typedef struct { + /* Event pool used by this application */ + em_pool_t pool; + + test_status_t test_status ENV_CACHE_LINE_ALIGNED; + + core_stat_t core_stat[MAX_CORES] ENV_CACHE_LINE_ALIGNED; + + eo_context_t eo_context_tbl[NUM_EOS] ENV_CACHE_LINE_ALIGNED; + + queue_context_t queue_context_tbl[NUM_QUEUES] ENV_CACHE_LINE_ALIGNED; + /* EO ID's */ + em_eo_t eo[NUM_EOS] ENV_CACHE_LINE_ALIGNED; +} perf_shm_t; + +COMPILE_TIME_ASSERT(sizeof(perf_shm_t) % ENV_CACHE_LINE_SIZE == 0, + PERF_SHM_T__SIZE_ERROR); + +/* EM-core local pointer to shared memory */ +static ENV_LOCAL perf_shm_t *perf_shm; + +/* EM-core local state */ +static ENV_LOCAL int core_state = CORE_STATE_MEASURE; + +static em_status_t +error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args); + +static void +queue_step(void); + +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); + +static em_status_t +stop(void *eo_context, em_eo_t eo); + +static void +receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, 
void *q_context); + +static int +update_test_state(em_event_t event, em_event_t unsch_event); + +static void +create_and_link_queues(int start_queue, int num_queues); + +static void +print_test_statistics(test_status_t *test_status, int print_header, + core_stat_t core_stat[]); + +static inline em_event_t +alloc_free_per_event(em_event_t event); + +static inline void +measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, + env_time_t recv_time); + +/** + * Main function + * + * Call cm_setup() to perform test & EM setup common for all the + * test applications. + * + * cm_setup() will call test_init() and test_start() and launch + * the EM dispatch loop on every EM-core. + */ +int main(int argc, char *argv[]) +{ + return cm_setup(argc, argv); +} + +/** + * Test error handler + * + * @param eo Execution object id + * @param error The error code + * @param escope Error scope + * @param args List of arguments (__FILE__, __func__, __LINE__, + * (format), ## __VA_ARGS__) + * + * @return The original error code. + */ +static em_status_t +error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, va_list args) +{ + if (escope == EM_ESCOPE_QUEUE_CREATE && !EM_ERROR_IS_FATAL(error)) { + APPL_PRINT("\nUnable to create more queues\n\n" + "Test finished\n"); + raise(SIGINT); + return error; + } + + if (appl_shm->exit_flag && EM_ESCOPE(escope) && + !EM_ERROR_IS_FATAL(error)) { + /* Suppress non-fatal EM-error logs during tear-down */ + if (escope == EM_ESCOPE_EO_ADD_QUEUE_SYNC) { + APPL_PRINT("\nExit: suppress queue setup error\n\n"); + return error; + } + } + + return test_error_handler(eo, error, escope, args); +} + +/** + * Init of the Queues performance test application. + * + * @attention Run on all cores. + * + * @see cm_setup() for setup and dispatch. + */ +void +test_init(void) +{ + int core = em_core_id(); + + if (core == 0) { + perf_shm = env_shared_reserve("PerfQueuesSharedMem", + sizeof(perf_shm_t)); + em_register_error_handler(error_handler); + } else { + perf_shm = env_shared_lookup("PerfQueuesSharedMem"); + } + + if (perf_shm == NULL) + test_error(EM_ERROR_SET_FATAL(0xec0de), 0xdead, + "Perf test queues init failed on EM-core: %u\n", + em_core_id()); + else if (core == 0) + memset(perf_shm, 0, sizeof(perf_shm_t)); +} + +/** + * Startup of the Queues performance test application. + * + * @attention Run only on EM core 0. + * + * @param appl_conf Application configuration + * + * @see cm_setup() for setup and dispatch. + */ +void +test_start(appl_conf_t *const appl_conf) +{ + eo_context_t *eo_ctx; + em_status_t ret, start_ret = EM_ERROR; + const int q_ctx_size = sizeof(perf_shm->queue_context_tbl); + int i; + + /* + * Store the event pool to use, use the EM default pool if no other + * pool is provided through the appl_conf. + */ + if (appl_conf->num_pools >= 1) + perf_shm->pool = appl_conf->pools[0]; + else + perf_shm->pool = EM_POOL_DEFAULT; + + APPL_PRINT("\n" + "***********************************************************\n" + "EM APPLICATION: '%s' initializing:\n" + " %s: %s() - EM-core:%i\n" + " Application running on %d EM-cores (procs:%d, threads:%d)\n" + " using event pool:%" PRI_POOL "\n" + " Max. 
NUM_QUEUES: %i\n" + " sizeof queue_context_tbl: %i kB\n" + "***********************************************************\n" + "\n", + appl_conf->name, NO_PATH(__FILE__), __func__, em_core_id(), + em_core_count(), + appl_conf->num_procs, appl_conf->num_threads, + perf_shm->pool, NUM_QUEUES, q_ctx_size / 1024); + + test_fatal_if(perf_shm->pool == EM_POOL_UNDEF, + "Undefined application event pool!"); + + perf_shm->test_status.cpu_hz = env_core_hz(); + perf_shm->test_status.cpu_mhz = (double)perf_shm->test_status.cpu_hz / + 1000000.0; + perf_shm->test_status.reset_flag = 0; + perf_shm->test_status.num_cores = em_core_count(); + perf_shm->test_status.free_flag = 0; + + env_atomic64_init(&perf_shm->test_status.ready_count); + env_atomic64_init(&perf_shm->test_status.freed_count); + + /* Create EOs */ + for (i = 0; i < NUM_EOS; i++) { + eo_ctx = &perf_shm->eo_context_tbl[i]; + perf_shm->eo[i] = em_eo_create("perf test eo", start, NULL, + stop, NULL, receive_func, + eo_ctx); + test_fatal_if(perf_shm->eo[i] == EM_EO_UNDEF, + "EO create failed:%d", i, NUM_EOS); + } + + APPL_PRINT(" EOs created\n"); + + /* + * Create and link queues + */ + if (CREATE_ALL_QUEUES_AT_STARTUP) /* Create ALL queues at once */ + create_and_link_queues(0, NUM_QUEUES); + else /* Create queues for the first step, then more before each step */ + create_and_link_queues(0, queue_steps[0]); + + /* Start EOs */ + for (i = 0; i < NUM_EOS; i++) { + ret = em_eo_start_sync(perf_shm->eo[i], &start_ret, NULL); + test_fatal_if(ret != EM_OK || start_ret != EM_OK, + "EO start(%d):%" PRI_STAT " %" PRI_STAT "", + i, ret, start_ret); + } + + queue_step(); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_eo_t eo; + em_status_t ret; + int i; + + (void)appl_conf; + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + /* Stop & delete EOs */ + for (i = 0; i < NUM_EOS; i++) { + eo = perf_shm->eo[i]; + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " stop:%" PRI_STAT "", + eo, ret); + + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, + "EO:%" PRI_EO " delete:%" PRI_STAT "", + eo, ret); + } + + /* Delete the unscheduled queues */ + for (i = 0; i < NUM_QUEUES; i++) { + queue_context_t *q_ctx = &perf_shm->queue_context_tbl[i]; + em_queue_t unsch_queue = q_ctx->unsch_q.this_queue; + em_event_t unsch_event; + + if (unsch_queue == EM_QUEUE_UNDEF) + continue; + + for (;;) { + unsch_event = em_queue_dequeue(unsch_queue); + if (unsch_event == EM_EVENT_UNDEF) + break; + em_free(unsch_event); + } + ret = em_queue_delete(unsch_queue); + test_fatal_if(ret != EM_OK, + "Unsch-Queue:%" PRI_QUEUE " delete:%" PRI_STAT "", + unsch_queue, ret); + } + /* Delete the unscheduled 'storage' queues */ + for (i = 0; i < NUM_QUEUES; i++) { + queue_context_t *q_ctx = &perf_shm->queue_context_tbl[i]; + em_queue_t unsch_queue = q_ctx->sch_q.storage; + em_event_t unsch_event; + + if (unsch_queue == EM_QUEUE_UNDEF) + continue; + + for (;;) { + unsch_event = em_queue_dequeue(unsch_queue); + if (unsch_event == EM_EVENT_UNDEF) + break; + em_free(unsch_event); + } + ret = em_queue_delete(unsch_queue); + test_fatal_if(ret != EM_OK, + "Unsch-Queue:%" PRI_QUEUE " delete:%" PRI_STAT "", + unsch_queue, ret); + } +} + +void +test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (core == 0) { + env_shared_free(perf_shm); + em_unregister_error_handler(); + } +} + +/** + * Allocate, initialize and send test step events. 
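+ * + * Two allocation modes (see CONST_NUM_EVENTS): with CONST_NUM_EVENTS > 0, + * a constant total of events is spread evenly over all queues of the step; + * otherwise NUM_EVENTS new events are allocated for each newly created + * scheduled and unscheduled queue.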
+ */ +static void +queue_step(void) +{ + queue_context_t *q_ctx; + perf_event_t *perf_event; + em_status_t ret; + const int first = perf_shm->test_status.queues; + const int step = perf_shm->test_status.step; + const int queue_count = queue_steps[step]; + int i, j; + + /* Allocate and send test events for the queues */ + if (CONST_NUM_EVENTS) { + for (i = 0; i < CONST_NUM_EVENTS / 2; i++) { + em_event_t unsch_event; + + unsch_event = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(unsch_event == EM_EVENT_UNDEF, + "EM alloc failed (%i)", i); + + /* Allocate events evenly to the queues */ + q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; + + ret = em_send(unsch_event, q_ctx->unsch_q.this_queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT "\n" + "Unsched-Q:%" PRI_QUEUE "", + ret, q_ctx->unsch_q.this_queue); + em_free(unsch_event); + return; + } + } + for (i = 0; i < CONST_NUM_EVENTS / 2; i++) { + em_event_t event; + + event = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, perf_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF || + sizeof(perf_event_t) != + em_event_get_size(event), + "EM alloc failed (%i)", i); + + perf_event = em_event_pointer(event); + perf_event->seq = i; + perf_event->send_time = env_time_global(); + + /* Allocate events evenly to the queues */ + q_ctx = &perf_shm->queue_context_tbl[i % queue_count]; + + ret = em_send(event, q_ctx->sch_q.this_queue); + if (unlikely(ret != EM_OK)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send:%" PRI_STAT "\n" + "Queue:%" PRI_QUEUE "", + ret, q_ctx->sch_q.this_queue); + em_free(event); + return; + } + } + } else { + for (i = first; i < queue_count; i++) { + em_event_t unsch_events[NUM_EVENTS]; + int num; + + q_ctx = &perf_shm->queue_context_tbl[i]; + + for (j = 0; j < NUM_EVENTS; j++) { + unsch_events[j] = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(unsch_events[j] == EM_EVENT_UNDEF, + "EM alloc failed (%d, %d)", i, j); + } + num = em_send_multi(unsch_events, NUM_EVENTS, + q_ctx->unsch_q.this_queue); + if (unlikely(num != NUM_EVENTS)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send multi:%d\n" + "Unsched-Q:%" PRI_QUEUE "", + num, q_ctx->unsch_q.this_queue); + em_free_multi(&unsch_events[num], NUM_EVENTS - num); + return; + } + } + for (i = first; i < queue_count; i++) { + em_event_t events[NUM_EVENTS]; + int num; + + q_ctx = &perf_shm->queue_context_tbl[i]; + + for (j = 0; j < NUM_EVENTS; j++) { + events[j] = em_alloc(sizeof(perf_event_t), + EM_EVENT_TYPE_SW, + perf_shm->pool); + test_fatal_if(events[j] == EM_EVENT_UNDEF || + sizeof(perf_event_t) != + em_event_get_size(events[j]), + "EM alloc failed (%d,%d)", i, j); + + perf_event = em_event_pointer(events[j]); + perf_event->seq = i * NUM_EVENTS + j; + perf_event->send_time = env_time_global(); + } + num = em_send_multi(events, NUM_EVENTS, + q_ctx->sch_q.this_queue); + if (unlikely(num != NUM_EVENTS)) { + test_fatal_if(!appl_shm->exit_flag, + "EM send multi:%d\n" + "Queue:%" PRI_QUEUE "", + num, q_ctx->sch_q.this_queue); + em_free_multi(&events[num], NUM_EVENTS - num); + return; + } + } + } + + perf_shm->test_status.queues = queue_count; + perf_shm->test_status.step++; + + APPL_PRINT("\nNumber of queues: %6.0d + %d\n", + queue_count, queue_count); + if (CONST_NUM_EVENTS) + APPL_PRINT("Number of events: %6.0d + %d\n", + CONST_NUM_EVENTS / 2, CONST_NUM_EVENTS / 2); + else + APPL_PRINT("Number of events: %6.0d + %d\n", + queue_count * 
NUM_EVENTS, queue_count * NUM_EVENTS); +} + +/** + * @private + * + * EO start function. + * + */ +static em_status_t +start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + eo_context_t *eo_ctx = eo_context; + + (void)conf; + + APPL_PRINT("EO %" PRI_EO " starting.\n", eo); + + eo_ctx->eo_id = eo; + + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t +stop(void *eo_context, em_eo_t eo) +{ + em_status_t ret; + + (void)eo_context; + + APPL_PRINT("EO %" PRI_EO " stopping.\n", eo); + + /* remove and delete all of the EO's queues */ + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", + ret, eo); + + return EM_OK; +} + +/** + * @private + * + * EO receive function. + * + * Loops back events and calculates the event rate. + */ +static void +receive_func(void *eo_context, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_context) +{ + queue_context_t *q_ctx; + em_queue_t dst_queue; + em_queue_t src_unsch_queue; + em_queue_t dst_unsch_queue; + int ret; + int is_freed; + int i; + + (void)eo_context; + (void)type; + + if (unlikely(appl_shm->exit_flag)) { + em_free(event); + return; + } + + q_ctx = q_context; + + /* + * Store up to 'NUM_STORAGE - 1' events before sending them to the next + * queue with em_send_multi(). + */ + if (q_ctx->sch_q.stored_count < NUM_STORAGE - 1 && + !perf_shm->test_status.free_flag) { + em_status_t stat; + + q_ctx->sch_q.stored_count++; + stat = em_send(event, q_ctx->sch_q.storage); + test_fatal_if(stat != EM_OK, + "EM send:%" PRI_STAT " storage-Q:%" PRI_QUEUE "", + stat, q_ctx->sch_q.storage); + /* Return if the event was stored */ + return; + } + + /* + * 'NUM_STORAGE' events received (or test step ending), now send + * the scheduled events and an equal number of unscheduled events to + * the next queue. 
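+	 *
+	 * The stored events are dequeued from the storage queue in one
+	 * batch, the just-received event is appended last and a matching
+	 * number of events is dequeued from the unscheduled queue, after
+	 * which both batches are forwarded with em_send_multi().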
+ */ + em_event_t events[NUM_STORAGE]; + em_event_t unsch_events[NUM_STORAGE]; + perf_event_t *perf_events[NUM_STORAGE]; + int num, num_unsch; + env_time_t recv_time, send_time; + + num = em_queue_dequeue_multi(q_ctx->sch_q.storage, + events, NUM_STORAGE - 1); + q_ctx->sch_q.stored_count -= num; + + /* Store incoming event last */ + events[num] = event; + num++; + + /* + * Dequeue an unscheduled event for every received scheduled event + */ + src_unsch_queue = q_ctx->unsch_q.this_queue; + num_unsch = em_queue_dequeue_multi(src_unsch_queue, + unsch_events, num); + test_fatal_if(num_unsch != num, + "perf_test_queues: %d != %d", num_unsch, num); + + /* + * Helper: Update the test state, count recv events, + * calc & print stats, prepare for next step + */ + if (MEASURE_LATENCY) { + recv_time = env_time_global(); + for (i = 0; i < num; i++) + perf_events[i] = em_event_pointer(events[i]); + for (i = 0; i < num; i++) + measure_latency(perf_events[i], q_ctx, recv_time); + } + + if (ALLOC_FREE_PER_EVENT) + for (i = 0; i < num; i++) + events[i] = alloc_free_per_event(events[i]); + + dst_queue = q_ctx->sch_q.next_queue; + dst_unsch_queue = q_ctx->unsch_q.next_queue; + + is_freed = 0; + for (i = 0; i < num; i++) + if (update_test_state(events[i], unsch_events[i])) + is_freed++; + + num -= is_freed; + if (num == 0) + return; + + test_fatal_if(num < 0 || num > NUM_STORAGE, + "invalid number of events:%d", num); + test_fatal_if(queue != q_ctx->sch_q.this_queue, + "perf_test_queues: Queue config error"); + + /* Enqueue the unscheduled event to the next unscheduled queue */ + ret = em_send_multi(unsch_events, num, dst_unsch_queue); + test_fatal_if(ret != num, + "EM send multi:%d unsched-queue:%" PRI_QUEUE "", + ret, dst_unsch_queue); + + /* Send the scheduled event to the next scheduled queue */ + if (MEASURE_LATENCY) { + if (ALLOC_FREE_PER_EVENT) { + for (i = 0; i < num; i++) + perf_events[i] = em_event_pointer(events[i]); + } + send_time = env_time_global(); + for (i = 0; i < num; i++) + perf_events[i]->send_time = send_time; + } + ret = em_send_multi(events, num, dst_queue); + if (unlikely(ret != num)) { + for (i = ret; i < num; i++) + em_free(events[i]); + test_fatal_if(!appl_shm->exit_flag, + "EM send multi:%d queue:%" PRI_QUEUE "", + ret, dst_queue); + } +} + +/** + * Receive function helper: Update the test state + * + * Calculates the number of received events, maintains & prints test statistics + * and restarts/reconfigures the test for the next queue/event-setup + * + * @return '1' if the caller receive function should immediately return, + * '0' otherwise + */ +static inline int +update_test_state(em_event_t event, em_event_t unsch_event) +{ + uint64_t events; + uint64_t freed_count; + uint64_t ready_count; + const int core = em_core_id(); + test_status_t *const tstat = &perf_shm->test_status; + core_stat_t *const cstat = &perf_shm->core_stat[core]; + + events = cstat->events; + /* one scheduled and one unscheduled event received */ + events += 2; + + if (unlikely(tstat->reset_flag)) { + events = 0; + if (CONST_NUM_EVENTS) { + /* Free all old events before allocating new ones. 
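+			 * The last core to free its events sees 'freed_count'
+			 * reach CONST_NUM_EVENTS, resets the reset/free flags
+			 * and calls queue_step() to set up the next test step.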
*/ + if (unlikely(tstat->free_flag)) { + em_free(event); + em_free(unsch_event); + freed_count = + env_atomic64_add_return(&tstat->freed_count, 2); + if (freed_count == CONST_NUM_EVENTS) { + /* Last event */ + env_atomic64_set(&tstat->freed_count, + 0); + tstat->reset_flag = 0; + tstat->free_flag = 0; + queue_step(); + } + /* Req caller receive-func to return */ + return 1; + } + } + + if (unlikely(core_state != CORE_STATE_IDLE)) { + core_state = CORE_STATE_IDLE; + cstat->begin_time = ENV_TIME_NULL; + + ready_count = + env_atomic64_add_return(&tstat->ready_count, 1); + + if (ready_count == (uint64_t)tstat->num_cores) { + env_atomic64_set(&tstat->ready_count, 0); + + if (CONST_NUM_EVENTS) { + int sample = tstat->samples; + int queues = tstat->queues; + + if (sample == 0 && queues < NUM_QUEUES) + tstat->free_flag = 1; + else + tstat->reset_flag = 0; + } else { + tstat->reset_flag = 0; + } + } + } + } else if (unlikely(events == 2)) { + cstat->begin_time = env_time_global(); + cstat->latency.events = 0; + cstat->latency.hi_prio_ave = ENV_TIME_NULL; + cstat->latency.hi_prio_max = ENV_TIME_NULL; + cstat->latency.lo_prio_ave = ENV_TIME_NULL; + cstat->latency.lo_prio_max = ENV_TIME_NULL; + + core_state = CORE_STATE_MEASURE; + } else if (unlikely(events == EVENTS_PER_SAMPLE)) { + /* + * Measurements done for this step. Store results and continue + * receiving events until all cores are done. + */ + env_time_t begin_time, end_time; + + cstat->end_time = env_time_global(); + + end_time = cstat->end_time; + begin_time = cstat->begin_time; + cstat->diff_time = env_time_diff(end_time, begin_time); + + ready_count = env_atomic64_add_return(&tstat->ready_count, 1); + + /* + * Check whether all cores are done with the step, + * and if done proceed to the next step + */ + if (unlikely((int)ready_count == tstat->num_cores)) { + /* No real need for atomicity here, ran on last core*/ + env_atomic64_set(&tstat->ready_count, 0); + + tstat->reset_flag = 1; + tstat->samples++; + + /* + * Print statistics. + * Omit prints for the first sample round to allow the + * test to stabilize after setups and teardowns. + */ + if (tstat->samples > 1) { + int print_header = tstat->samples == 2 ? 1 : 0; + + print_test_statistics(tstat, print_header, + perf_shm->core_stat); + } + + /* + * Start next test step - setup new queues + */ + if (tstat->samples == NUM_SAMPLES && + tstat->queues < NUM_QUEUES) { + if (!CREATE_ALL_QUEUES_AT_STARTUP) { + int step = tstat->step; + int first_q = tstat->queues; + int num_qs = queue_steps[step] - + queue_steps[step - 1]; + + create_and_link_queues(first_q, num_qs); + } + + if (!CONST_NUM_EVENTS) + queue_step(); + + tstat->samples = 0; + } + } + } + + cstat->events = events; + + return 0; +} + +/** + * Creates a number of EM queues, associates them with EOs, and links them. 
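+ *
+ * Each queue context gets three queues: a scheduled queue, its unscheduled
+ * 'storage' queue and a chained unscheduled queue. The scheduled queues are
+ * added round-robin to the NUM_EOS EOs, both queue types are linked into
+ * per-context chains and the first queue of each chain is connected back to
+ * the last one.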
+ */
+static void
+create_and_link_queues(int start_queue, int num_queues)
+{
+	int i, j;
+	em_queue_t queue, next_queue;
+	em_queue_t queue_unscheduled, next_unscheduled, storage_unscheduled;
+	em_queue_conf_t unsch_conf;
+	em_queue_prio_t prio;
+	em_status_t ret;
+	queue_context_t *q_ctx;
+
+	APPL_PRINT("\nCreate new queues - scheduled:%d + unscheduled:%d\n",
+		   num_queues, num_queues);
+
+	if (num_queues % NUM_EOS != 0) {
+		APPL_PRINT("%s() arg 'num_queues'=%d not multiple of NUM_EOS=%d\n",
+			   __func__, num_queues, NUM_EOS);
+		return;
+	}
+
+	memset(&unsch_conf, 0, sizeof(unsch_conf));
+	if (QUEUE_TYPE == EM_QUEUE_TYPE_ATOMIC) {
+		/*
+		 * If the EO receive functions run within an atomic context,
+		 * the unscheduled-queue enqueues/dequeues never race, so the
+		 * queues can be flagged non-multithread-safe to possibly
+		 * boost performance.
+		 */
+		unsch_conf.flags |= EM_QUEUE_FLAG_ENQ_NOT_MTSAFE;
+		unsch_conf.flags |= EM_QUEUE_FLAG_DEQ_NOT_MTSAFE;
+	}
+
+	for (i = start_queue; i < (start_queue + num_queues); i += NUM_EOS) {
+		next_queue = EM_QUEUE_UNDEF;
+		next_unscheduled = EM_QUEUE_UNDEF;
+
+		for (j = 0; j < NUM_EOS; j++) {
+			prio = EM_QUEUE_PRIO_NORMAL;
+
+			if (MEASURE_LATENCY) {
+				if (j == 0)
+					prio = EM_QUEUE_PRIO_HIGH;
+			}
+
+			q_ctx = &perf_shm->queue_context_tbl[i + j];
+
+			/*
+			 * Create a scheduled queue + associated storage queue
+			 */
+			/* storage first... */
+			storage_unscheduled =
+			em_queue_create("unscheduled_storage",
+					EM_QUEUE_TYPE_UNSCHEDULED,
+					EM_QUEUE_PRIO_UNDEF,
+					EM_QUEUE_GROUP_UNDEF,
+					&unsch_conf);
+			if (storage_unscheduled == EM_QUEUE_UNDEF) {
+				APPL_PRINT("Max nbr of supported queues: %d\n",
+					   3 * i + 1);
+				return;
+			}
+			q_ctx->sch_q.storage = storage_unscheduled;
+
+			/* ...then sched queue */
+			queue = em_queue_create("sched_queue", QUEUE_TYPE, prio,
+						EM_QUEUE_GROUP_DEFAULT, NULL);
+			if (queue == EM_QUEUE_UNDEF) {
+				APPL_PRINT("Max nbr of supported queues: %d\n",
+					   3 * i);
+				return;
+			}
+			q_ctx->sch_q.this_queue = queue;
+			q_ctx->sch_q.prio = prio;
+
+			ret = em_queue_set_context(queue, q_ctx);
+			test_fatal_if(ret != EM_OK,
+				      "em_queue_set_context:%" PRI_STAT "\n"
+				      "EO:%" PRI_EO " Q:%" PRI_QUEUE "",
+				      ret, perf_shm->eo[j], queue);
+			/* Add the scheduled queue to an EO and enable it */
+			ret = em_eo_add_queue_sync(perf_shm->eo[j], queue);
+			if (unlikely(ret != EM_OK)) {
+				test_fatal_if(!appl_shm->exit_flag,
+					      "em_eo_add_queue_sync():%" PRI_STAT "\n"
+					      "EO:%" PRI_EO " Q:%" PRI_QUEUE "",
+					      ret, perf_shm->eo[j], queue);
+				em_queue_delete(queue);
+				return;
+			}
+
+			/* Link scheduled queues */
+			q_ctx->sch_q.next_queue = next_queue;
+
+			/*
+			 * Create a new unscheduled queue
+			 */
+			queue_unscheduled =
+			em_queue_create("unscheduled_queue",
+					EM_QUEUE_TYPE_UNSCHEDULED,
+					EM_QUEUE_PRIO_UNDEF,
+					EM_QUEUE_GROUP_UNDEF,
+					&unsch_conf);
+			if (queue_unscheduled == EM_QUEUE_UNDEF) {
+				APPL_PRINT("Max nbr of supported queues: %d\n",
+					   3 * i + 2);
+				return;
+			}
+			q_ctx->unsch_q.this_queue = queue_unscheduled;
+
+			/* Link unscheduled queues */
+			q_ctx->unsch_q.next_queue = next_unscheduled;
+
+			/*
+			 * Set the same top-level queue context for both the
+			 * scheduled and the unscheduled queue. Queue-specific
+			 * context is accessed via q_ctx->sch_q.* or
+			 * q_ctx->unsch_q.*, which eliminates the need to call
+			 * em_queue_get_context() for each event received from
+			 * the unscheduled queues.
+			 */
+			ret = em_queue_set_context(queue_unscheduled, q_ctx);
+			test_fatal_if(ret != EM_OK,
+				      "em_queue_set_context:%" PRI_STAT "\n"
+				      "Unscheduled queue:%" PRI_QUEUE "",
+				      ret, queue_unscheduled);
+
+			/* Sanity check */
+			test_fatal_if(em_queue_get_context(queue) !=
em_queue_get_context(queue_unscheduled), + "em_queue_get_context failed."); + + next_queue = queue; + next_unscheduled = queue_unscheduled; + } + + /* Connect first scheduled queue to the last */ + q_ctx = &perf_shm->queue_context_tbl[i + 0]; + q_ctx->sch_q.next_queue = next_queue; + q_ctx->unsch_q.next_queue = next_unscheduled; + } +} + +/** + * Print test statistics + */ +static void +print_test_statistics(test_status_t *test_status, int print_header, + core_stat_t core_stat[]) +{ + const int num_cores = test_status->num_cores; + const uint64_t cpu_hz = test_status->cpu_hz; + const double cpu_mhz = test_status->cpu_mhz; + const uint64_t total_events = (uint64_t)num_cores * EVENTS_PER_SAMPLE; + const uint64_t print_count = test_status->print_count++; + env_time_t total_time = ENV_TIME_NULL; + + for (int i = 0; i < num_cores; i++) + total_time = env_time_sum(total_time, core_stat[i].diff_time); + + double cycles_per_event = 0.0; + double events_per_sec = 0.0; + + if (likely(total_events > 0)) + cycles_per_event = env_time_to_cycles(total_time, cpu_hz) / + (double)total_events; + if (likely(cycles_per_event > 0)) /* Million events/s: */ + events_per_sec = cpu_mhz * num_cores / cycles_per_event; + + /* + * Print without latency statistics + */ + if (!MEASURE_LATENCY) { + if (print_header) + APPL_PRINT(RESULT_PRINTF_HDR); + APPL_PRINT(RESULT_PRINTF_FMT, + cycles_per_event, events_per_sec, + cpu_mhz, print_count); + return; + } + + /* + * Print with latency statistics + */ + uint64_t latency_events = 0; + env_time_t latency_hi_ave = ENV_TIME_NULL; + env_time_t latency_hi_max = ENV_TIME_NULL; + env_time_t latency_lo_ave = ENV_TIME_NULL; + env_time_t latency_lo_max = ENV_TIME_NULL; + + for (int i = 0; i < num_cores; i++) { + latency_events += core_stat[i].latency.events; + + latency_hi_ave = env_time_sum(latency_hi_ave, + core_stat[i].latency.hi_prio_ave); + latency_lo_ave = env_time_sum(latency_lo_ave, + core_stat[i].latency.lo_prio_ave); + + if (env_time_cmp(core_stat[i].latency.hi_prio_max, + latency_hi_max) > 0) { + latency_hi_max = core_stat[i].latency.hi_prio_max; + } + if (env_time_cmp(core_stat[i].latency.lo_prio_max, + latency_lo_max) > 0) { + latency_lo_max = core_stat[i].latency.lo_prio_max; + } + } + + double lat_per_hi_ave = 0.0; + double lat_per_lo_ave = 0.0; + + if (likely(latency_events > 0)) { + lat_per_hi_ave = env_time_to_cycles(latency_hi_ave, cpu_hz) / + (double)latency_events; + lat_per_lo_ave = env_time_to_cycles(latency_lo_ave, cpu_hz) / + (double)latency_events; + } + + if (print_header) + APPL_PRINT(RESULT_PRINTF_LATENCY_HDR); + APPL_PRINT(RESULT_PRINTF_LATENCY_FMT, + cycles_per_event, events_per_sec, lat_per_hi_ave, + env_time_to_cycles(latency_hi_max, cpu_hz), + lat_per_lo_ave, + env_time_to_cycles(latency_lo_max, cpu_hz), + cpu_mhz, print_count); +} + +/** + * Free the input event and allocate a new one instead + */ +static inline em_event_t +alloc_free_per_event(em_event_t event) +{ + perf_event_t *perf_event = em_event_pointer(event); + env_time_t send_time = perf_event->send_time; + int seq = perf_event->seq; + uint32_t event_size = em_event_get_size(event); + + em_free(event); + + event = em_alloc(event_size, EM_EVENT_TYPE_SW, perf_shm->pool); + + perf_event = em_event_pointer(event); + + perf_event->send_time = send_time; + perf_event->seq = seq; + + return event; +} + +/** + * Measure the scheduling latency per event + */ +static inline void +measure_latency(perf_event_t *const perf_event, queue_context_t *const q_ctx, + env_time_t recv_time) +{ + const int 
core = em_core_id(); + core_stat_t *const cstat = &perf_shm->core_stat[core]; + const env_time_t send_time = perf_event->send_time; + env_time_t latency; + + if (perf_shm->test_status.reset_flag || + cstat->events == 0 || cstat->events >= EVENTS_PER_SAMPLE) + return; + + cstat->latency.events++; + + latency = env_time_diff(recv_time, send_time); + + if (q_ctx->sch_q.prio == EM_QUEUE_PRIO_HIGH) { + cstat->latency.hi_prio_ave = + env_time_sum(cstat->latency.hi_prio_ave, latency); + if (env_time_cmp(latency, cstat->latency.hi_prio_max) > 0) + cstat->latency.hi_prio_max = latency; + } else { + cstat->latency.lo_prio_ave = + env_time_sum(cstat->latency.lo_prio_ave, latency); + if (env_time_cmp(latency, cstat->latency.lo_prio_max) > 0) + cstat->latency.lo_prio_max = latency; + } +} diff --git a/programs/performance/timer_test_periodic.c b/programs/performance/timer_test_periodic.c index 28fe3282..fe5f4030 100644 --- a/programs/performance/timer_test_periodic.c +++ b/programs/performance/timer_test_periodic.c @@ -1,2152 +1,2157 @@ -/* - * Copyright (c) 2020-2021, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine timer test for periodic timeouts. - * - * see instructions - string at timer_test_periodic.h. - * - * Exception/error management is simplified and aborts on any error. 
- */ -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "cm_setup.h" -#include "cm_error_handler.h" - -#include "timer_test_periodic.h" - -#define VERSION "WIP v0.9" -struct { - int num_periodic; - uint64_t res_ns; - uint64_t res_hz; - uint64_t period_ns; - int64_t first_ns; - uint64_t max_period_ns; - uint64_t min_period_ns; - uint64_t min_work_ns; - uint64_t max_work_ns; - unsigned int work_prop; - int clock_src; - const char *csv; - int num_runs; - int tracebuf; - int stoplim; - int noskip; - int profile; - int dispatch; - int jobs; - long cpucycles; - int info_only; - int usehuge; /* for trace buffer */ - int bg_events; - uint64_t bg_time_ns; - int bg_size; - int bg_chunk; - int mz_mb; - int mz_huge; - uint64_t mz_ns; - int abort; /* for testing abnormal exit */ - int num_timers; - int no_del; - -} g_options = { .num_periodic = 1, /* defaults for basic check */ - .res_ns = DEF_RES_NS, - .res_hz = 0, - .period_ns = DEF_PERIOD * DEF_RES_NS, - .first_ns = 0, - .max_period_ns = 0, /* max,min updated in init if not given cmdline */ - .min_period_ns = 0, - .min_work_ns = 0, - .max_work_ns = 0, - .work_prop = 0, - .clock_src = EM_TIMER_CLKSRC_DEFAULT, - .csv = NULL, - .num_runs = 1, - .tracebuf = DEF_TMO_DATA, - .stoplim = ((STOP_THRESHOLD * DEF_TMO_DATA) / 100), - .noskip = 1, - .profile = 0, - .dispatch = 0, - .jobs = 0, - .cpucycles = 0, - .info_only = 0, - .usehuge = 0, - .bg_events = 0, - .bg_time_ns = 10000, - .bg_size = 5000 * 1024, - .bg_chunk = 50 * 1024, - .mz_mb = 0, - .mz_huge = 0, - .mz_ns = 0, - .abort = 0, - .num_timers = 1, - .no_del = 0 - }; - -typedef struct global_stats_t { - uint64_t num_late; /* ack late */ - int64_t max_dev_ns; /* +- max deviation form target */ - int64_t max_early_ns; /* max arrival before target time */ - uint64_t num_tmo; /* total received tmo count */ - int max_cpu; /* max CPU load % (any single) */ - uint64_t max_dispatch; /* max EO receive time */ -} global_stats_t; - -typedef struct app_eo_ctx_t { - e_state state; - em_tmo_t heartbeat_tmo; - em_timer_t test_tmr[MAX_TEST_TIMERS]; - em_queue_t hb_q; - em_queue_t test_q; - em_queue_t stop_q; - em_queue_t bg_q; - int cooloff; - int last_hbcount; - uint64_t hb_hz; - uint64_t test_hz; - uint64_t time_hz; - uint64_t meas_test_hz; - uint64_t meas_time_hz; - uint64_t linux_hz; - uint64_t max_period; - time_stamp started; - time_stamp stopped; - void *bg_data; - void *mz_data; - uint64_t mz_count; - int stop_sent; - em_atomic_group_t agrp; - global_stats_t global_stat; - tmo_setup *tmo_data; - core_data cdat[MAX_CORES]; -} app_eo_ctx_t; - -typedef struct timer_app_shm_t { - em_pool_t pool; - app_eo_ctx_t eo_context; - em_timer_t hb_tmr; - em_timer_t test_tmr[MAX_TEST_TIMERS]; -} timer_app_shm_t; - -#if defined(__aarch64__) -static inline uint64_t get_cpu_cycle(void) -{ - uint64_t r; - - __asm__ volatile ("mrs %0, pmccntr_el0" : "=r"(r) :: "memory"); - return r; -} -#elif defined(__x86_64__) -static inline uint64_t get_cpu_cycle(void) -{ - uint32_t a, d; - - __asm__ volatile ("rdtsc" : "=a"(a), "=d"(d) :: "memory"); - return (uint64_t)a | ((uint64_t)d) << 32; -} -#else -#error "Code supports Aarch64 or x86_64" -#endif - -/* EM-thread locals */ -static __thread timer_app_shm_t *m_shm; - -static void start_periodic(app_eo_ctx_t *eo_context); -static int handle_periodic(app_eo_ctx_t *eo_context, em_event_t event); -static void send_stop(app_eo_ctx_t 
*eo_context); -static void handle_heartbeat(app_eo_ctx_t *eo_context, em_event_t event); -static void usage(void); -static int parse_my_args(int first, int argc, char *argv[]); -static void analyze(app_eo_ctx_t *eo_ctx); -static void write_trace(app_eo_ctx_t *eo_ctx, const char *name); -static void cleanup(app_eo_ctx_t *eo_ctx); -static int add_trace(app_eo_ctx_t *eo_ctx, int id, e_op op, uint64_t ns, int count, int tidx); -static uint64_t linux_time_ns(void); -static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); -static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo); -static em_status_t app_eo_stop(void *eo_context, em_eo_t eo); -static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo); -static void app_eo_receive(void *eo_context, em_event_t event, - em_event_type_t type, em_queue_t queue, void *q_context); -static time_stamp get_time(void); -static uint64_t time_to_ns(time_stamp t); -static time_stamp time_diff(time_stamp t2, time_stamp t1); -static time_stamp time_sum(time_stamp t1, time_stamp t2); -static int arg_to_ns(const char *s, int64_t *val); -static void profile_statistics(e_op op, int cores, app_eo_ctx_t *eo_ctx); -static void profile_all_stats(int cores, app_eo_ctx_t *eo_ctx); -static void analyze_measure(app_eo_ctx_t *eo_ctx, uint64_t linuxns, - uint64_t tmrtick, time_stamp timetick); -static void timing_statistics(app_eo_ctx_t *eo_ctx); -static void add_prof(app_eo_ctx_t *eo_ctx, time_stamp t1, e_op op, app_msg_t *msg); -static int do_one_tmo(int id, app_eo_ctx_t *eo_ctx, - time_stamp *min, time_stamp *max, time_stamp *first, - int64_t *tgt_max_ns, int64_t *max_early_ns); -static tmo_trace *find_tmo(app_eo_ctx_t *eo_ctx, int id, int count, int *last); -static uint64_t random_tmo_ns(void); -static uint64_t random_work_ns(rnd_state_t *rng); -static void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, - em_queue_t *queue, void **q_ctx); -static void exit_cb(em_eo_t eo); -static void send_bg_events(app_eo_ctx_t *eo_ctx); -static int do_bg_work(em_event_t evt, app_eo_ctx_t *eo_ctx); -static int do_memzero(app_msg_t *msg, app_eo_ctx_t *eo_ctx); -static em_status_t my_error_handler(em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args); -static void *allocate_tracebuf(int numbuf, size_t bufsize, size_t *realsize); -static void free_tracebuf(void *ptr, size_t realsize); -static void prefault(void *buf, size_t size); -static void show_global_stats(app_eo_ctx_t *eo_ctx); - -/* --------------------------------------- */ -em_status_t my_error_handler(em_eo_t eo, em_status_t error, - em_escope_t escope, va_list args) -{ - if (escope == 0xDEAD) { /* test_fatal_if */ - char *file = va_arg(args, char*); - const char *func = va_arg(args, const char*); - const int line = va_arg(args, const int); - const char *format = va_arg(args, const char*); - const char *base = basename(file); - - #pragma GCC diagnostic push - #pragma GCC diagnostic ignored "-Wformat-nonliteral" - fprintf(stderr, "FATAL - %s:%d, %s():\n", - base, line, func); - vfprintf(stderr, format, args); - #pragma GCC diagnostic pop - } - return test_error_handler(eo, error, escope, args); -} - -void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, - em_queue_t *queue, void **q_ctx) -{ - static int count; - app_eo_ctx_t *const my_eo_ctx = *eo_ctx; - - (void)eo; - (void)queue; - (void)q_ctx; - - if (unlikely(!my_eo_ctx)) - return; - - if (g_options.dispatch) { - for (int i = 0; i < num; i++) { - app_msg_t *msg = 
em_event_pointer(events[i]); - - add_trace(my_eo_ctx, msg->id, OP_PROF_ENTER_CB, - 0, count++, -1); - } - } - my_eo_ctx->cdat[em_core_id()].enter = get_time(); -} - -void exit_cb(em_eo_t eo) -{ - static int count; - app_eo_ctx_t *const my_eo_ctx = em_eo_get_context(eo); - - if (unlikely(!my_eo_ctx)) - return; - - if (g_options.dispatch) - add_trace(my_eo_ctx, -1, OP_PROF_EXIT_CB, 0, count++, -1); - - core_data *cdat = &my_eo_ctx->cdat[em_core_id()]; - time_stamp took; - - if (__atomic_load_n(&my_eo_ctx->state, __ATOMIC_ACQUIRE) == STATE_RUN) { - took = time_diff(get_time(), cdat->enter); - cdat->acc_time = time_sum(cdat->acc_time, took); - } -} - -void prefault(void *buf, size_t size) -{ - uint8_t *ptr = (uint8_t *)buf; - - /* write all pages to allocate and pre-fault (reduce runtime jitter) */ - APPL_PRINT("Pre-faulting %lu bytes at %p (EM core %d)\n", size, buf, em_core_id()); - for (size_t i = 0; i < size; i += 4096) - *(ptr + i) = (uint8_t)i; -} - -void *allocate_tracebuf(int numbuf, size_t bufsize, size_t *realsize) -{ - if (g_options.usehuge) { - *realsize = (numbuf + 1) * bufsize; - void *ptr = mmap(NULL, *realsize, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_HUGETLB | MAP_LOCKED, - -1, 0); - if (ptr == MAP_FAILED) { - APPL_PRINT("Huge page mapping failed for trace buffer (%lu bytes)\n", - *realsize); - return NULL; - } else { - return ptr; - } - - } else { - void *buf = calloc(numbuf + 1, bufsize); - - *realsize = numbuf * bufsize; - prefault(buf, *realsize); - return buf; - } -} - -void free_tracebuf(void *ptr, size_t realsize) -{ - if (g_options.usehuge) - munmap(ptr, realsize); - else - free(ptr); -} - -inline time_stamp get_time(void) -{ - time_stamp t; - - if (unlikely(g_options.cpucycles)) - t.u64 = get_cpu_cycle(); - else - t.odp = odp_time_global(); - return t; -} - -uint64_t time_to_ns(time_stamp t) -{ - double ns; - - if (unlikely(g_options.cpucycles)) { /* todo drop cpucycles choice to get rid of this? 
*/ - double hz = (double)m_shm->eo_context.time_hz; - - ns = (1000000000.0 / hz) * (double)t.u64; - } else { - ns = (double)odp_time_to_ns(t.odp); - } - return round(ns); -} - -time_stamp time_diff(time_stamp t2, time_stamp t1) -{ - time_stamp t; - - if (unlikely(g_options.cpucycles)) - t.u64 = t2.u64 - t1.u64; - else - t.odp = odp_time_diff(t2.odp, t1.odp); - - return t; -} - -time_stamp time_sum(time_stamp t1, time_stamp t2) -{ - time_stamp t; - - if (unlikely(g_options.cpucycles)) - t.u64 = t1.u64 + t2.u64; - else - t.odp = odp_time_sum(t1.odp, t2.odp); - return t; -} - -uint64_t linux_time_ns(void) -{ - struct timespec ts; - uint64_t ns; - - clock_gettime(CLOCK_MONOTONIC_RAW, &ts); - ns = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); - return ns; -} - -int arg_to_ns(const char *s, int64_t *val) -{ - char *endp; - int64_t num, mul = 1; - - num = strtol(s, &endp, 0); - if (num == 0 && *s != '0') - return 0; - - if (*endp != '\0') - switch (*endp) { - case 'n': - mul = 1; /* ns */ - break; - case 'u': - mul = 1000; /* us */ - break; - case 'm': - mul = 1000 * 1000; /* ms */ - break; - case 's': - mul = 1000 * 1000 * 1000; /* s */ - break; - default: - return 0; - } - - *val = num * mul; - return 1; -} - -void send_stop(app_eo_ctx_t *eo_ctx) -{ - em_status_t ret; - - if (!eo_ctx->stop_sent) { /* in case state change gets delayed on event overload */ - em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); - - test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate stop event!\n"); - - app_msg_t *msg = em_event_pointer(event); - - msg->command = CMD_DONE; - msg->id = em_core_id(); - ret = em_send(event, eo_ctx->stop_q); - test_fatal_if(ret != EM_OK, "em_send(): %s %" PRI_STAT, __func__, ret); - eo_ctx->stop_sent++; - } -} - -void cleanup(app_eo_ctx_t *eo_ctx) -{ - time_stamp tz = {0}; - int cores = em_core_count(); - - for (int i = 0; i < cores; i++) { - eo_ctx->cdat[i].count = 0; - eo_ctx->cdat[i].cancelled = 0; - eo_ctx->cdat[i].jobs_deleted = 0; - eo_ctx->cdat[i].jobs = 0; - eo_ctx->cdat[i].acc_time = tz; - } -} - -void write_trace(app_eo_ctx_t *eo_ctx, const char *name) -{ - int cores = em_core_count(); - FILE *fle = stdout; - - if (strcmp(name, "stdout")) - fle = fopen(g_options.csv, "w"); - if (fle == NULL) { - APPL_PRINT("FAILED to open trace file\n"); - return; - } - - fprintf(fle, "\n\n#BEGIN TRACE FORMAT 2\n"); /* for offline analyzers */ - fprintf(fle, "res_ns,res_hz,period_ns,max_period_ns,clksrc,num_tmo,loops,"); - fprintf(fle, "traces,noskip,SW-ver,bg,mz,timers\n"); - fprintf(fle, "%lu,%lu,%lu,%lu,%d,%d,%d,%d,%d,%s,\"%d/%lu\",\"%d/%lu\",%d\n", - g_options.res_ns, - g_options.res_hz, - g_options.period_ns, - g_options.max_period_ns, - g_options.clock_src, - g_options.num_periodic, - g_options.num_runs, - g_options.tracebuf, - g_options.noskip, - VERSION, - g_options.bg_events, g_options.bg_time_ns / 1000UL, - g_options.mz_mb, g_options.mz_ns / 1000000UL, - g_options.num_timers); - fprintf(fle, "time_hz,meas_time_hz,timer_hz,meas_timer_hz,linux_hz\n"); - fprintf(fle, "%lu,%lu,%lu,%lu,%lu\n", - eo_ctx->time_hz, - eo_ctx->meas_time_hz, - eo_ctx->test_hz, - eo_ctx->meas_test_hz, - eo_ctx->linux_hz); - - fprintf(fle, "tmo_id,period_ns,period_ticks,ack_late"); - fprintf(fle, ",start_tick,start_ns,first_ns,first\n"); - for (int i = 0; i < g_options.num_periodic; i++) { - fprintf(fle, "%d,%lu,%lu,%lu,%lu,%lu,%lu,%lu\n", - i, eo_ctx->tmo_data[i].period_ns, - eo_ctx->tmo_data[i].ticks, - eo_ctx->tmo_data[i].ack_late, - eo_ctx->tmo_data[i].start, - 
time_to_ns(eo_ctx->tmo_data[i].start_ts), - (uint64_t)eo_ctx->tmo_data[i].first_ns, - eo_ctx->tmo_data[i].first); - } - - fprintf(fle, "id,op,tick,time_ns,linux_time_ns,counter,core,timer\n"); - for (int c = 0; c < cores; c++) { - for (int i = 0; i < eo_ctx->cdat[c].count; i++) { - uint64_t ns; - - if (eo_ctx->cdat[c].trc[i].op >= OP_PROF_ACK) { - /* it's tick diff */ - ns = time_to_ns(eo_ctx->cdat[c].trc[i].linuxt); - } else { /* it's ns from linux */ - ns = eo_ctx->cdat[c].trc[i].linuxt.u64; - } - - fprintf(fle, "%d,%s,%lu,%lu,%lu,%d,%d,%d\n", - eo_ctx->cdat[c].trc[i].id, - op_labels[eo_ctx->cdat[c].trc[i].op], - eo_ctx->cdat[c].trc[i].tick, - time_to_ns(eo_ctx->cdat[c].trc[i].ts), - ns, - eo_ctx->cdat[c].trc[i].count, - c, - eo_ctx->cdat[c].trc[i].tidx); - } - } - fprintf(fle, "#END TRACE\n\n"); - if (fle != stdout) - fclose(fle); -} - -void show_global_stats(app_eo_ctx_t *eo_ctx) -{ - APPL_PRINT("\nTOTAL STATS:\n"); - APPL_PRINT(" Num tmo: %lu\n", eo_ctx->global_stat.num_tmo); - APPL_PRINT(" Num late ack: %lu", eo_ctx->global_stat.num_late); - APPL_PRINT(" (%lu%%)\n", - (eo_ctx->global_stat.num_late * 100) / eo_ctx->global_stat.num_tmo); - APPL_PRINT(" Max early arrival: %.1fus %s\n", - ((double)eo_ctx->global_stat.max_early_ns) / 1000.0, - (uint64_t)llabs(eo_ctx->global_stat.max_early_ns) > g_options.res_ns ? "!" : ""); - APPL_PRINT(" Max diff from tgt: %.1fus (res %.1fus) %s\n", - ((double)eo_ctx->global_stat.max_dev_ns) / 1000.0, - (double)g_options.res_ns / 1000.0, - (uint64_t)llabs(eo_ctx->global_stat.max_dev_ns) > (2 * g_options.res_ns) ? - ">2x res!" : ""); - APPL_PRINT(" Max CPU load: %d%%\n", eo_ctx->global_stat.max_cpu); - if (eo_ctx->global_stat.max_dispatch) - APPL_PRINT(" Max EO rcv time: %luns\n", eo_ctx->global_stat.max_dispatch); - APPL_PRINT("\n"); -} - -uint64_t random_tmo_ns(void) -{ - uint64_t r = random() % (g_options.max_period_ns - g_options.min_period_ns + 1); - - return r + g_options.min_period_ns; /* ns between min/max period */ -} - -uint64_t random_work_ns(rnd_state_t *rng) -{ - uint64_t r; - int32_t r1; - - random_r(&rng->rdata, &r1); - r = (uint64_t)r1; - if (r % 100 >= g_options.work_prop) /* propability of work roughly */ - return 0; - - random_r(&rng->rdata, &r1); - r = (uint64_t)r1 % (g_options.max_work_ns - g_options.min_work_ns + 1); - return r + g_options.min_work_ns; -} - -tmo_trace *find_tmo(app_eo_ctx_t *eo_ctx, int id, int count, int *last) -{ - int cores = em_core_count(); - tmo_trace *trc = NULL; - int last_count = 0; - - for (int c = 0; c < cores; c++) { - for (int i = 0; i < eo_ctx->cdat[c].count; i++) { /* find id */ - if (eo_ctx->cdat[c].trc[i].op == OP_TMO && - eo_ctx->cdat[c].trc[i].id == id) { /* this TMO */ - if (eo_ctx->cdat[c].trc[i].count == count) { - trc = &eo_ctx->cdat[c].trc[i]; - } else { - /* always run through for last_count */ - if (eo_ctx->cdat[c].trc[i].count > last_count) - last_count = eo_ctx->cdat[c].trc[i].count; - } - } - } - } - *last = last_count; - return trc; -} - -int do_one_tmo(int id, app_eo_ctx_t *eo_ctx, - time_stamp *min, time_stamp *max, time_stamp *first, - int64_t *tgt_max, int64_t *max_early_ns) -{ - int num = 0; - time_stamp diff; - time_stamp prev = {0}; - int last = 0; - int last_num; - uint64_t period_ns = eo_ctx->tmo_data[id].period_ns; - uint64_t first_ns = time_to_ns(eo_ctx->tmo_data[id].start_ts); - int64_t max_tgt_diff = 0; - - max->u64 = 0; - min->u64 = INT64_MAX; - - /* find in sequential order for diff to work. 
TODO this gets very slow with many tmos */ - - for (int count = 1; count < g_options.tracebuf; count++) { - tmo_trace *tmo = find_tmo(eo_ctx, id, count, &last_num); - - if (!tmo) { - if (last != count - 1) - APPL_PRINT("MISSING TMO: id %d, count %d\n", id, count); - *tgt_max = max_tgt_diff; - return num; - } - last++; - if (!num) { /* skip first for min/max but store time */ - diff = time_diff(tmo->ts, eo_ctx->tmo_data[id].start_ts); - *first = diff; - if (eo_ctx->tmo_data[id].first_ns != eo_ctx->tmo_data[id].period_ns) - first_ns = time_to_ns(tmo->ts); /* ignore first */ - - } else { - diff = time_diff(tmo->ts, prev); - if (last_num > count) { /*skip last diff, could be while stopping */ - if (time_to_ns(diff) > time_to_ns(*max)) - *max = diff; - if (time_to_ns(diff) < time_to_ns(*min)) - *min = diff; - - /* calculate distance to target */ - uint64_t tgt = first_ns + count * period_ns; - int64_t tgtdiff = (int64_t)time_to_ns(tmo->ts) - (int64_t)tgt; - - if (llabs(max_tgt_diff) < llabs(tgtdiff)) - max_tgt_diff = tgtdiff; - if (tgtdiff < *max_early_ns) - *max_early_ns = tgtdiff; - } - } - prev = tmo->ts; - num++; - } - *tgt_max = max_tgt_diff; - return num; -} - -void timing_statistics(app_eo_ctx_t *eo_ctx) -{ - /* basic statistics, more with offline tools (-w) */ - time_stamp max_ts = {0}, min_ts = {0}, first_ts = {0}; - int64_t tgt_max = 0; - const int cores = em_core_count(); - uint64_t system_used = time_to_ns(time_diff(eo_ctx->stopped, eo_ctx->started)); - - for (int c = 0; c < cores; c++) { - core_data *cdat = &eo_ctx->cdat[c]; - uint64_t eo_used = time_to_ns(cdat->acc_time); - double perc = (double)eo_used / (double)system_used * 100; - - if (perc > 100) - perc = 100; - APPL_PRINT("STAT_CORE [%d]: %d tmos, %d jobs, EO used %.1f%% CPU time\n", - c, cdat->count, cdat->jobs, perc); - if (perc > eo_ctx->global_stat.max_cpu) - eo_ctx->global_stat.max_cpu = round(perc); - eo_ctx->global_stat.num_tmo += cdat->count; - } - - for (int id = 0; id < g_options.num_periodic; id++) { /* each timeout */ - tmo_setup *tmo_data = &eo_ctx->tmo_data[id]; - int64_t max_early = 0; - int num = do_one_tmo(id, eo_ctx, &min_ts, &max_ts, &first_ts, &tgt_max, &max_early); - - APPL_PRINT("STAT-TMO [%d]: %d tmos (tmr#%d), period %luns (", - id, num, tmo_data->tidx, tmo_data->period_ns); - if (num > 1) { - int64_t maxdiff = (int64_t)time_to_ns(max_ts) - - (int64_t)tmo_data->period_ns; - - int64_t mindiff = (int64_t)time_to_ns(min_ts) - - (int64_t)tmo_data->period_ns; - - APPL_PRINT("%lu ticks), interval %ldns ... +%ldns", - tmo_data->ticks, mindiff, maxdiff); - APPL_PRINT(" (%ldus ... 
+%ldus)\n", mindiff / 1000, maxdiff / 1000); - APPL_PRINT(" - Max diff from target %.2fus\n", (double)tgt_max / 1000); - if (llabs(tgt_max) > llabs(eo_ctx->global_stat.max_dev_ns)) - eo_ctx->global_stat.max_dev_ns = tgt_max; - if (max_early < eo_ctx->global_stat.max_early_ns) - eo_ctx->global_stat.max_early_ns = max_early; - } else { - APPL_PRINT("%lu ticks), 1st period %lu\n", - tmo_data->ticks, time_to_ns(first_ts)); - } - if (num == 0) - APPL_PRINT(" ERROR - no timeouts received\n"); - } - - if (!g_options.dispatch) - return; - - /* - * g_options.dispatch set - * - * Calculate EO rcv min-max-avg: - */ - uint64_t min = UINT64_MAX, max = 0, avg = 0; - time_stamp prev_ts = { 0 }; - int prev_count = 0; - int num = 0; - - for (int c = 0; c < cores; c++) { - for (int i = 0; i < g_options.tracebuf; i++) { - core_data *cdat = &eo_ctx->cdat[c]; - - if (cdat->trc[i].op == OP_PROF_ENTER_CB) { - prev_ts = cdat->trc[i].ts; - prev_count = cdat->trc[i].count; - } else if (cdat->trc[i].op == OP_PROF_EXIT_CB) { - time_stamp diff_ts; - uint64_t ns; - - if (prev_count != cdat->trc[i].count) - APPL_PRINT("No enter cnt=%d\n", prev_count); - - diff_ts = time_diff(cdat->trc[i].ts, prev_ts); - ns = time_to_ns(diff_ts); - - if (ns < min) - min = ns; - if (ns > max) - max = ns; - avg += ns; - num++; - } - } - } - - APPL_PRINT("%d dispatcher enter-exit samples\n", num); - APPL_PRINT("PROF-DISPATCH rcv time: min %luns, max %luns, avg %luns\n", - min, max, num > 0 ? avg / num : 0); - - if (max > eo_ctx->global_stat.max_dispatch) - eo_ctx->global_stat.max_dispatch = max; -} - -void profile_statistics(e_op op, int cores, app_eo_ctx_t *eo_ctx) -{ - uint64_t min = UINT64_MAX; - uint64_t max = 0, avg = 0, num = 0; - uint64_t t; - - for (int c = 0; c < cores; c++) { - for (int i = 0; i < g_options.tracebuf; i++) { - if (eo_ctx->cdat[c].trc[i].op == op) { - t = time_to_ns(eo_ctx->cdat[c].trc[i].linuxt); - if (min > t) - min = t; - if (max < t) - max = t; - avg += t; - num++; - } - } - } - if (num) - APPL_PRINT("%s: %lu samples: min %luns, max %luns, avg %luns\n", - op_labels[op], num, min, max, avg / num); -} - -void profile_all_stats(int cores, app_eo_ctx_t *eo_ctx) -{ - APPL_PRINT("API profile statistics:\n"); - profile_statistics(OP_PROF_CREATE, cores, eo_ctx); - profile_statistics(OP_PROF_SET, cores, eo_ctx); - profile_statistics(OP_PROF_ACK, cores, eo_ctx); - profile_statistics(OP_PROF_DELETE, cores, eo_ctx); - profile_statistics(OP_PROF_CANCEL, cores, eo_ctx); -} - -void analyze(app_eo_ctx_t *eo_ctx) -{ - int cores = em_core_count(); - int cancelled = 0; - int job_del = 0; - - timing_statistics(eo_ctx); - - if (g_options.profile) - profile_all_stats(cores, eo_ctx); - - for (int c = 0; c < cores; c++) { - cancelled += eo_ctx->cdat[c].cancelled; - job_del += eo_ctx->cdat[c].jobs_deleted; - } - - show_global_stats(eo_ctx); - - /* write trace file */ - if (g_options.csv != NULL) - write_trace(eo_ctx, g_options.csv); - - APPL_PRINT("%d/%d timeouts were cancelled\n", cancelled, g_options.num_periodic); - - if (g_options.bg_events) - APPL_PRINT("%d/%d bg jobs were deleted\n", job_del, g_options.bg_events); - if (g_options.mz_mb) - APPL_PRINT("%lu memzeros\n", eo_ctx->mz_count); - double span = time_to_ns(eo_ctx->stopped) - time_to_ns(eo_ctx->started); - - span /= 1000000000; - APPL_PRINT("Timer runtime %fs\n", span); - - test_fatal_if(cancelled != g_options.num_periodic, - "Not all tmos deleted (did not arrive at all?)\n"); -} - -int add_trace(app_eo_ctx_t *eo_ctx, int id, e_op op, uint64_t ns, int count, int tidx) -{ - 
int core = em_core_id(); - tmo_trace *tmo = &eo_ctx->cdat[core].trc[eo_ctx->cdat[core].count]; - - if (eo_ctx->cdat[core].count < g_options.tracebuf) { - if (op < OP_PROF_ACK && (tidx != -1)) /* to be a bit faster for profiling */ - tmo->tick = em_timer_current_tick(eo_ctx->test_tmr[tidx]); - tmo->op = op; - tmo->id = id; - tmo->ts = get_time(); - tmo->linuxt.u64 = ns; - tmo->count = count; - tmo->tidx = tidx; - eo_ctx->cdat[core].count++; - } - - return (eo_ctx->cdat[core].count >= g_options.stoplim) ? 0 : 1; -} - -void send_bg_events(app_eo_ctx_t *eo_ctx) -{ - for (int n = 0; n < g_options.bg_events; n++) { - em_event_t event = em_alloc(sizeof(app_msg_t), - EM_EVENT_TYPE_SW, m_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate bg event!\n"); - app_msg_t *msg = em_event_pointer(event); - - msg->command = CMD_BGWORK; - msg->count = 0; - msg->id = n + 1; - msg->arg = g_options.bg_time_ns; - test_fatal_if(em_send(event, eo_ctx->bg_q) != EM_OK, "Can't allocate bg event!\n"); - } -} - -void start_periodic(app_eo_ctx_t *eo_ctx) -{ - app_msg_t *msg; - em_event_t event; - em_tmo_t tmo; - em_tmo_flag_t flag = EM_TMO_FLAG_PERIODIC; - time_stamp t1 = {0}; - uint64_t max_period = 0; - int tidx; - - if (g_options.noskip) - flag |= EM_TMO_FLAG_NOSKIP; - eo_ctx->stop_sent = 0; - eo_ctx->started = get_time(); - - for (int i = 0; i < g_options.num_periodic; i++) { - event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, - "Can't allocate test event (%ldB)!\n", - sizeof(app_msg_t)); - - msg = em_event_pointer(event); - msg->command = CMD_TMO; - msg->count = 0; - msg->id = i; - tidx = random() % g_options.num_timers; - msg->tidx = tidx; - - if (eo_ctx->tmo_data[i].handle == EM_TMO_UNDEF) { /* not -q */ - if (g_options.profile) - t1 = get_time(); - tmo = em_tmo_create(m_shm->test_tmr[tidx], flag, eo_ctx->test_q); - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_CREATE, msg); - test_fatal_if(tmo == EM_TMO_UNDEF, "Can't allocate test_tmo!\n"); - eo_ctx->tmo_data[i].handle = tmo; - } - msg->tmo = eo_ctx->tmo_data[i].handle; - eo_ctx->tmo_data[i].tidx = tidx; - - double ns = 1000000000 / (double)eo_ctx->test_hz; - uint64_t period; - uint64_t first = 0; - em_status_t stat; - - if (g_options.period_ns) { - eo_ctx->tmo_data[i].period_ns = g_options.period_ns; - } else { /* 0: use random */ - eo_ctx->tmo_data[i].period_ns = random_tmo_ns(); - } - if (max_period < eo_ctx->tmo_data[i].period_ns) - max_period = eo_ctx->tmo_data[i].period_ns; - period = round((double)eo_ctx->tmo_data[i].period_ns / ns); - - if (EXTRA_PRINTS && i == 0) { - APPL_PRINT("Timer Hz %lu ", eo_ctx->test_hz); - APPL_PRINT("= Period ns: %f => period %lu ticks\n", ns, period); - } - - test_fatal_if(period < 1, "timer resolution is too low!\n"); - - if (g_options.first_ns < 0) /* use random */ - eo_ctx->tmo_data[i].first_ns = random_tmo_ns(); - else if (g_options.first_ns == 0) /* use period */ - eo_ctx->tmo_data[i].first_ns = eo_ctx->tmo_data[i].period_ns; - else - eo_ctx->tmo_data[i].first_ns = g_options.first_ns; - - first = round((double)eo_ctx->tmo_data[i].first_ns / ns); - if (!first) - first = 1; - eo_ctx->tmo_data[i].first = first; - - eo_ctx->tmo_data[i].start_ts = get_time(); - eo_ctx->tmo_data[i].start = em_timer_current_tick(m_shm->test_tmr[tidx]); - first += eo_ctx->tmo_data[i].start; - if (g_options.profile) - t1 = get_time(); - stat = em_tmo_set_periodic(eo_ctx->tmo_data[i].handle, first, period, event); - if (g_options.profile) - add_prof(eo_ctx, t1, 
OP_PROF_SET, msg); - - if (unlikely(stat != EM_OK)) { - if (EXTRA_PRINTS) { - em_timer_tick_t now = em_timer_current_tick(eo_ctx->test_tmr[tidx]); - - APPL_PRINT("FAILED to set tmo, stat=%d: first=%lu, ", stat, first); - APPL_PRINT("now %lu (diff %ld), period=%lu\n", - now, (int64_t)first - (int64_t)now, period); - APPL_PRINT("(first_ns %lu)\n", eo_ctx->tmo_data[i].first_ns); - } - test_fatal_if(1, "Can't activate test tmo!\n"); - } - - eo_ctx->tmo_data[i].ack_late = 0; - eo_ctx->tmo_data[i].ticks = period; - eo_ctx->max_period = max_period; - eo_ctx->cooloff = (max_period / 1000000000ULL * 2) + 1; - if (eo_ctx->cooloff < 4) - eo_ctx->cooloff = 4; /* HB periods (sec) */ - } -} - -void add_prof(app_eo_ctx_t *eo_ctx, time_stamp t1, e_op op, app_msg_t *msg) -{ - time_stamp dif = time_diff(get_time(), t1); - - add_trace(eo_ctx, msg->id, op, dif.u64, msg->count, -1); - /* if this filled the buffer it's handled on next tmo */ -} - -int handle_periodic(app_eo_ctx_t *eo_ctx, em_event_t event) -{ - int core = em_core_id(); - app_msg_t *msg = (app_msg_t *)em_event_pointer(event); - int reuse = 1; - e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE); - time_stamp t1 = {0}; - em_tmo_stats_t ctrs = { 0 }; /* init to avoid gcc warning with LTO */ - em_status_t ret; - - msg->count++; - - /* this is to optionally test abnormal exits only */ - if (unlikely(g_options.abort != 0) && abs(g_options.abort) <= msg->count) { - if (g_options.abort < 0) { /* cause segfault to test exception here */ - uint64_t *fault = NULL; - /* coverity[FORWARD_NULL] */ - msg->arg = *fault; - } else { - abort(); - } - } - - if (likely(state == STATE_RUN)) { /* add tmo trace */ - if (!add_trace(eo_ctx, msg->id, OP_TMO, 0, msg->count, msg->tidx)) - send_stop(eo_ctx); /* triggers state change */ - - if (g_options.work_prop) { - uint64_t work = random_work_ns(&eo_ctx->cdat[core].rng); - - if (work) { /* add extra delay */ - time_stamp t2; - uint64_t ns = time_to_ns(get_time()); - - do { - t2 = get_time(); - } while (time_to_ns(t2) < (ns + work)); - add_trace(eo_ctx, msg->id, OP_WORK, work, msg->count, -1); - } - } - - /* only ack while in running state */ - add_trace(eo_ctx, msg->id, OP_ACK, 0, msg->count, msg->tidx); - if (g_options.profile) - t1 = get_time(); - em_status_t stat = em_tmo_ack(msg->tmo, event); - - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_ACK, msg); - if (unlikely(stat != EM_OK)) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "ack() fail!\n"); - - } else if (state == STATE_COOLOFF) { /* trace, but cancel */ - em_event_t tmo_event = EM_EVENT_UNDEF; - - add_trace(eo_ctx, msg->id, OP_TMO, 0, msg->count, msg->tidx); - em_tmo_get_stats(msg->tmo, &ctrs); - APPL_PRINT("STAT-ACK [%d]: %lu acks, %lu late, %lu skips\n", - msg->id, ctrs.num_acks, ctrs.num_late_ack, ctrs.num_period_skips); - eo_ctx->tmo_data[msg->id].ack_late = ctrs.num_late_ack; - eo_ctx->global_stat.num_late += ctrs.num_late_ack; - - if (unlikely(msg->id >= g_options.num_periodic)) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Corrupted tmo msg?\n"); - - if (g_options.profile) - t1 = get_time(); - if (g_options.no_del) { /* don't delete each round */ - ret = em_tmo_cancel(msg->tmo, &tmo_event); - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_CANCEL, msg); - test_fatal_if(ret == EM_OK, "tmo_cancel ok, expecting fail here!\n"); - } else { - ret = em_tmo_delete(msg->tmo, &tmo_event); - if (g_options.profile) - add_prof(eo_ctx, t1, OP_PROF_DELETE, msg); - test_fatal_if(ret != EM_OK, "tmo_delete failed, ret %" PRI_STAT "!\n", 
ret); - eo_ctx->tmo_data[msg->id].handle = EM_TMO_UNDEF; - } - - eo_ctx->cdat[core].cancelled++; - if (unlikely(tmo_event != EM_EVENT_UNDEF)) { /* not expected as we have the event */ - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "periodic tmo delete returned evt!\n"); - } - add_trace(eo_ctx, msg->id, OP_CANCEL, 0, msg->count, msg->tidx); - reuse = 0; /* free this last tmo event of canceled tmo */ - } else { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "Timeout in state %s!\n", state_labels[state]); - } - return reuse; -} - -void analyze_measure(app_eo_ctx_t *eo_ctx, uint64_t linuxns, uint64_t tmrtick, - time_stamp timetick) -{ - uint64_t linux_t2 = linux_time_ns(); - time_stamp time_t2 = get_time(); - uint64_t tmr_t2 = em_timer_current_tick(eo_ctx->test_tmr[0]); - - linux_t2 = linux_t2 - linuxns; - time_t2 = time_diff(time_t2, timetick); - tmr_t2 = tmr_t2 - tmrtick; - APPL_PRINT("%lu timer ticks in %luns (linux time) ", tmr_t2, linux_t2); - double hz = 1000000000 / - ((double)linux_t2 / (double)tmr_t2); - APPL_PRINT("=> %.1fHz (%.1fMHz). Timer reports %luHz\n", hz, hz / 1000000, eo_ctx->test_hz); - eo_ctx->meas_test_hz = round(hz); - hz = 1000000000 / ((double)linux_t2 / (double)time_t2.u64); - APPL_PRINT("Timestamp measured: %.1fHz (%.1fMHz)\n", hz, hz / 1000000); - eo_ctx->meas_time_hz = round(hz); - - if (g_options.cpucycles == 1) /* use measured */ - eo_ctx->time_hz = eo_ctx->meas_time_hz; - if (g_options.cpucycles > 1) /* freq given */ - eo_ctx->time_hz = (uint64_t)g_options.cpucycles; - - test_fatal_if(tmr_t2 < 1, "TIMER SEEMS NOT RUNNING AT ALL!?"); -} - -int do_memzero(app_msg_t *msg, app_eo_ctx_t *eo_ctx) -{ - static int count; - - add_trace(eo_ctx, -1, OP_MEMZERO, g_options.mz_mb, msg->count, -1); - if (eo_ctx->mz_data == NULL) { /* first time we only allocate */ - if (g_options.mz_huge) { - eo_ctx->mz_data = mmap(NULL, g_options.mz_mb * 1024UL * 1024UL, - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | - MAP_HUGETLB | MAP_LOCKED, - -1, 0); - if (eo_ctx->mz_data == MAP_FAILED) - eo_ctx->mz_data = NULL; - } else { - eo_ctx->mz_data = malloc(g_options.mz_mb * 1024UL * 1024UL); - } - test_fatal_if(eo_ctx->mz_data == NULL, "mz_mem reserve failed!"); - } else { - memset(eo_ctx->mz_data, 0, g_options.mz_mb * 1024UL * 1024UL); - eo_ctx->mz_count++; - } - add_trace(eo_ctx, -1, OP_MEMZERO_END, g_options.mz_mb, count, -1); - __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED); - return 0; -} - -int do_bg_work(em_event_t evt, app_eo_ctx_t *eo_ctx) -{ - app_msg_t *msg = (app_msg_t *)em_event_pointer(evt); - time_stamp t1 = get_time(); - time_stamp ts; - int32_t rnd; - int core = em_core_id(); - uint64_t sum = 0; - - if (__atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE) != STATE_RUN) { - eo_ctx->cdat[core].jobs_deleted++; - if (EXTRA_PRINTS) - APPL_PRINT("Deleting job after %u iterations\n", msg->count); - return 0; /* stop & delete */ - } - - if (g_options.jobs) - add_trace(eo_ctx, -1, OP_BGWORK, msg->arg, msg->count, -1); - - msg->count++; - eo_ctx->cdat[core].jobs++; - int blocks = g_options.bg_size / g_options.bg_chunk; - - random_r(&eo_ctx->cdat[core].rng.rdata, &rnd); - rnd = rnd % blocks; - uint64_t *dptr = (uint64_t *)((uintptr_t)eo_ctx->bg_data + rnd * g_options.bg_chunk); - /* printf("%d: %p - %p\n", rnd, eo_ctx->bg_data, dptr); */ - - do { - /* jump around memory reading from selected chunk */ - random_r(&eo_ctx->cdat[core].rng.rdata, &rnd); - rnd = rnd % (g_options.bg_chunk / sizeof(uint64_t)); - /* printf("%d: %p - %p\n", rnd, eo_ctx->bg_data, 
dptr+rnd); */ - sum += *(dptr + rnd); - ts = time_diff(get_time(), t1); - } while (time_to_ns(ts) < msg->arg); - - *dptr = sum; - - if (g_options.mz_mb && msg->id == 1) { /* use only one job stream for memzero */ - static time_stamp last_mz = {0}; - - if (msg->count < 10) /* don't do mz before some time */ - last_mz = get_time(); - ts = time_diff(get_time(), last_mz); - if (time_to_ns(ts) > g_options.mz_ns) { - do_memzero(msg, eo_ctx); - last_mz = get_time(); - } - } - - test_fatal_if(em_send(evt, eo_ctx->bg_q) != EM_OK, "Failed to send BG job event!"); - return 1; -} - -void handle_heartbeat(app_eo_ctx_t *eo_ctx, em_event_t event) -{ - app_msg_t *msg = (app_msg_t *)em_event_pointer(event); - int cores = em_core_count(); - int done = 0; - e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_SEQ_CST); - static int runs; - static uint64_t linuxns; - static uint64_t tmrtick; - static time_stamp timetick; - - /* heartbeat runs states of the test */ - - msg->count++; - add_trace(eo_ctx, -1, OP_HB, linux_time_ns(), msg->count, -1); - - if (EXTRA_PRINTS) - APPL_PRINT("."); - - switch (state) { - case STATE_INIT: - if (msg->count > eo_ctx->last_hbcount + INIT_WAIT) { - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - eo_ctx->last_hbcount = msg->count; - APPL_PRINT("ROUND %d\n", runs + 1); - APPL_PRINT("->Starting tick measurement\n"); - } - break; - - case STATE_MEASURE: /* measure timer frequencies */ - if (linuxns == 0) { - linuxns = linux_time_ns(); - timetick = get_time(); - /* use timer[0] for this always */ - tmrtick = em_timer_current_tick(eo_ctx->test_tmr[0]); - } - if (msg->count > eo_ctx->last_hbcount + MEAS_PERIOD) { - analyze_measure(eo_ctx, linuxns, tmrtick, timetick); - linuxns = 0; - /* start new run */ - if (g_options.num_runs > 1) - APPL_PRINT("** Round %d\n", runs + 1); - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); - } - break; - - case STATE_STABILIZE: /* give some time to get up */ - if (g_options.bg_events) - send_bg_events(eo_ctx); - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); - if (EXTRA_PRINTS) - APPL_PRINT("->Starting tmos\n"); - start_periodic(eo_ctx); - eo_ctx->last_hbcount = msg->count; - break; - - case STATE_RUN: /* run the test, avoid prints */ - for (int i = 0; i < cores; i++) { - if (eo_ctx->cdat[i].count >= - g_options.tracebuf) { - done++; - break; - } - } - if (done) { - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); - eo_ctx->last_hbcount = msg->count; - if (EXTRA_PRINTS) - APPL_PRINT("->All cores done\n"); - } - break; - - case STATE_COOLOFF: /* stop further timeouts */ - if (msg->count > (eo_ctx->last_hbcount + eo_ctx->cooloff)) { - __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); - eo_ctx->last_hbcount = msg->count; - if (EXTRA_PRINTS) - APPL_PRINT("->Starting analyze\n"); - } - break; - - case STATE_ANALYZE: /* expected to be stopped, analyze data */ - APPL_PRINT("\n"); - analyze(eo_ctx); - cleanup(eo_ctx); - /* re-start test cycle */ - __atomic_store_n(&eo_ctx->state, STATE_INIT, __ATOMIC_SEQ_CST); - runs++; - if (runs >= g_options.num_runs && g_options.num_runs != 0) { - /* terminate test app */ - APPL_PRINT("%d runs done\n", runs); - raise(SIGINT); - } - eo_ctx->last_hbcount = msg->count; - break; - - default: - 
test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid test state"); - } - - /* heartbeat never stops */ - if (em_tmo_ack(eo_ctx->heartbeat_tmo, event) != EM_OK) - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "HB ack() fail!\n"); -} - -void usage(void) -{ - printf("%s\n", instructions); - - printf("Usage:\n"); - for (int i = 0; ; i++) { - if (longopts[i].name == NULL || descopts[i] == NULL) - break; - printf("--%s or -%c: %s\n", longopts[i].name, longopts[i].val, descopts[i]); - } -} - -int parse_my_args(int first, int argc, char *argv[]) -{ - optind = first + 1; /* skip '--' */ - while (1) { - int opt; - int long_index; - char *endptr; - int64_t num; - - opt = getopt_long(argc, argv, shortopts, longopts, &long_index); - - if (opt == -1) - break; /* No more options */ - - switch (opt) { - case 's': { - g_options.noskip = 1; - } - break; - case 'a': { - g_options.profile = 1; - } - break; - case 'b': { - g_options.jobs = 1; - } - break; - case 'd': { - g_options.dispatch = 1; - } - break; - case 'i': { - g_options.info_only = 1; - } - break; - case 'u': { - g_options.usehuge = 1; - } - break; - case 'q': { - g_options.no_del = 1; - } - break; - case 'g': { - g_options.cpucycles = 1; - if (optarg != NULL) { /* optional arg */ - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 2) - return 0; - g_options.cpucycles = num; - } - } - break; - case 'w': { /* optional arg */ - g_options.csv = "stdout"; - if (optarg != NULL) - g_options.csv = optarg; - } - break; - case 'm': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 1) - return 0; - g_options.max_period_ns = (uint64_t)num; - } - break; - case 'l': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 1) - return 0; - g_options.min_period_ns = num; - } - break; - case 't': { - unsigned long size, perc; - - num = sscanf(optarg, "%lu,%lu", &size, &perc); - if (num == 0 || size < 10 || - sizeof(tmo_trace) * size > MAX_TMO_BYTES) - return 0; - g_options.tracebuf = size; - if (num == 2 && perc > 100) - return 0; - if (num == 2) - g_options.stoplim = ((perc * size) / 100); - else - g_options.stoplim = ((STOP_THRESHOLD * size) / 100); - } - break; - case 'e': { - unsigned int min_us, max_us, prop; - - if (sscanf(optarg, "%u,%u,%u", &min_us, &max_us, &prop) != 3) - return 0; - if (prop > 100 || max_us < 1) - return 0; - g_options.min_work_ns = 1000ULL * min_us; - g_options.max_work_ns = 1000ULL * max_us; - g_options.work_prop = prop; - } - break; - case 'o': { - unsigned int mb; - uint64_t ms; - unsigned int hp = 0; - - if (sscanf(optarg, "%u,%lu,%u", &mb, &ms, &hp) < 2) - return 0; - if (mb < 1 || ms < 1) - return 0; - g_options.mz_mb = mb; - g_options.mz_ns = ms * 1000UL * 1000UL; - if (hp) - g_options.mz_huge = 1; - } - break; - case 'j': { - unsigned int evts, us, kb, chunk; - - num = sscanf(optarg, "%u,%u,%u,%u", &evts, &us, &kb, &chunk); - if (num == 0 || evts < 1) - return 0; - g_options.bg_events = evts; - if (num > 1 && us) - g_options.bg_time_ns = us * 1000ULL; - if (num > 2 && kb) - g_options.bg_size = kb * 1024; - if (num > 3 && chunk) - g_options.bg_chunk = chunk * 1024; - if (g_options.bg_chunk > g_options.bg_size) - return 0; - } - break; - case 'n': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.num_periodic = num; - } - break; - case 'p': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 0) - return 0; - g_options.period_ns = num; - } - break; - case 'f': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < -1) - return 0; - 
g_options.first_ns = num; - } - break; - case 'c': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 0) - return 0; - g_options.clock_src = num; - } - break; - case 'r': { - if (!arg_to_ns(optarg, &num)) - return 0; - if (num < 0) - return 0; - g_options.res_ns = num; - } - break; - case 'z': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.res_hz = num; - g_options.res_ns = 0; - } - break; - case 'x': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 0) - return 0; - g_options.num_runs = num; - } - break; - case 'k': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0') - return 0; - g_options.abort = num; - } - break; - - case 'y': { - num = strtol(optarg, &endptr, 0); - if (*endptr != '\0' || num < 1) - return 0; - g_options.num_timers = num; - } - break; - - case 'h': - default: - opterr = 0; - usage(); - return 0; - } - } - - optind = 1; /* cm_setup() to parse again */ - return 1; -} - -/** - * Before EM - Init - */ -void test_init(void) -{ - int core = em_core_id(); - - /* first core creates ShMem */ - if (core == 0) { - m_shm = env_shared_reserve("Timer_test", sizeof(timer_app_shm_t)); - /* initialize it */ - if (m_shm) - memset(m_shm, 0, sizeof(timer_app_shm_t)); - - APPL_PRINT("%ldk shared memory for app context\n", sizeof(timer_app_shm_t) / 1000); - - } else { - m_shm = env_shared_lookup("Timer_test"); - } - - if (m_shm == NULL) { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, - "ShMem init failed on EM-core: %u", - em_core_id()); - } - - APPL_PRINT("core %d: %s done\n", core, __func__); -} - -/** - * Startup of the timer test EM application - */ -void test_start(appl_conf_t *const appl_conf) -{ - em_eo_t eo; - em_timer_attr_t attr; - em_queue_t queue; - em_status_t stat; - app_eo_ctx_t *eo_ctx; - em_timer_res_param_t res_capa; - em_timer_capability_t capa = { 0 }; /* init to avoid gcc warning with LTO */ - em_core_mask_t mask; - em_queue_group_t grp; - em_atomic_group_t agrp; - - if (appl_conf->num_procs > 1) { - APPL_PRINT("\n!! 
Multiple PROCESS MODE NOT SUPPORTED !!\n\n"); - raise(SIGINT); - return; - } - - if (appl_conf->num_pools >= 1) - m_shm->pool = appl_conf->pools[0]; - else - m_shm->pool = EM_POOL_DEFAULT; - - eo_ctx = &m_shm->eo_context; - memset(eo_ctx, 0, sizeof(app_eo_ctx_t)); - eo_ctx->tmo_data = calloc(g_options.num_periodic, sizeof(tmo_setup)); - test_fatal_if(eo_ctx->tmo_data == NULL, "Can't alloc tmo_setups"); - - eo = em_eo_create(APP_EO_NAME, app_eo_start, app_eo_start_local, - app_eo_stop, app_eo_stop_local, app_eo_receive, - eo_ctx); - test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!"); - - stat = em_register_error_handler(my_error_handler); - test_fatal_if(stat != EM_OK, "Failed to register error handler"); - - /* Create atomic group and queues for control messages */ - stat = em_queue_group_get_mask(EM_QUEUE_GROUP_DEFAULT, &mask); - test_fatal_if(stat != EM_OK, "Failed to get default Q grp mask!"); - - grp = em_queue_group_create_sync("CTRL_GRP", &mask); - test_fatal_if(grp == EM_QUEUE_GROUP_UNDEF, "Failed to create Q grp!"); - agrp = em_atomic_group_create("CTRL_AGRP", grp); - test_fatal_if(agrp == EM_ATOMIC_GROUP_UNDEF, "Failed to create atomic grp!"); - eo_ctx->agrp = agrp; - - queue = em_queue_create_ag("Control Q", EM_QUEUE_PRIO_NORMAL, agrp, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create hb queue!"); - eo_ctx->hb_q = queue; - - queue = em_queue_create_ag("Stop Q", EM_QUEUE_PRIO_HIGHEST, agrp, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create stop queue!"); - eo_ctx->stop_q = queue; - - /* parallel high priority for timeout handling*/ - queue = em_queue_create("Tmo Q", - EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_HIGH, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to create queue!"); - eo_ctx->test_q = queue; - - /* another parallel low priority for background work*/ - queue = em_queue_create("BG Q", - EM_QUEUE_TYPE_PARALLEL, - EM_QUEUE_PRIO_LOWEST, - EM_QUEUE_GROUP_DEFAULT, NULL); - stat = em_eo_add_queue_sync(eo, queue); - test_fatal_if(stat != EM_OK, "Failed to add queue!"); - eo_ctx->bg_q = queue; - - /* create two timers so HB and tests can be independent */ - em_timer_attr_init(&attr); - strncpy(attr.name, "HBTimer", EM_TIMER_NAME_LEN); - m_shm->hb_tmr = em_timer_create(&attr); - test_fatal_if(m_shm->hb_tmr == EM_TIMER_UNDEF, - "Failed to create HB timer!"); - - /* test timer */ - test_fatal_if(g_options.res_ns && g_options.res_hz, "Give resolution in ns OR hz!"); - - em_timer_attr_init(&attr); - stat = em_timer_capability(&capa, g_options.clock_src); - - APPL_PRINT("Timer capability for clksrc %d:\n", g_options.clock_src); - APPL_PRINT(" maximum timers: %d\n", capa.max_timers); - APPL_PRINT(" max_res %luns %luhz min_tmo %lu max_tmo %lu\n", - capa.max_res.res_ns, capa.max_res.res_hz, - capa.max_res.min_tmo, capa.max_res.max_tmo); - APPL_PRINT(" max_tmo %luns %luhz min_tmo %lu max_tmo %lu\n", - capa.max_tmo.res_ns, capa.max_tmo.res_hz, - capa.max_tmo.min_tmo, capa.max_tmo.max_tmo); - - test_fatal_if(stat != EM_OK, "Given clk_src is not supported\n"); - memset(&res_capa, 0, sizeof(em_timer_res_param_t)); - if (!g_options.res_hz) { - res_capa.res_ns = g_options.res_ns == 0 ? 
capa.max_res.res_ns : g_options.res_ns; - APPL_PRINT("Trying %lu ns resolution capability on clk %d\n", - res_capa.res_ns, g_options.clock_src); - } else { - res_capa.res_hz = g_options.res_hz; - APPL_PRINT("Trying %lu Hz resolution capability on clk %d\n", - res_capa.res_hz, g_options.clock_src); - } - - APPL_PRINT("Asking timer capability for clksrc %d:\n", g_options.clock_src); - APPL_PRINT("%luns %luhz min_tmo %lu max_tmo %lu\n", - res_capa.res_ns, res_capa.res_hz, - res_capa.min_tmo, res_capa.max_tmo); - stat = em_timer_res_capability(&res_capa, g_options.clock_src); - APPL_PRINT("-> Timer res_capability:\n"); - APPL_PRINT("max_res %luns %luhz min_tmo %lu max_tmo %lu\n", - res_capa.res_ns, res_capa.res_hz, - res_capa.min_tmo, res_capa.max_tmo); - test_fatal_if(stat != EM_OK, "Given resolution is not supported (ret %d)\n", stat); - - if (!g_options.max_period_ns) { - g_options.max_period_ns = DEF_MAX_PERIOD; - if (g_options.max_period_ns > res_capa.max_tmo) - g_options.max_period_ns = res_capa.max_tmo; - } - if (!g_options.min_period_ns) { - g_options.min_period_ns = res_capa.res_ns * DEF_MIN_PERIOD; - if (g_options.min_period_ns < res_capa.min_tmo) - g_options.min_period_ns = res_capa.min_tmo; - } - - if (g_options.info_only) { /* stop here */ - raise(SIGINT); - } else { - strncpy(attr.name, "TestTimer", EM_TIMER_NAME_LEN); - attr.resparam = res_capa; - if (g_options.res_hz) /* can only have one */ - attr.resparam.res_ns = 0; - else - attr.resparam.res_hz = 0; - attr.num_tmo = g_options.num_periodic; - attr.resparam.max_tmo = g_options.max_period_ns; /* don't need more */ - for (int i = 0; i < g_options.num_timers; i++) { - m_shm->test_tmr[i] = em_timer_create(&attr); - test_fatal_if(m_shm->test_tmr[i] == EM_TIMER_UNDEF, "Failed to create test timer!"); - eo_ctx->test_tmr[i] = m_shm->test_tmr[i]; - } - APPL_PRINT("%d test timers created\n", g_options.num_timers); - g_options.res_ns = attr.resparam.res_ns; - } - - /* Start EO */ - stat = em_eo_start_sync(eo, NULL, NULL); - test_fatal_if(stat != EM_OK, "Failed to start EO!"); - - mlockall(MCL_FUTURE); -} - -void -test_stop(appl_conf_t *const appl_conf) -{ - const int core = em_core_id(); - em_status_t ret; - em_eo_t eo; - - if (appl_conf->num_procs > 1) { - APPL_PRINT("%s(): skip\n", __func__); - return; - } - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - eo = em_eo_find(APP_EO_NAME); - test_fatal_if(eo == EM_EO_UNDEF, "Could not find EO:%s", APP_EO_NAME); - - ret = em_eo_stop_sync(eo); - test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); - ret = em_eo_delete(eo); - test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); - - ret = em_timer_delete(m_shm->hb_tmr); - test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", - m_shm->hb_tmr, ret); - for (int i = 0; i < g_options.num_timers; i++) { - if (m_shm->test_tmr[i] != EM_TIMER_UNDEF) { - ret = em_timer_delete(m_shm->test_tmr[i]); - test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", - m_shm->test_tmr[i], ret); - } - } - free(m_shm->eo_context.tmo_data); -} - -void test_term(void) -{ - int core = em_core_id(); - - APPL_PRINT("%s() on EM-core %d\n", __func__, core); - - if (m_shm != NULL) { - em_unregister_error_handler(); - env_shared_free(m_shm); - m_shm = NULL; - } -} - -static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) -{ - #define PRINT_MAX_TMRS 4 - em_timer_attr_t attr; - em_timer_t tmr[PRINT_MAX_TMRS]; - int num_timers; - app_msg_t *msg; - struct 
timespec ts; - uint64_t period; - em_event_t event; - app_eo_ctx_t *eo_ctx = (app_eo_ctx_t *)eo_context; - - (void)eo; - (void)conf; - - if (g_options.info_only) - return EM_OK; - - num_timers = em_timer_get_all(tmr, PRINT_MAX_TMRS); - - for (int i = 0; - i < (num_timers > PRINT_MAX_TMRS ? PRINT_MAX_TMRS : num_timers); - i++) { - if (em_timer_get_attr(tmr[i], &attr) != EM_OK) { - APPL_ERROR("Can't get timer info\n"); - return EM_ERR_BAD_ID; - } - APPL_PRINT("Timer \"%s\" info:\n", attr.name); - APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns); - APPL_PRINT(" -max_tmo: %" PRIu64 " ms\n", attr.resparam.max_tmo / 1000); - APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo); - APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src); - APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n", - em_timer_get_freq(tmr[i])); - } - - APPL_PRINT("\nActive run options:\n"); - APPL_PRINT(" num timers: %d\n", g_options.num_timers); - APPL_PRINT(" num timeouts: %d\n", g_options.num_periodic); - if (g_options.res_hz) { - APPL_PRINT(" resolution: %lu Hz (%f MHz)\n", g_options.res_hz, - (double)g_options.res_hz / 1000000); - } else { - APPL_PRINT(" resolution: %lu ns (%fs)\n", g_options.res_ns, - (double)g_options.res_ns / 1000000000); - } - if (g_options.period_ns == 0) - APPL_PRINT(" period: random\n"); - else - APPL_PRINT(" period: %lu ns (%fs%s)\n", g_options.period_ns, - (double)g_options.period_ns / 1000000000, - g_options.period_ns == 0 ? " (random)" : ""); - if (g_options.first_ns == -1) - APPL_PRINT(" first period: random\n"); - else - APPL_PRINT(" first period: %ld ns (%fs%s)\n", g_options.first_ns, - (double)g_options.first_ns / 1000000000, - g_options.first_ns == 0 ? " (=period)" : ""); - APPL_PRINT(" max period: %luns (%fs)\n", g_options.max_period_ns, - (double)g_options.max_period_ns / 1000000000); - APPL_PRINT(" min period: %luns (%fs)\n", g_options.min_period_ns, - (double)g_options.min_period_ns / 1000000000); - APPL_PRINT(" csv: %s\n", - g_options.csv == NULL ? "(no)" : g_options.csv); - APPL_PRINT(" tracebuffer: %d tmo events (%luKiB)\n", - g_options.tracebuf, - g_options.tracebuf * sizeof(tmo_trace) / 1024); - APPL_PRINT(" stop limit: %d tmo events\n", g_options.stoplim); - APPL_PRINT(" use NOSKIP: %s\n", g_options.noskip ? "yes" : "no"); - APPL_PRINT(" profile API: %s\n", g_options.profile ? "yes" : "no"); - APPL_PRINT(" dispatch prof:%s\n", g_options.dispatch ? "yes" : "no"); - APPL_PRINT(" time stamps: %s\n", g_options.cpucycles ? - "CPU cycles" : "odp_time()"); - APPL_PRINT(" work propability:%u%%\n", g_options.work_prop); - if (g_options.work_prop) { - APPL_PRINT(" min_work: %luns\n", g_options.min_work_ns); - APPL_PRINT(" max_work: %luns\n", g_options.max_work_ns); - } - APPL_PRINT(" bg events: %u\n", g_options.bg_events); - eo_ctx->bg_data = NULL; - if (g_options.bg_events) { - APPL_PRINT(" bg work: %luus\n", g_options.bg_time_ns / 1000); - APPL_PRINT(" bg data: %ukiB\n", g_options.bg_size / 1024); - APPL_PRINT(" bg chunk: %ukiB (%u blks)\n", - g_options.bg_chunk / 1024, - g_options.bg_size / g_options.bg_chunk); - APPL_PRINT(" bg trace: %s\n", g_options.jobs ? "yes" : "no"); - - eo_ctx->bg_data = malloc(g_options.bg_size); - test_fatal_if(eo_ctx->bg_data == NULL, - "Can't allocate bg work data (%dkiB)!\n", - g_options.bg_size / 1024); - } - APPL_PRINT(" memzero: "); - if (g_options.mz_mb) - APPL_PRINT("%uMB %severy %lums\n", - g_options.mz_mb, - g_options.mz_huge ? 
"(mmap huge) " : "", - g_options.mz_ns / 1000000UL); - else - APPL_PRINT("no\n"); - - if (g_options.abort != 0) { - APPL_PRINT(" abort after: "); - if (g_options.abort) - APPL_PRINT("%d%s\n", - g_options.abort, g_options.abort < 0 ? "(segfault)" : ""); - else - APPL_PRINT("0 (no)\n"); - } - if (g_options.num_runs != 1) - APPL_PRINT(" delete tmos: %s", g_options.no_del ? "no" : "yes"); - - APPL_PRINT("\nTracing first %d tmo events\n", g_options.tracebuf); - - if (g_options.bg_events) - prefault(eo_ctx->bg_data, g_options.bg_size); - - /* create periodic timeout for heartbeat */ - eo_ctx->heartbeat_tmo = em_tmo_create(m_shm->hb_tmr, EM_TMO_FLAG_PERIODIC, eo_ctx->hb_q); - test_fatal_if(eo_ctx->heartbeat_tmo == EM_TMO_UNDEF, - "Can't allocate heartbeat_tmo!\n"); - - event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); - test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event (%ldB)!\n", - sizeof(app_msg_t)); - - msg = em_event_pointer(event); - msg->command = CMD_HEARTBEAT; - msg->count = 0; - msg->id = -1; - eo_ctx->hb_hz = em_timer_get_freq(m_shm->hb_tmr); - if (eo_ctx->hb_hz < 10) - APPL_ERROR("WARNING: HB timer hz very low!\n"); - else - APPL_PRINT("HB timer frequency is %lu\n", eo_ctx->hb_hz); - - period = eo_ctx->hb_hz; /* 1s */ - test_fatal_if(period < 1, "timer resolution is too low!\n"); - - /* linux time check */ - test_fatal_if(clock_getres(CLOCK_MONOTONIC, &ts) != 0, - "clock_getres() failed!\n"); - - period = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); - eo_ctx->linux_hz = 1000000000ULL / period; - APPL_PRINT("Linux reports clock running at %" PRIu64 " hz\n", eo_ctx->linux_hz); - APPL_PRINT("ODP says time_global runs at %luHz\n", odp_time_global_res()); - if (!g_options.cpucycles) - eo_ctx->time_hz = odp_time_global_res(); - - /* start heartbeat */ - __atomic_store_n(&eo_ctx->state, STATE_INIT, __ATOMIC_SEQ_CST); - - em_status_t stat = em_tmo_set_periodic(eo_ctx->heartbeat_tmo, 0, eo_ctx->hb_hz, event); - - if (EXTRA_PRINTS && stat != EM_OK) - APPL_PRINT("FAILED to set HB tmo, stat=%d: period=%lu\n", stat, eo_ctx->hb_hz); - test_fatal_if(stat != EM_OK, "Can't activate heartbeat tmo!\n"); - - eo_ctx->test_hz = em_timer_get_freq(m_shm->test_tmr[0]); /* use timer[0] */ - test_fatal_if(eo_ctx->test_hz == 0, - "get_freq() failed, timer:%" PRI_TMR "", m_shm->test_tmr[0]); - - stat = em_dispatch_register_enter_cb(enter_cb); - test_fatal_if(stat != EM_OK, "enter_cb() register failed!"); - stat = em_dispatch_register_exit_cb(exit_cb); - test_fatal_if(stat != EM_OK, "exit_cb() register failed!"); - - srandom(time(NULL)); - if (g_options.max_work_ns > RAND_MAX || - g_options.max_period_ns > RAND_MAX) { - double s = (double)RAND_MAX / (double)eo_ctx->test_hz; - - APPL_PRINT("WARNING: rnd number range is less than max values (up to %.4fs)\n", s); - } - if (EXTRA_PRINTS) - APPL_PRINT("WARNING: extra prints enabled, expect some jitter\n"); - - return EM_OK; -} - -/** - * @private - * - * EO per thread start function. 
- */ -static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - int core = em_core_id(); - - (void)eo; - - if (EXTRA_PRINTS) - APPL_PRINT("EO local start\n"); - test_fatal_if(core >= MAX_CORES, "Too many cores!"); - eo_ctx->cdat[core].trc = allocate_tracebuf(g_options.tracebuf, sizeof(tmo_trace), - &eo_ctx->cdat[core].trc_size); - test_fatal_if(eo_ctx->cdat[core].trc == NULL, "Failed to allocate trace buffer!"); - eo_ctx->cdat[core].count = 0; - eo_ctx->cdat[core].cancelled = 0; - eo_ctx->cdat[core].jobs_deleted = 0; - eo_ctx->cdat[core].jobs = 0; - - memset(&eo_ctx->cdat[core].rng, 0, sizeof(rnd_state_t)); - initstate_r(time(NULL), eo_ctx->cdat[core].rng.rndstate, RND_STATE_BUF, - &eo_ctx->cdat[core].rng.rdata); - srandom_r(time(NULL), &eo_ctx->cdat[core].rng.rdata); - return EM_OK; -} - -/** - * @private - * - * EO stop function. - */ -static em_status_t app_eo_stop(void *eo_context, em_eo_t eo) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - em_event_t event = EM_EVENT_UNDEF; - em_status_t ret; - - if (EXTRA_PRINTS) - APPL_PRINT("EO stop\n"); - - if (eo_ctx->heartbeat_tmo != EM_TMO_UNDEF) { - em_tmo_delete(eo_ctx->heartbeat_tmo, &event); - eo_ctx->heartbeat_tmo = EM_TMO_UNDEF; - if (event != EM_EVENT_UNDEF) - em_free(event); - } - - /* cancel all test timers in case test didn't complete */ - int dcount = 0; - - for (int i = 0; i < g_options.num_periodic; i++) { - if (eo_ctx->tmo_data[i].handle != EM_TMO_UNDEF) { - event = EM_EVENT_UNDEF; - em_tmo_delete(eo_ctx->tmo_data[i].handle, &event); - eo_ctx->tmo_data[i].handle = EM_TMO_UNDEF; - if (event != EM_EVENT_UNDEF) - em_free(event); - dcount++; - } - } - if (dcount) - APPL_PRINT("NOTE: deleted %d still active tmos\n", dcount); - - ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); /* remove and delete */ - test_fatal_if(ret != EM_OK, - "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); - - ret = em_atomic_group_delete(((app_eo_ctx_t *)eo_context)->agrp); - test_fatal_if(ret != EM_OK, - "EO remove atomic grp:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); - - if (!g_options.info_only) { - ret = em_dispatch_unregister_enter_cb(enter_cb); - test_fatal_if(ret != EM_OK, "enter_cb() unregister:%" PRI_STAT, ret); - ret = em_dispatch_unregister_exit_cb(exit_cb); - test_fatal_if(ret != EM_OK, "exit_cb() unregister:%" PRI_STAT, ret); - } - - if (eo_ctx->bg_data != NULL) - free(eo_ctx->bg_data); - eo_ctx->bg_data = NULL; - if (eo_ctx->mz_data != NULL) { - if (g_options.mz_huge) - munmap(eo_ctx->mz_data, g_options.mz_mb * 1024UL * 1024UL); - else - free(eo_ctx->mz_data); - - eo_ctx->mz_data = NULL; - } - - return EM_OK; -} - -/** - * @private - * - * EO stop local function. 
- */ -static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo) -{ - int core = em_core_id(); - app_eo_ctx_t *const eo_ctx = eo_context; - - (void)eo; - - if (EXTRA_PRINTS) - APPL_PRINT("EO local stop\n"); - free_tracebuf(eo_ctx->cdat[core].trc, eo_ctx->cdat[core].trc_size); - eo_ctx->cdat[core].trc = NULL; - return EM_OK; -} - -/** - * @private - * - * EO receive function - */ -static void app_eo_receive(void *eo_context, em_event_t event, - em_event_type_t type, em_queue_t queue, - void *q_context) -{ - app_eo_ctx_t *const eo_ctx = eo_context; - int reuse = 0; - static int last_count; - - (void)q_context; - - if (type == EM_EVENT_TYPE_SW) { - app_msg_t *msgin = (app_msg_t *)em_event_pointer(event); - - switch (msgin->command) { - case CMD_TMO: - reuse = handle_periodic(eo_ctx, event); - break; - - case CMD_HEARTBEAT: /* uses atomic queue */ - handle_heartbeat(eo_ctx, event); - last_count = msgin->count; - reuse = 1; - break; - - case CMD_BGWORK: - reuse = do_bg_work(event, eo_ctx); - break; - - case CMD_DONE: /* HB atomic queue */ { - e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE); - - /* only do this once */ - if (state == STATE_RUN && queue == eo_ctx->stop_q) { - __atomic_store_n(&eo_ctx->state, STATE_COOLOFF, __ATOMIC_SEQ_CST); - add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), STATE_COOLOFF, -1); - eo_ctx->last_hbcount = last_count; - eo_ctx->stopped = get_time(); - APPL_PRINT("Core %d reported DONE\n", msgin->id); - } - } - break; - - default: - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event!\n"); - } - } else { - test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event type!\n"); - } - - if (!reuse) - em_free(event); -} - -int main(int argc, char *argv[]) -{ - /* pick app-specific arguments after '--' */ - int i; - - APPL_PRINT("EM periodic timer test %s\n\n", VERSION); - - for (i = 1; i < argc; i++) { - if (!strcmp(argv[i], "--")) - break; - } - if (i < argc) { - if (!parse_my_args(i, argc, argv)) { - APPL_PRINT("Invalid application arguments\n"); - return 1; - } - } - - return cm_setup(argc, argv); -} +/* + * Copyright (c) 2020-2021, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * Event Machine timer test for periodic timeouts.
+ *
+ * See the instructions string in timer_test_periodic.h.
+ *
+ * Exception/error management is simplified and aborts on any error.
+ */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <inttypes.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <time.h>
+#include <signal.h>
+#include <getopt.h>
+#include <libgen.h>
+#include <sys/mman.h>
+
+#include <event_machine.h>
+#include <event_machine/add-ons/event_machine_timer.h>
+#include <event_machine/platform/env/environment.h>
+#include <odp_api.h>
+
+#include "cm_setup.h"
+#include "cm_error_handler.h"
+
+#include "timer_test_periodic.h"
+
+#define VERSION "WIP v0.9"
+struct {
+	int num_periodic;
+	uint64_t res_ns;
+	uint64_t res_hz;
+	uint64_t period_ns;
+	int64_t first_ns;
+	uint64_t max_period_ns;
+	uint64_t min_period_ns;
+	uint64_t min_work_ns;
+	uint64_t max_work_ns;
+	unsigned int work_prop;
+	int clock_src;
+	const char *csv;
+	int num_runs;
+	int tracebuf;
+	int stoplim;
+	int noskip;
+	int profile;
+	int dispatch;
+	int jobs;
+	long cpucycles;
+	int info_only;
+	int usehuge;	/* for trace buffer */
+	int bg_events;
+	uint64_t bg_time_ns;
+	int bg_size;
+	int bg_chunk;
+	int mz_mb;
+	int mz_huge;
+	uint64_t mz_ns;
+	int abort;	/* for testing abnormal exit */
+	int num_timers;
+	int no_del;
+
+} g_options = { .num_periodic = 1,	/* defaults for basic check */
+		.res_ns = DEF_RES_NS,
+		.res_hz = 0,
+		.period_ns = DEF_PERIOD * DEF_RES_NS,
+		.first_ns = 0,
+		.max_period_ns = 0,	/* max,min updated at init if not given on cmdline */
+		.min_period_ns = 0,
+		.min_work_ns = 0,
+		.max_work_ns = 0,
+		.work_prop = 0,
+		.clock_src = EM_TIMER_CLKSRC_DEFAULT,
+		.csv = NULL,
+		.num_runs = 1,
+		.tracebuf = DEF_TMO_DATA,
+		.stoplim = ((STOP_THRESHOLD * DEF_TMO_DATA) / 100),
+		.noskip = 1,
+		.profile = 0,
+		.dispatch = 0,
+		.jobs = 0,
+		.cpucycles = 0,
+		.info_only = 0,
+		.usehuge = 0,
+		.bg_events = 0,
+		.bg_time_ns = 10000,
+		.bg_size = 5000 * 1024,
+		.bg_chunk = 50 * 1024,
+		.mz_mb = 0,
+		.mz_huge = 0,
+		.mz_ns = 0,
+		.abort = 0,
+		.num_timers = 1,
+		.no_del = 0
+	};
+
+typedef struct global_stats_t {
+	uint64_t num_late;	/* ack late */
+	int64_t max_dev_ns;	/* +- max deviation from target */
+	int64_t max_early_ns;	/* max arrival before target time */
+	uint64_t num_tmo;	/* total received tmo count */
+	int max_cpu;		/* max CPU load % (any single) */
+	uint64_t max_dispatch;	/* max EO receive time */
+} global_stats_t;
+
+typedef struct app_eo_ctx_t {
+	e_state state;
+	em_tmo_t heartbeat_tmo;
+	em_timer_t test_tmr[MAX_TEST_TIMERS];
+	em_queue_t hb_q;
+	em_queue_t test_q;
+	em_queue_t stop_q;
+	em_queue_t bg_q;
+	int cooloff;
+	int last_hbcount;
+	uint64_t hb_hz;
+	uint64_t test_hz;
+	uint64_t time_hz;
+	uint64_t meas_test_hz;
+	uint64_t meas_time_hz;
+	uint64_t linux_hz;
+	uint64_t max_period;
+	time_stamp started;
+	time_stamp stopped;
+	void *bg_data;
+	void *mz_data;
+	uint64_t mz_count;
+	int stop_sent;
+	em_atomic_group_t agrp;
+	global_stats_t global_stat;
+	tmo_setup *tmo_data;
+	core_data cdat[MAX_CORES];
+} app_eo_ctx_t;
+
+typedef struct timer_app_shm_t {
+	em_pool_t pool;
+	app_eo_ctx_t eo_context;
+
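	/* timer handles kept in shm so test_stop() can delete them from any core */
+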
em_timer_t hb_tmr; + em_timer_t test_tmr[MAX_TEST_TIMERS]; +} timer_app_shm_t; + +#if defined(__aarch64__) +static inline uint64_t get_cpu_cycle(void) +{ + uint64_t r; + + __asm__ volatile ("mrs %0, pmccntr_el0" : "=r"(r) :: "memory"); + return r; +} +#elif defined(__x86_64__) +static inline uint64_t get_cpu_cycle(void) +{ + uint32_t a, d; + + __asm__ volatile ("rdtsc" : "=a"(a), "=d"(d) :: "memory"); + return (uint64_t)a | ((uint64_t)d) << 32; +} +#else +#error "Code supports Aarch64 or x86_64" +#endif + +/* EM-thread locals */ +static __thread timer_app_shm_t *m_shm; + +static void start_periodic(app_eo_ctx_t *eo_context); +static int handle_periodic(app_eo_ctx_t *eo_context, em_event_t event); +static void send_stop(app_eo_ctx_t *eo_context); +static void handle_heartbeat(app_eo_ctx_t *eo_context, em_event_t event); +static void usage(void); +static int parse_my_args(int first, int argc, char *argv[]); +static void analyze(app_eo_ctx_t *eo_ctx); +static void write_trace(app_eo_ctx_t *eo_ctx, const char *name); +static void cleanup(app_eo_ctx_t *eo_ctx); +static int add_trace(app_eo_ctx_t *eo_ctx, int id, e_op op, uint64_t ns, int count, int tidx); +static uint64_t linux_time_ns(void); +static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo); +static em_status_t app_eo_stop(void *eo_context, em_eo_t eo); +static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo); +static void app_eo_receive(void *eo_context, em_event_t event, + em_event_type_t type, em_queue_t queue, void *q_context); +static time_stamp get_time(void); +static uint64_t time_to_ns(time_stamp t); +static time_stamp time_diff(time_stamp t2, time_stamp t1); +static time_stamp time_sum(time_stamp t1, time_stamp t2); +static int arg_to_ns(const char *s, int64_t *val); +static void profile_statistics(e_op op, int cores, app_eo_ctx_t *eo_ctx); +static void profile_all_stats(int cores, app_eo_ctx_t *eo_ctx); +static void analyze_measure(app_eo_ctx_t *eo_ctx, uint64_t linuxns, + uint64_t tmrtick, time_stamp timetick); +static void timing_statistics(app_eo_ctx_t *eo_ctx); +static void add_prof(app_eo_ctx_t *eo_ctx, time_stamp t1, e_op op, app_msg_t *msg); +static int do_one_tmo(int id, app_eo_ctx_t *eo_ctx, + time_stamp *min, time_stamp *max, time_stamp *first, + int64_t *tgt_max_ns, int64_t *max_early_ns); +static tmo_trace *find_tmo(app_eo_ctx_t *eo_ctx, int id, int count, int *last); +static uint64_t random_tmo_ns(void); +static uint64_t random_work_ns(rnd_state_t *rng); +static void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx); +static void exit_cb(em_eo_t eo); +static void send_bg_events(app_eo_ctx_t *eo_ctx); +static int do_bg_work(em_event_t evt, app_eo_ctx_t *eo_ctx); +static int do_memzero(app_msg_t *msg, app_eo_ctx_t *eo_ctx); +static em_status_t my_error_handler(em_eo_t eo, em_status_t error, + em_escope_t escope, va_list args); +static void *allocate_tracebuf(int numbuf, size_t bufsize, size_t *realsize); +static void free_tracebuf(void *ptr, size_t realsize); +static void prefault(void *buf, size_t size); +static void show_global_stats(app_eo_ctx_t *eo_ctx); + +/* --------------------------------------- */ +em_status_t my_error_handler(em_eo_t eo, em_status_t error, + em_escope_t escope, va_list args) +{ + if (escope == 0xDEAD) { /* test_fatal_if */ + va_list my_args; + + va_copy(my_args, args); + + char *file = va_arg(my_args, char*); + const char *func 
= va_arg(my_args, const char*); + const int line = va_arg(my_args, const int); + const char *format = va_arg(my_args, const char*); + const char *base = basename(file); + + #pragma GCC diagnostic push + #pragma GCC diagnostic ignored "-Wformat-nonliteral" + fprintf(stderr, "FATAL - %s:%d, %s():\n", + base, line, func); + vfprintf(stderr, format, my_args); + #pragma GCC diagnostic pop + va_end(my_args); + } + return test_error_handler(eo, error, escope, args); +} + +void enter_cb(em_eo_t eo, void **eo_ctx, em_event_t events[], int num, + em_queue_t *queue, void **q_ctx) +{ + static int count; + app_eo_ctx_t *const my_eo_ctx = *eo_ctx; + + (void)eo; + (void)queue; + (void)q_ctx; + + if (unlikely(!my_eo_ctx)) + return; + + if (g_options.dispatch) { + for (int i = 0; i < num; i++) { + app_msg_t *msg = em_event_pointer(events[i]); + + add_trace(my_eo_ctx, msg->id, OP_PROF_ENTER_CB, + 0, count++, -1); + } + } + my_eo_ctx->cdat[em_core_id()].enter = get_time(); +} + +void exit_cb(em_eo_t eo) +{ + static int count; + app_eo_ctx_t *const my_eo_ctx = em_eo_get_context(eo); + + if (unlikely(!my_eo_ctx)) + return; + + if (g_options.dispatch) + add_trace(my_eo_ctx, -1, OP_PROF_EXIT_CB, 0, count++, -1); + + core_data *cdat = &my_eo_ctx->cdat[em_core_id()]; + time_stamp took; + + if (__atomic_load_n(&my_eo_ctx->state, __ATOMIC_ACQUIRE) == STATE_RUN) { + took = time_diff(get_time(), cdat->enter); + cdat->acc_time = time_sum(cdat->acc_time, took); + } +} + +void prefault(void *buf, size_t size) +{ + uint8_t *ptr = (uint8_t *)buf; + + /* write all pages to allocate and pre-fault (reduce runtime jitter) */ + APPL_PRINT("Pre-faulting %lu bytes at %p (EM core %d)\n", size, buf, em_core_id()); + for (size_t i = 0; i < size; i += 4096) + *(ptr + i) = (uint8_t)i; +} + +void *allocate_tracebuf(int numbuf, size_t bufsize, size_t *realsize) +{ + if (g_options.usehuge) { + *realsize = (numbuf + 1) * bufsize; + void *ptr = mmap(NULL, *realsize, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | MAP_HUGETLB | MAP_LOCKED, + -1, 0); + if (ptr == MAP_FAILED) { + APPL_PRINT("Huge page mapping failed for trace buffer (%lu bytes)\n", + *realsize); + return NULL; + } else { + return ptr; + } + + } else { + void *buf = calloc(numbuf + 1, bufsize); + + *realsize = numbuf * bufsize; + prefault(buf, *realsize); + return buf; + } +} + +void free_tracebuf(void *ptr, size_t realsize) +{ + if (g_options.usehuge) + munmap(ptr, realsize); + else + free(ptr); +} + +inline time_stamp get_time(void) +{ + time_stamp t; + + if (unlikely(g_options.cpucycles)) + t.u64 = get_cpu_cycle(); + else + t.odp = odp_time_global(); + return t; +} + +uint64_t time_to_ns(time_stamp t) +{ + double ns; + + if (unlikely(g_options.cpucycles)) { /* todo drop cpucycles choice to get rid of this? 
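With -g the timestamps are raw CPU cycles, so here ns = cycles * (1e9 / time_hz);
time_hz is the frequency measured at startup, or the value given as the -g argument.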
*/ + double hz = (double)m_shm->eo_context.time_hz; + + ns = (1000000000.0 / hz) * (double)t.u64; + } else { + ns = (double)odp_time_to_ns(t.odp); + } + return round(ns); +} + +time_stamp time_diff(time_stamp t2, time_stamp t1) +{ + time_stamp t; + + if (unlikely(g_options.cpucycles)) + t.u64 = t2.u64 - t1.u64; + else + t.odp = odp_time_diff(t2.odp, t1.odp); + + return t; +} + +time_stamp time_sum(time_stamp t1, time_stamp t2) +{ + time_stamp t; + + if (unlikely(g_options.cpucycles)) + t.u64 = t1.u64 + t2.u64; + else + t.odp = odp_time_sum(t1.odp, t2.odp); + return t; +} + +uint64_t linux_time_ns(void) +{ + struct timespec ts; + uint64_t ns; + + clock_gettime(CLOCK_MONOTONIC_RAW, &ts); + ns = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); + return ns; +} + +int arg_to_ns(const char *s, int64_t *val) +{ + char *endp; + int64_t num, mul = 1; + + num = strtol(s, &endp, 0); + if (num == 0 && *s != '0') + return 0; + + if (*endp != '\0') + switch (*endp) { + case 'n': + mul = 1; /* ns */ + break; + case 'u': + mul = 1000; /* us */ + break; + case 'm': + mul = 1000 * 1000; /* ms */ + break; + case 's': + mul = 1000 * 1000 * 1000; /* s */ + break; + default: + return 0; + } + + *val = num * mul; + return 1; +} + +void send_stop(app_eo_ctx_t *eo_ctx) +{ + em_status_t ret; + + if (!eo_ctx->stop_sent) { /* in case state change gets delayed on event overload */ + em_event_t event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); + + test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate stop event!\n"); + + app_msg_t *msg = em_event_pointer(event); + + msg->command = CMD_DONE; + msg->id = em_core_id(); + ret = em_send(event, eo_ctx->stop_q); + test_fatal_if(ret != EM_OK, "em_send(): %s %" PRI_STAT, __func__, ret); + eo_ctx->stop_sent++; + } +} + +void cleanup(app_eo_ctx_t *eo_ctx) +{ + time_stamp tz = {0}; + int cores = em_core_count(); + + for (int i = 0; i < cores; i++) { + eo_ctx->cdat[i].count = 0; + eo_ctx->cdat[i].cancelled = 0; + eo_ctx->cdat[i].jobs_deleted = 0; + eo_ctx->cdat[i].jobs = 0; + eo_ctx->cdat[i].acc_time = tz; + } +} + +void write_trace(app_eo_ctx_t *eo_ctx, const char *name) +{ + int cores = em_core_count(); + FILE *fle = stdout; + + if (strcmp(name, "stdout")) + fle = fopen(g_options.csv, "w"); + if (fle == NULL) { + APPL_PRINT("FAILED to open trace file\n"); + return; + } + + fprintf(fle, "\n\n#BEGIN TRACE FORMAT 2\n"); /* for offline analyzers */ + fprintf(fle, "res_ns,res_hz,period_ns,max_period_ns,clksrc,num_tmo,loops,"); + fprintf(fle, "traces,noskip,SW-ver,bg,mz,timers\n"); + fprintf(fle, "%lu,%lu,%lu,%lu,%d,%d,%d,%d,%d,%s,\"%d/%lu\",\"%d/%lu\",%d\n", + g_options.res_ns, + g_options.res_hz, + g_options.period_ns, + g_options.max_period_ns, + g_options.clock_src, + g_options.num_periodic, + g_options.num_runs, + g_options.tracebuf, + g_options.noskip, + VERSION, + g_options.bg_events, g_options.bg_time_ns / 1000UL, + g_options.mz_mb, g_options.mz_ns / 1000000UL, + g_options.num_timers); + fprintf(fle, "time_hz,meas_time_hz,timer_hz,meas_timer_hz,linux_hz\n"); + fprintf(fle, "%lu,%lu,%lu,%lu,%lu\n", + eo_ctx->time_hz, + eo_ctx->meas_time_hz, + eo_ctx->test_hz, + eo_ctx->meas_test_hz, + eo_ctx->linux_hz); + + fprintf(fle, "tmo_id,period_ns,period_ticks,ack_late"); + fprintf(fle, ",start_tick,start_ns,first_ns,first\n"); + for (int i = 0; i < g_options.num_periodic; i++) { + fprintf(fle, "%d,%lu,%lu,%lu,%lu,%lu,%lu,%lu\n", + i, eo_ctx->tmo_data[i].period_ns, + eo_ctx->tmo_data[i].ticks, + eo_ctx->tmo_data[i].ack_late, + eo_ctx->tmo_data[i].start, + 
time_to_ns(eo_ctx->tmo_data[i].start_ts),
+			(uint64_t)eo_ctx->tmo_data[i].first_ns,
+			eo_ctx->tmo_data[i].first);
+	}
+
+	fprintf(fle, "id,op,tick,time_ns,linux_time_ns,counter,core,timer\n");
+	for (int c = 0; c < cores; c++) {
+		for (int i = 0; i < eo_ctx->cdat[c].count; i++) {
+			uint64_t ns;
+
+			if (eo_ctx->cdat[c].trc[i].op >= OP_PROF_ACK) {
+				/* it's a tick diff */
+				ns = time_to_ns(eo_ctx->cdat[c].trc[i].linuxt);
+			} else { /* it's ns from linux */
+				ns = eo_ctx->cdat[c].trc[i].linuxt.u64;
+			}
+
+			fprintf(fle, "%d,%s,%lu,%lu,%lu,%d,%d,%d\n",
+				eo_ctx->cdat[c].trc[i].id,
+				op_labels[eo_ctx->cdat[c].trc[i].op],
+				eo_ctx->cdat[c].trc[i].tick,
+				time_to_ns(eo_ctx->cdat[c].trc[i].ts),
+				ns,
+				eo_ctx->cdat[c].trc[i].count,
+				c,
+				eo_ctx->cdat[c].trc[i].tidx);
+		}
+	}
+	fprintf(fle, "#END TRACE\n\n");
+	if (fle != stdout)
+		fclose(fle);
+}
+
+void show_global_stats(app_eo_ctx_t *eo_ctx)
+{
+	APPL_PRINT("\nTOTAL STATS:\n");
+	APPL_PRINT(" Num tmo: %lu\n", eo_ctx->global_stat.num_tmo);
+	APPL_PRINT(" Num late ack: %lu", eo_ctx->global_stat.num_late);
+	APPL_PRINT(" (%lu%%)\n",
+		   (eo_ctx->global_stat.num_late * 100) / eo_ctx->global_stat.num_tmo);
+	APPL_PRINT(" Max early arrival: %.1fus %s\n",
+		   ((double)eo_ctx->global_stat.max_early_ns) / 1000.0,
+		   (uint64_t)llabs(eo_ctx->global_stat.max_early_ns) > g_options.res_ns ? "!" : "");
+	APPL_PRINT(" Max diff from tgt: %.1fus (res %.1fus) %s\n",
+		   ((double)eo_ctx->global_stat.max_dev_ns) / 1000.0,
+		   (double)g_options.res_ns / 1000.0,
+		   (uint64_t)llabs(eo_ctx->global_stat.max_dev_ns) > (2 * g_options.res_ns) ?
+		   ">2x res!" : "");
+	APPL_PRINT(" Max CPU load: %d%%\n", eo_ctx->global_stat.max_cpu);
+	if (eo_ctx->global_stat.max_dispatch)
+		APPL_PRINT(" Max EO rcv time: %luns\n", eo_ctx->global_stat.max_dispatch);
+	APPL_PRINT("\n");
+}
+
+uint64_t random_tmo_ns(void)
+{
+	uint64_t r = random() % (g_options.max_period_ns - g_options.min_period_ns + 1);
+
+	return r + g_options.min_period_ns; /* ns between min/max period */
+}
+
+uint64_t random_work_ns(rnd_state_t *rng)
+{
+	uint64_t r;
+	int32_t r1;
+
+	random_r(&rng->rdata, &r1);
+	r = (uint64_t)r1;
+	if (r % 100 >= g_options.work_prop) /* probability of work, roughly */
+		return 0;
+
+	random_r(&rng->rdata, &r1);
+	r = (uint64_t)r1 % (g_options.max_work_ns - g_options.min_work_ns + 1);
+	return r + g_options.min_work_ns;
+}
+
+tmo_trace *find_tmo(app_eo_ctx_t *eo_ctx, int id, int count, int *last)
+{
+	int cores = em_core_count();
+	tmo_trace *trc = NULL;
+	int last_count = 0;
+
+	for (int c = 0; c < cores; c++) {
+		for (int i = 0; i < eo_ctx->cdat[c].count; i++) { /* find id */
+			if (eo_ctx->cdat[c].trc[i].op == OP_TMO &&
+			    eo_ctx->cdat[c].trc[i].id == id) { /* this TMO */
+				if (eo_ctx->cdat[c].trc[i].count == count) {
+					trc = &eo_ctx->cdat[c].trc[i];
+				} else {
+					/* always run through for last_count */
+					if (eo_ctx->cdat[c].trc[i].count > last_count)
+						last_count = eo_ctx->cdat[c].trc[i].count;
+				}
+			}
+		}
+	}
+	*last = last_count;
+	return trc;
+}
+
+int do_one_tmo(int id, app_eo_ctx_t *eo_ctx,
+	       time_stamp *min, time_stamp *max, time_stamp *first,
+	       int64_t *tgt_max, int64_t *max_early_ns)
+{
+	int num = 0;
+	time_stamp diff;
+	time_stamp prev = {0};
+	int last = 0;
+	int last_num;
+	uint64_t period_ns = eo_ctx->tmo_data[id].period_ns;
+	uint64_t first_ns = time_to_ns(eo_ctx->tmo_data[id].start_ts);
+	int64_t max_tgt_diff = 0;
+
+	max->u64 = 0;
+	min->u64 = INT64_MAX;
+
+	/* find in sequential order for diff to work.
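Each count value is searched across every core's trace buffer (find_tmo),
so this scan is roughly O(tracebuf^2 * cores) per timeout.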
TODO this gets very slow with many tmos */ + + for (int count = 1; count < g_options.tracebuf; count++) { + tmo_trace *tmo = find_tmo(eo_ctx, id, count, &last_num); + + if (!tmo) { + if (last != count - 1) + APPL_PRINT("MISSING TMO: id %d, count %d\n", id, count); + *tgt_max = max_tgt_diff; + return num; + } + last++; + if (!num) { /* skip first for min/max but store time */ + diff = time_diff(tmo->ts, eo_ctx->tmo_data[id].start_ts); + *first = diff; + if (eo_ctx->tmo_data[id].first_ns != eo_ctx->tmo_data[id].period_ns) + first_ns = time_to_ns(tmo->ts); /* ignore first */ + + } else { + diff = time_diff(tmo->ts, prev); + if (last_num > count) { /*skip last diff, could be while stopping */ + if (time_to_ns(diff) > time_to_ns(*max)) + *max = diff; + if (time_to_ns(diff) < time_to_ns(*min)) + *min = diff; + + /* calculate distance to target */ + uint64_t tgt = first_ns + count * period_ns; + int64_t tgtdiff = (int64_t)time_to_ns(tmo->ts) - (int64_t)tgt; + + if (llabs(max_tgt_diff) < llabs(tgtdiff)) + max_tgt_diff = tgtdiff; + if (tgtdiff < *max_early_ns) + *max_early_ns = tgtdiff; + } + } + prev = tmo->ts; + num++; + } + *tgt_max = max_tgt_diff; + return num; +} + +void timing_statistics(app_eo_ctx_t *eo_ctx) +{ + /* basic statistics, more with offline tools (-w) */ + time_stamp max_ts = {0}, min_ts = {0}, first_ts = {0}; + int64_t tgt_max = 0; + const int cores = em_core_count(); + uint64_t system_used = time_to_ns(time_diff(eo_ctx->stopped, eo_ctx->started)); + + for (int c = 0; c < cores; c++) { + core_data *cdat = &eo_ctx->cdat[c]; + uint64_t eo_used = time_to_ns(cdat->acc_time); + double perc = (double)eo_used / (double)system_used * 100; + + if (perc > 100) + perc = 100; + APPL_PRINT("STAT_CORE [%d]: %d tmos, %d jobs, EO used %.1f%% CPU time\n", + c, cdat->count, cdat->jobs, perc); + if (perc > eo_ctx->global_stat.max_cpu) + eo_ctx->global_stat.max_cpu = round(perc); + eo_ctx->global_stat.num_tmo += cdat->count; + } + + for (int id = 0; id < g_options.num_periodic; id++) { /* each timeout */ + tmo_setup *tmo_data = &eo_ctx->tmo_data[id]; + int64_t max_early = 0; + int num = do_one_tmo(id, eo_ctx, &min_ts, &max_ts, &first_ts, &tgt_max, &max_early); + + APPL_PRINT("STAT-TMO [%d]: %d tmos (tmr#%d), period %luns (", + id, num, tmo_data->tidx, tmo_data->period_ns); + if (num > 1) { + int64_t maxdiff = (int64_t)time_to_ns(max_ts) - + (int64_t)tmo_data->period_ns; + + int64_t mindiff = (int64_t)time_to_ns(min_ts) - + (int64_t)tmo_data->period_ns; + + APPL_PRINT("%lu ticks), interval %ldns ... +%ldns", + tmo_data->ticks, mindiff, maxdiff); + APPL_PRINT(" (%ldus ... 
+%ldus)\n", mindiff / 1000, maxdiff / 1000); + APPL_PRINT(" - Max diff from target %.2fus\n", (double)tgt_max / 1000); + if (llabs(tgt_max) > llabs(eo_ctx->global_stat.max_dev_ns)) + eo_ctx->global_stat.max_dev_ns = tgt_max; + if (max_early < eo_ctx->global_stat.max_early_ns) + eo_ctx->global_stat.max_early_ns = max_early; + } else { + APPL_PRINT("%lu ticks), 1st period %lu\n", + tmo_data->ticks, time_to_ns(first_ts)); + } + if (num == 0) + APPL_PRINT(" ERROR - no timeouts received\n"); + } + + if (!g_options.dispatch) + return; + + /* + * g_options.dispatch set + * + * Calculate EO rcv min-max-avg: + */ + uint64_t min = UINT64_MAX, max = 0, avg = 0; + time_stamp prev_ts = { 0 }; + int prev_count = 0; + int num = 0; + + for (int c = 0; c < cores; c++) { + for (int i = 0; i < g_options.tracebuf; i++) { + core_data *cdat = &eo_ctx->cdat[c]; + + if (cdat->trc[i].op == OP_PROF_ENTER_CB) { + prev_ts = cdat->trc[i].ts; + prev_count = cdat->trc[i].count; + } else if (cdat->trc[i].op == OP_PROF_EXIT_CB) { + time_stamp diff_ts; + uint64_t ns; + + if (prev_count != cdat->trc[i].count) + APPL_PRINT("No enter cnt=%d\n", prev_count); + + diff_ts = time_diff(cdat->trc[i].ts, prev_ts); + ns = time_to_ns(diff_ts); + + if (ns < min) + min = ns; + if (ns > max) + max = ns; + avg += ns; + num++; + } + } + } + + APPL_PRINT("%d dispatcher enter-exit samples\n", num); + APPL_PRINT("PROF-DISPATCH rcv time: min %luns, max %luns, avg %luns\n", + min, max, num > 0 ? avg / num : 0); + + if (max > eo_ctx->global_stat.max_dispatch) + eo_ctx->global_stat.max_dispatch = max; +} + +void profile_statistics(e_op op, int cores, app_eo_ctx_t *eo_ctx) +{ + uint64_t min = UINT64_MAX; + uint64_t max = 0, avg = 0, num = 0; + uint64_t t; + + for (int c = 0; c < cores; c++) { + for (int i = 0; i < g_options.tracebuf; i++) { + if (eo_ctx->cdat[c].trc[i].op == op) { + t = time_to_ns(eo_ctx->cdat[c].trc[i].linuxt); + if (min > t) + min = t; + if (max < t) + max = t; + avg += t; + num++; + } + } + } + if (num) + APPL_PRINT("%s: %lu samples: min %luns, max %luns, avg %luns\n", + op_labels[op], num, min, max, avg / num); +} + +void profile_all_stats(int cores, app_eo_ctx_t *eo_ctx) +{ + APPL_PRINT("API profile statistics:\n"); + profile_statistics(OP_PROF_CREATE, cores, eo_ctx); + profile_statistics(OP_PROF_SET, cores, eo_ctx); + profile_statistics(OP_PROF_ACK, cores, eo_ctx); + profile_statistics(OP_PROF_DELETE, cores, eo_ctx); + profile_statistics(OP_PROF_CANCEL, cores, eo_ctx); +} + +void analyze(app_eo_ctx_t *eo_ctx) +{ + int cores = em_core_count(); + int cancelled = 0; + int job_del = 0; + + timing_statistics(eo_ctx); + + if (g_options.profile) + profile_all_stats(cores, eo_ctx); + + for (int c = 0; c < cores; c++) { + cancelled += eo_ctx->cdat[c].cancelled; + job_del += eo_ctx->cdat[c].jobs_deleted; + } + + show_global_stats(eo_ctx); + + /* write trace file */ + if (g_options.csv != NULL) + write_trace(eo_ctx, g_options.csv); + + APPL_PRINT("%d/%d timeouts were cancelled\n", cancelled, g_options.num_periodic); + + if (g_options.bg_events) + APPL_PRINT("%d/%d bg jobs were deleted\n", job_del, g_options.bg_events); + if (g_options.mz_mb) + APPL_PRINT("%lu memzeros\n", eo_ctx->mz_count); + double span = time_to_ns(eo_ctx->stopped) - time_to_ns(eo_ctx->started); + + span /= 1000000000; + APPL_PRINT("Timer runtime %fs\n", span); + + test_fatal_if(cancelled != g_options.num_periodic, + "Not all tmos deleted (did not arrive at all?)\n"); +} + +int add_trace(app_eo_ctx_t *eo_ctx, int id, e_op op, uint64_t ns, int count, int tidx) +{ + 
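	/* record one trace entry into this core's buffer; return 0 once the stop limit is reached */
+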
int core = em_core_id();
+	tmo_trace *tmo = &eo_ctx->cdat[core].trc[eo_ctx->cdat[core].count];
+
+	if (eo_ctx->cdat[core].count < g_options.tracebuf) {
+		if (op < OP_PROF_ACK && (tidx != -1)) /* skip tick read for profiling ops, a bit faster */
+			tmo->tick = em_timer_current_tick(eo_ctx->test_tmr[tidx]);
+		tmo->op = op;
+		tmo->id = id;
+		tmo->ts = get_time();
+		tmo->linuxt.u64 = ns;
+		tmo->count = count;
+		tmo->tidx = tidx;
+		eo_ctx->cdat[core].count++;
+	}
+
+	return (eo_ctx->cdat[core].count >= g_options.stoplim) ? 0 : 1;
+}
+
+void send_bg_events(app_eo_ctx_t *eo_ctx)
+{
+	for (int n = 0; n < g_options.bg_events; n++) {
+		em_event_t event = em_alloc(sizeof(app_msg_t),
+					    EM_EVENT_TYPE_SW, m_shm->pool);
+		test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate bg event!\n");
+		app_msg_t *msg = em_event_pointer(event);
+
+		msg->command = CMD_BGWORK;
+		msg->count = 0;
+		msg->id = n + 1;
+		msg->arg = g_options.bg_time_ns;
+		test_fatal_if(em_send(event, eo_ctx->bg_q) != EM_OK, "Can't send bg event!\n");
+	}
+}
+
+void start_periodic(app_eo_ctx_t *eo_ctx)
+{
+	app_msg_t *msg;
+	em_event_t event;
+	em_tmo_t tmo;
+	em_tmo_flag_t flag = EM_TMO_FLAG_PERIODIC;
+	time_stamp t1 = {0};
+	uint64_t max_period = 0;
+	int tidx;
+
+	if (g_options.noskip)
+		flag |= EM_TMO_FLAG_NOSKIP;
+	eo_ctx->stop_sent = 0;
+	eo_ctx->started = get_time();
+
+	for (int i = 0; i < g_options.num_periodic; i++) {
+		event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool);
+		test_fatal_if(event == EM_EVENT_UNDEF,
+			      "Can't allocate test event (%ldB)!\n",
+			      sizeof(app_msg_t));
+
+		msg = em_event_pointer(event);
+		msg->command = CMD_TMO;
+		msg->count = 0;
+		msg->id = i;
+		tidx = random() % g_options.num_timers;
+		msg->tidx = tidx;
+
+		if (eo_ctx->tmo_data[i].handle == EM_TMO_UNDEF) { /* create tmo unless kept from last round (-q) */
+			if (g_options.profile)
+				t1 = get_time();
+			tmo = em_tmo_create(m_shm->test_tmr[tidx], flag, eo_ctx->test_q);
+			if (g_options.profile)
+				add_prof(eo_ctx, t1, OP_PROF_CREATE, msg);
+			test_fatal_if(tmo == EM_TMO_UNDEF, "Can't allocate test_tmo!\n");
+			eo_ctx->tmo_data[i].handle = tmo;
+		}
+		msg->tmo = eo_ctx->tmo_data[i].handle;
+		eo_ctx->tmo_data[i].tidx = tidx;
+
+		double ns = 1000000000 / (double)eo_ctx->test_hz;
+		uint64_t period;
+		uint64_t first = 0;
+		em_status_t stat;
+
+		if (g_options.period_ns) {
+			eo_ctx->tmo_data[i].period_ns = g_options.period_ns;
+		} else { /* 0: use random */
+			eo_ctx->tmo_data[i].period_ns = random_tmo_ns();
+		}
+		if (max_period < eo_ctx->tmo_data[i].period_ns)
+			max_period = eo_ctx->tmo_data[i].period_ns;
+		period = round((double)eo_ctx->tmo_data[i].period_ns / ns);
+
+		if (EXTRA_PRINTS && i == 0) {
+			APPL_PRINT("Timer Hz %lu ", eo_ctx->test_hz);
+			APPL_PRINT("= Period ns: %f => period %lu ticks\n", ns, period);
+		}
+
+		test_fatal_if(period < 1, "timer resolution is too low!\n");
+
+		if (g_options.first_ns < 0) /* use random */
+			eo_ctx->tmo_data[i].first_ns = random_tmo_ns();
+		else if (g_options.first_ns == 0) /* use period */
+			eo_ctx->tmo_data[i].first_ns = eo_ctx->tmo_data[i].period_ns;
+		else
+			eo_ctx->tmo_data[i].first_ns = g_options.first_ns;
+
+		first = round((double)eo_ctx->tmo_data[i].first_ns / ns);
+		if (!first)
+			first = 1;
+		eo_ctx->tmo_data[i].first = first;
+
+		eo_ctx->tmo_data[i].start_ts = get_time();
+		eo_ctx->tmo_data[i].start = em_timer_current_tick(m_shm->test_tmr[tidx]);
+		first += eo_ctx->tmo_data[i].start;
+		if (g_options.profile)
+			t1 = get_time();
+		stat = em_tmo_set_periodic(eo_ctx->tmo_data[i].handle, first, period, event);
+		if (g_options.profile)
+			add_prof(eo_ctx, t1, 
OP_PROF_SET, msg); + + if (unlikely(stat != EM_OK)) { + if (EXTRA_PRINTS) { + em_timer_tick_t now = em_timer_current_tick(eo_ctx->test_tmr[tidx]); + + APPL_PRINT("FAILED to set tmo, stat=%d: first=%lu, ", stat, first); + APPL_PRINT("now %lu (diff %ld), period=%lu\n", + now, (int64_t)first - (int64_t)now, period); + APPL_PRINT("(first_ns %lu)\n", eo_ctx->tmo_data[i].first_ns); + } + test_fatal_if(1, "Can't activate test tmo!\n"); + } + + eo_ctx->tmo_data[i].ack_late = 0; + eo_ctx->tmo_data[i].ticks = period; + eo_ctx->max_period = max_period; + eo_ctx->cooloff = (max_period / 1000000000ULL * 2) + 1; + if (eo_ctx->cooloff < 4) + eo_ctx->cooloff = 4; /* HB periods (sec) */ + } +} + +void add_prof(app_eo_ctx_t *eo_ctx, time_stamp t1, e_op op, app_msg_t *msg) +{ + time_stamp dif = time_diff(get_time(), t1); + + add_trace(eo_ctx, msg->id, op, dif.u64, msg->count, -1); + /* if this filled the buffer it's handled on next tmo */ +} + +int handle_periodic(app_eo_ctx_t *eo_ctx, em_event_t event) +{ + int core = em_core_id(); + app_msg_t *msg = (app_msg_t *)em_event_pointer(event); + int reuse = 1; + e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE); + time_stamp t1 = {0}; + em_tmo_stats_t ctrs = { 0 }; /* init to avoid gcc warning with LTO */ + em_status_t ret; + + msg->count++; + + /* this is to optionally test abnormal exits only */ + if (unlikely(g_options.abort != 0) && abs(g_options.abort) <= msg->count) { + if (g_options.abort < 0) { /* cause segfault to test exception here */ + uint64_t *fault = NULL; + /* coverity[FORWARD_NULL] */ + msg->arg = *fault; + } else { + abort(); + } + } + + if (likely(state == STATE_RUN)) { /* add tmo trace */ + if (!add_trace(eo_ctx, msg->id, OP_TMO, 0, msg->count, msg->tidx)) + send_stop(eo_ctx); /* triggers state change */ + + if (g_options.work_prop) { + uint64_t work = random_work_ns(&eo_ctx->cdat[core].rng); + + if (work) { /* add extra delay */ + time_stamp t2; + uint64_t ns = time_to_ns(get_time()); + + do { + t2 = get_time(); + } while (time_to_ns(t2) < (ns + work)); + add_trace(eo_ctx, msg->id, OP_WORK, work, msg->count, -1); + } + } + + /* only ack while in running state */ + add_trace(eo_ctx, msg->id, OP_ACK, 0, msg->count, msg->tidx); + if (g_options.profile) + t1 = get_time(); + em_status_t stat = em_tmo_ack(msg->tmo, event); + + if (g_options.profile) + add_prof(eo_ctx, t1, OP_PROF_ACK, msg); + if (unlikely(stat != EM_OK)) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "ack() fail!\n"); + + } else if (state == STATE_COOLOFF) { /* trace, but cancel */ + em_event_t tmo_event = EM_EVENT_UNDEF; + + add_trace(eo_ctx, msg->id, OP_TMO, 0, msg->count, msg->tidx); + em_tmo_get_stats(msg->tmo, &ctrs); + APPL_PRINT("STAT-ACK [%d]: %lu acks, %lu late, %lu skips\n", + msg->id, ctrs.num_acks, ctrs.num_late_ack, ctrs.num_period_skips); + eo_ctx->tmo_data[msg->id].ack_late = ctrs.num_late_ack; + eo_ctx->global_stat.num_late += ctrs.num_late_ack; + + if (unlikely(msg->id >= g_options.num_periodic)) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Corrupted tmo msg?\n"); + + if (g_options.profile) + t1 = get_time(); + if (g_options.no_del) { /* don't delete each round */ + ret = em_tmo_cancel(msg->tmo, &tmo_event); + if (g_options.profile) + add_prof(eo_ctx, t1, OP_PROF_CANCEL, msg); + test_fatal_if(ret == EM_OK, "tmo_cancel ok, expecting fail here!\n"); + } else { + ret = em_tmo_delete(msg->tmo, &tmo_event); + if (g_options.profile) + add_prof(eo_ctx, t1, OP_PROF_DELETE, msg); + test_fatal_if(ret != EM_OK, "tmo_delete failed, ret %" PRI_STAT "!\n", 
ret); + eo_ctx->tmo_data[msg->id].handle = EM_TMO_UNDEF; + } + + eo_ctx->cdat[core].cancelled++; + if (unlikely(tmo_event != EM_EVENT_UNDEF)) { /* not expected as we have the event */ + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "periodic tmo delete returned evt!\n"); + } + add_trace(eo_ctx, msg->id, OP_CANCEL, 0, msg->count, msg->tidx); + reuse = 0; /* free this last tmo event of canceled tmo */ + } else { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "Timeout in state %s!\n", state_labels[state]); + } + return reuse; +} + +void analyze_measure(app_eo_ctx_t *eo_ctx, uint64_t linuxns, uint64_t tmrtick, + time_stamp timetick) +{ + uint64_t linux_t2 = linux_time_ns(); + time_stamp time_t2 = get_time(); + uint64_t tmr_t2 = em_timer_current_tick(eo_ctx->test_tmr[0]); + + linux_t2 = linux_t2 - linuxns; + time_t2 = time_diff(time_t2, timetick); + tmr_t2 = tmr_t2 - tmrtick; + APPL_PRINT("%lu timer ticks in %luns (linux time) ", tmr_t2, linux_t2); + double hz = 1000000000 / + ((double)linux_t2 / (double)tmr_t2); + APPL_PRINT("=> %.1fHz (%.1fMHz). Timer reports %luHz\n", hz, hz / 1000000, eo_ctx->test_hz); + eo_ctx->meas_test_hz = round(hz); + hz = 1000000000 / ((double)linux_t2 / (double)time_t2.u64); + APPL_PRINT("Timestamp measured: %.1fHz (%.1fMHz)\n", hz, hz / 1000000); + eo_ctx->meas_time_hz = round(hz); + + if (g_options.cpucycles == 1) /* use measured */ + eo_ctx->time_hz = eo_ctx->meas_time_hz; + if (g_options.cpucycles > 1) /* freq given */ + eo_ctx->time_hz = (uint64_t)g_options.cpucycles; + + test_fatal_if(tmr_t2 < 1, "TIMER SEEMS NOT RUNNING AT ALL!?"); +} + +int do_memzero(app_msg_t *msg, app_eo_ctx_t *eo_ctx) +{ + static int count; + + add_trace(eo_ctx, -1, OP_MEMZERO, g_options.mz_mb, msg->count, -1); + if (eo_ctx->mz_data == NULL) { /* first time we only allocate */ + if (g_options.mz_huge) { + eo_ctx->mz_data = mmap(NULL, g_options.mz_mb * 1024UL * 1024UL, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE | + MAP_HUGETLB | MAP_LOCKED, + -1, 0); + if (eo_ctx->mz_data == MAP_FAILED) + eo_ctx->mz_data = NULL; + } else { + eo_ctx->mz_data = malloc(g_options.mz_mb * 1024UL * 1024UL); + } + test_fatal_if(eo_ctx->mz_data == NULL, "mz_mem reserve failed!"); + } else { + memset(eo_ctx->mz_data, 0, g_options.mz_mb * 1024UL * 1024UL); + eo_ctx->mz_count++; + } + add_trace(eo_ctx, -1, OP_MEMZERO_END, g_options.mz_mb, count, -1); + __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED); + return 0; +} + +int do_bg_work(em_event_t evt, app_eo_ctx_t *eo_ctx) +{ + app_msg_t *msg = (app_msg_t *)em_event_pointer(evt); + time_stamp t1 = get_time(); + time_stamp ts; + int32_t rnd; + int core = em_core_id(); + uint64_t sum = 0; + + if (__atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE) != STATE_RUN) { + eo_ctx->cdat[core].jobs_deleted++; + if (EXTRA_PRINTS) + APPL_PRINT("Deleting job after %u iterations\n", msg->count); + return 0; /* stop & delete */ + } + + if (g_options.jobs) + add_trace(eo_ctx, -1, OP_BGWORK, msg->arg, msg->count, -1); + + msg->count++; + eo_ctx->cdat[core].jobs++; + int blocks = g_options.bg_size / g_options.bg_chunk; + + random_r(&eo_ctx->cdat[core].rng.rdata, &rnd); + rnd = rnd % blocks; + uint64_t *dptr = (uint64_t *)((uintptr_t)eo_ctx->bg_data + rnd * g_options.bg_chunk); + /* printf("%d: %p - %p\n", rnd, eo_ctx->bg_data, dptr); */ + + do { + /* jump around memory reading from selected chunk */ + random_r(&eo_ctx->cdat[core].rng.rdata, &rnd); + rnd = rnd % (g_options.bg_chunk / sizeof(uint64_t)); + /* printf("%d: %p - %p\n", rnd, eo_ctx->bg_data, 
dptr+rnd); */ + sum += *(dptr + rnd); + ts = time_diff(get_time(), t1); + } while (time_to_ns(ts) < msg->arg); + + *dptr = sum; + + if (g_options.mz_mb && msg->id == 1) { /* use only one job stream for memzero */ + static time_stamp last_mz = {0}; + + if (msg->count < 10) /* don't do mz before some time */ + last_mz = get_time(); + ts = time_diff(get_time(), last_mz); + if (time_to_ns(ts) > g_options.mz_ns) { + do_memzero(msg, eo_ctx); + last_mz = get_time(); + } + } + + test_fatal_if(em_send(evt, eo_ctx->bg_q) != EM_OK, "Failed to send BG job event!"); + return 1; +} + +void handle_heartbeat(app_eo_ctx_t *eo_ctx, em_event_t event) +{ + app_msg_t *msg = (app_msg_t *)em_event_pointer(event); + int cores = em_core_count(); + int done = 0; + e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_SEQ_CST); + static int runs; + static uint64_t linuxns; + static uint64_t tmrtick; + static time_stamp timetick; + + /* heartbeat runs states of the test */ + + msg->count++; + add_trace(eo_ctx, -1, OP_HB, linux_time_ns(), msg->count, -1); + + if (EXTRA_PRINTS) + APPL_PRINT("."); + + switch (state) { + case STATE_INIT: + if (msg->count > eo_ctx->last_hbcount + INIT_WAIT) { + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + eo_ctx->last_hbcount = msg->count; + APPL_PRINT("ROUND %d\n", runs + 1); + APPL_PRINT("->Starting tick measurement\n"); + } + break; + + case STATE_MEASURE: /* measure timer frequencies */ + if (linuxns == 0) { + linuxns = linux_time_ns(); + timetick = get_time(); + /* use timer[0] for this always */ + tmrtick = em_timer_current_tick(eo_ctx->test_tmr[0]); + } + if (msg->count > eo_ctx->last_hbcount + MEAS_PERIOD) { + analyze_measure(eo_ctx, linuxns, tmrtick, timetick); + linuxns = 0; + /* start new run */ + if (g_options.num_runs > 1) + APPL_PRINT("** Round %d\n", runs + 1); + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); + } + break; + + case STATE_STABILIZE: /* give some time to get up */ + if (g_options.bg_events) + send_bg_events(eo_ctx); + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); + if (EXTRA_PRINTS) + APPL_PRINT("->Starting tmos\n"); + start_periodic(eo_ctx); + eo_ctx->last_hbcount = msg->count; + break; + + case STATE_RUN: /* run the test, avoid prints */ + for (int i = 0; i < cores; i++) { + if (eo_ctx->cdat[i].count >= + g_options.tracebuf) { + done++; + break; + } + } + if (done) { + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); + eo_ctx->last_hbcount = msg->count; + if (EXTRA_PRINTS) + APPL_PRINT("->All cores done\n"); + } + break; + + case STATE_COOLOFF: /* stop further timeouts */ + if (msg->count > (eo_ctx->last_hbcount + eo_ctx->cooloff)) { + __atomic_fetch_add(&eo_ctx->state, 1, __ATOMIC_SEQ_CST); + add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), eo_ctx->state, -1); + eo_ctx->last_hbcount = msg->count; + if (EXTRA_PRINTS) + APPL_PRINT("->Starting analyze\n"); + } + break; + + case STATE_ANALYZE: /* expected to be stopped, analyze data */ + APPL_PRINT("\n"); + analyze(eo_ctx); + cleanup(eo_ctx); + /* re-start test cycle */ + __atomic_store_n(&eo_ctx->state, STATE_INIT, __ATOMIC_SEQ_CST); + runs++; + if (runs >= g_options.num_runs && g_options.num_runs != 0) { + /* terminate test app */ + APPL_PRINT("%d runs done\n", runs); + raise(SIGINT); + } + eo_ctx->last_hbcount = msg->count; + break; + + default: + 
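+		/* any other state value means the test state machine is corrupted */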
test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid test state"); + } + + /* heartbeat never stops */ + if (em_tmo_ack(eo_ctx->heartbeat_tmo, event) != EM_OK) + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "HB ack() fail!\n"); +} + +void usage(void) +{ + printf("%s\n", instructions); + + printf("Usage:\n"); + for (int i = 0; ; i++) { + if (longopts[i].name == NULL || descopts[i] == NULL) + break; + printf("--%s or -%c: %s\n", longopts[i].name, longopts[i].val, descopts[i]); + } +} + +int parse_my_args(int first, int argc, char *argv[]) +{ + optind = first + 1; /* skip '--' */ + while (1) { + int opt; + int long_index; + char *endptr; + int64_t num; + + opt = getopt_long(argc, argv, shortopts, longopts, &long_index); + + if (opt == -1) + break; /* No more options */ + + switch (opt) { + case 's': { + g_options.noskip = 1; + } + break; + case 'a': { + g_options.profile = 1; + } + break; + case 'b': { + g_options.jobs = 1; + } + break; + case 'd': { + g_options.dispatch = 1; + } + break; + case 'i': { + g_options.info_only = 1; + } + break; + case 'u': { + g_options.usehuge = 1; + } + break; + case 'q': { + g_options.no_del = 1; + } + break; + case 'g': { + g_options.cpucycles = 1; + if (optarg != NULL) { /* optional arg */ + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 2) + return 0; + g_options.cpucycles = num; + } + } + break; + case 'w': { /* optional arg */ + g_options.csv = "stdout"; + if (optarg != NULL) + g_options.csv = optarg; + } + break; + case 'm': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < 1) + return 0; + g_options.max_period_ns = (uint64_t)num; + } + break; + case 'l': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < 1) + return 0; + g_options.min_period_ns = num; + } + break; + case 't': { + unsigned long size, perc; + + num = sscanf(optarg, "%lu,%lu", &size, &perc); + if (num == 0 || size < 10 || + sizeof(tmo_trace) * size > MAX_TMO_BYTES) + return 0; + g_options.tracebuf = size; + if (num == 2 && perc > 100) + return 0; + if (num == 2) + g_options.stoplim = ((perc * size) / 100); + else + g_options.stoplim = ((STOP_THRESHOLD * size) / 100); + } + break; + case 'e': { + unsigned int min_us, max_us, prop; + + if (sscanf(optarg, "%u,%u,%u", &min_us, &max_us, &prop) != 3) + return 0; + if (prop > 100 || max_us < 1) + return 0; + g_options.min_work_ns = 1000ULL * min_us; + g_options.max_work_ns = 1000ULL * max_us; + g_options.work_prop = prop; + } + break; + case 'o': { + unsigned int mb; + uint64_t ms; + unsigned int hp = 0; + + if (sscanf(optarg, "%u,%lu,%u", &mb, &ms, &hp) < 2) + return 0; + if (mb < 1 || ms < 1) + return 0; + g_options.mz_mb = mb; + g_options.mz_ns = ms * 1000UL * 1000UL; + if (hp) + g_options.mz_huge = 1; + } + break; + case 'j': { + unsigned int evts, us, kb, chunk; + + num = sscanf(optarg, "%u,%u,%u,%u", &evts, &us, &kb, &chunk); + if (num == 0 || evts < 1) + return 0; + g_options.bg_events = evts; + if (num > 1 && us) + g_options.bg_time_ns = us * 1000ULL; + if (num > 2 && kb) + g_options.bg_size = kb * 1024; + if (num > 3 && chunk) + g_options.bg_chunk = chunk * 1024; + if (g_options.bg_chunk > g_options.bg_size) + return 0; + } + break; + case 'n': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.num_periodic = num; + } + break; + case 'p': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < 0) + return 0; + g_options.period_ns = num; + } + break; + case 'f': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < -1) + return 0; + 
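+			/* num == -1: randomize the first period; 0: first period equals the period */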
g_options.first_ns = num; + } + break; + case 'c': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.clock_src = num; + } + break; + case 'r': { + if (!arg_to_ns(optarg, &num)) + return 0; + if (num < 0) + return 0; + g_options.res_ns = num; + } + break; + case 'z': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.res_hz = num; + g_options.res_ns = 0; + } + break; + case 'x': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 0) + return 0; + g_options.num_runs = num; + } + break; + case 'k': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0') + return 0; + g_options.abort = num; + } + break; + + case 'y': { + num = strtol(optarg, &endptr, 0); + if (*endptr != '\0' || num < 1) + return 0; + g_options.num_timers = num; + } + break; + + case 'h': + default: + opterr = 0; + usage(); + return 0; + } + } + + optind = 1; /* cm_setup() to parse again */ + return 1; +} + +/** + * Before EM - Init + */ +void test_init(void) +{ + int core = em_core_id(); + + /* first core creates ShMem */ + if (core == 0) { + m_shm = env_shared_reserve("Timer_test", sizeof(timer_app_shm_t)); + /* initialize it */ + if (m_shm) + memset(m_shm, 0, sizeof(timer_app_shm_t)); + + APPL_PRINT("%ldk shared memory for app context\n", sizeof(timer_app_shm_t) / 1000); + + } else { + m_shm = env_shared_lookup("Timer_test"); + } + + if (m_shm == NULL) { + test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, + "ShMem init failed on EM-core: %u", + em_core_id()); + } + + APPL_PRINT("core %d: %s done\n", core, __func__); +} + +/** + * Startup of the timer test EM application + */ +void test_start(appl_conf_t *const appl_conf) +{ + em_eo_t eo; + em_timer_attr_t attr; + em_queue_t queue; + em_status_t stat; + app_eo_ctx_t *eo_ctx; + em_timer_res_param_t res_capa; + em_timer_capability_t capa = { 0 }; /* init to avoid gcc warning with LTO */ + em_core_mask_t mask; + em_queue_group_t grp; + em_atomic_group_t agrp; + + if (appl_conf->num_procs > 1) { + APPL_PRINT("\n!! 
Multiple PROCESS MODE NOT SUPPORTED !!\n\n"); + raise(SIGINT); + return; + } + + if (appl_conf->num_pools >= 1) + m_shm->pool = appl_conf->pools[0]; + else + m_shm->pool = EM_POOL_DEFAULT; + + eo_ctx = &m_shm->eo_context; + memset(eo_ctx, 0, sizeof(app_eo_ctx_t)); + eo_ctx->tmo_data = calloc(g_options.num_periodic, sizeof(tmo_setup)); + test_fatal_if(eo_ctx->tmo_data == NULL, "Can't alloc tmo_setups"); + + eo = em_eo_create(APP_EO_NAME, app_eo_start, app_eo_start_local, + app_eo_stop, app_eo_stop_local, app_eo_receive, + eo_ctx); + test_fatal_if(eo == EM_EO_UNDEF, "Failed to create EO!"); + + stat = em_register_error_handler(my_error_handler); + test_fatal_if(stat != EM_OK, "Failed to register error handler"); + + /* Create atomic group and queues for control messages */ + stat = em_queue_group_get_mask(EM_QUEUE_GROUP_DEFAULT, &mask); + test_fatal_if(stat != EM_OK, "Failed to get default Q grp mask!"); + + grp = em_queue_group_create_sync("CTRL_GRP", &mask); + test_fatal_if(grp == EM_QUEUE_GROUP_UNDEF, "Failed to create Q grp!"); + agrp = em_atomic_group_create("CTRL_AGRP", grp); + test_fatal_if(agrp == EM_ATOMIC_GROUP_UNDEF, "Failed to create atomic grp!"); + eo_ctx->agrp = agrp; + + queue = em_queue_create_ag("Control Q", EM_QUEUE_PRIO_NORMAL, agrp, NULL); + stat = em_eo_add_queue_sync(eo, queue); + test_fatal_if(stat != EM_OK, "Failed to create hb queue!"); + eo_ctx->hb_q = queue; + + queue = em_queue_create_ag("Stop Q", EM_QUEUE_PRIO_HIGHEST, agrp, NULL); + stat = em_eo_add_queue_sync(eo, queue); + test_fatal_if(stat != EM_OK, "Failed to create stop queue!"); + eo_ctx->stop_q = queue; + + /* parallel high priority for timeout handling*/ + queue = em_queue_create("Tmo Q", + EM_QUEUE_TYPE_PARALLEL, + EM_QUEUE_PRIO_HIGH, + EM_QUEUE_GROUP_DEFAULT, NULL); + stat = em_eo_add_queue_sync(eo, queue); + test_fatal_if(stat != EM_OK, "Failed to create queue!"); + eo_ctx->test_q = queue; + + /* another parallel low priority for background work*/ + queue = em_queue_create("BG Q", + EM_QUEUE_TYPE_PARALLEL, + EM_QUEUE_PRIO_LOWEST, + EM_QUEUE_GROUP_DEFAULT, NULL); + stat = em_eo_add_queue_sync(eo, queue); + test_fatal_if(stat != EM_OK, "Failed to add queue!"); + eo_ctx->bg_q = queue; + + /* create two timers so HB and tests can be independent */ + em_timer_attr_init(&attr); + strncpy(attr.name, "HBTimer", EM_TIMER_NAME_LEN); + m_shm->hb_tmr = em_timer_create(&attr); + test_fatal_if(m_shm->hb_tmr == EM_TIMER_UNDEF, + "Failed to create HB timer!"); + + /* test timer */ + test_fatal_if(g_options.res_ns && g_options.res_hz, "Give resolution in ns OR hz!"); + + em_timer_attr_init(&attr); + stat = em_timer_capability(&capa, g_options.clock_src); + + APPL_PRINT("Timer capability for clksrc %d:\n", g_options.clock_src); + APPL_PRINT(" maximum timers: %d\n", capa.max_timers); + APPL_PRINT(" max_res %luns %luhz min_tmo %lu max_tmo %lu\n", + capa.max_res.res_ns, capa.max_res.res_hz, + capa.max_res.min_tmo, capa.max_res.max_tmo); + APPL_PRINT(" max_tmo %luns %luhz min_tmo %lu max_tmo %lu\n", + capa.max_tmo.res_ns, capa.max_tmo.res_hz, + capa.max_tmo.min_tmo, capa.max_tmo.max_tmo); + + test_fatal_if(stat != EM_OK, "Given clk_src is not supported\n"); + memset(&res_capa, 0, sizeof(em_timer_res_param_t)); + if (!g_options.res_hz) { + res_capa.res_ns = g_options.res_ns == 0 ? 
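+				/* res_ns 0: default to the finest (max_res) resolution the clk_src supports */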
capa.max_res.res_ns : g_options.res_ns; + APPL_PRINT("Trying %lu ns resolution capability on clk %d\n", + res_capa.res_ns, g_options.clock_src); + } else { + res_capa.res_hz = g_options.res_hz; + APPL_PRINT("Trying %lu Hz resolution capability on clk %d\n", + res_capa.res_hz, g_options.clock_src); + } + + APPL_PRINT("Asking timer capability for clksrc %d:\n", g_options.clock_src); + APPL_PRINT("%luns %luhz min_tmo %lu max_tmo %lu\n", + res_capa.res_ns, res_capa.res_hz, + res_capa.min_tmo, res_capa.max_tmo); + stat = em_timer_res_capability(&res_capa, g_options.clock_src); + APPL_PRINT("-> Timer res_capability:\n"); + APPL_PRINT("max_res %luns %luhz min_tmo %lu max_tmo %lu\n", + res_capa.res_ns, res_capa.res_hz, + res_capa.min_tmo, res_capa.max_tmo); + test_fatal_if(stat != EM_OK, "Given resolution is not supported (ret %d)\n", stat); + + if (!g_options.max_period_ns) { + g_options.max_period_ns = DEF_MAX_PERIOD; + if (g_options.max_period_ns > res_capa.max_tmo) + g_options.max_period_ns = res_capa.max_tmo; + } + if (!g_options.min_period_ns) { + g_options.min_period_ns = res_capa.res_ns * DEF_MIN_PERIOD; + if (g_options.min_period_ns < res_capa.min_tmo) + g_options.min_period_ns = res_capa.min_tmo; + } + + if (g_options.info_only) { /* stop here */ + raise(SIGINT); + } else { + strncpy(attr.name, "TestTimer", EM_TIMER_NAME_LEN); + attr.resparam = res_capa; + if (g_options.res_hz) /* can only have one */ + attr.resparam.res_ns = 0; + else + attr.resparam.res_hz = 0; + attr.num_tmo = g_options.num_periodic; + attr.resparam.max_tmo = g_options.max_period_ns; /* don't need more */ + for (int i = 0; i < g_options.num_timers; i++) { + m_shm->test_tmr[i] = em_timer_create(&attr); + test_fatal_if(m_shm->test_tmr[i] == EM_TIMER_UNDEF, "Failed to create test timer!"); + eo_ctx->test_tmr[i] = m_shm->test_tmr[i]; + } + APPL_PRINT("%d test timers created\n", g_options.num_timers); + g_options.res_ns = attr.resparam.res_ns; + } + + /* Start EO */ + stat = em_eo_start_sync(eo, NULL, NULL); + test_fatal_if(stat != EM_OK, "Failed to start EO!"); + + mlockall(MCL_FUTURE); +} + +void +test_stop(appl_conf_t *const appl_conf) +{ + const int core = em_core_id(); + em_status_t ret; + em_eo_t eo; + + if (appl_conf->num_procs > 1) { + APPL_PRINT("%s(): skip\n", __func__); + return; + } + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + eo = em_eo_find(APP_EO_NAME); + test_fatal_if(eo == EM_EO_UNDEF, "Could not find EO:%s", APP_EO_NAME); + + ret = em_eo_stop_sync(eo); + test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " stop:%" PRI_STAT "", eo, ret); + ret = em_eo_delete(eo); + test_fatal_if(ret != EM_OK, "EO:%" PRI_EO " delete:%" PRI_STAT "", eo, ret); + + ret = em_timer_delete(m_shm->hb_tmr); + test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", + m_shm->hb_tmr, ret); + for (int i = 0; i < g_options.num_timers; i++) { + if (m_shm->test_tmr[i] != EM_TIMER_UNDEF) { + ret = em_timer_delete(m_shm->test_tmr[i]); + test_fatal_if(ret != EM_OK, "Timer:%" PRI_TMR " delete:%" PRI_STAT "", + m_shm->test_tmr[i], ret); + } + } + free(m_shm->eo_context.tmo_data); +} + +void test_term(void) +{ + int core = em_core_id(); + + APPL_PRINT("%s() on EM-core %d\n", __func__, core); + + if (m_shm != NULL) { + em_unregister_error_handler(); + env_shared_free(m_shm); + m_shm = NULL; + } +} + +static em_status_t app_eo_start(void *eo_context, em_eo_t eo, const em_eo_conf_t *conf) +{ + #define PRINT_MAX_TMRS 4 + em_timer_attr_t attr; + em_timer_t tmr[PRINT_MAX_TMRS]; + int num_timers; + app_msg_t *msg; + struct 
timespec ts;
+	uint64_t period;
+	em_event_t event;
+	app_eo_ctx_t *eo_ctx = (app_eo_ctx_t *)eo_context;
+
+	(void)eo;
+	(void)conf;
+
+	if (g_options.info_only)
+		return EM_OK;
+
+	num_timers = em_timer_get_all(tmr, PRINT_MAX_TMRS);
+
+	for (int i = 0;
+	     i < (num_timers > PRINT_MAX_TMRS ? PRINT_MAX_TMRS : num_timers);
+	     i++) {
+		if (em_timer_get_attr(tmr[i], &attr) != EM_OK) {
+			APPL_ERROR("Can't get timer info\n");
+			return EM_ERR_BAD_ID;
+		}
+		APPL_PRINT("Timer \"%s\" info:\n", attr.name);
+		APPL_PRINT(" -resolution: %" PRIu64 " ns\n", attr.resparam.res_ns);
+		APPL_PRINT(" -max_tmo: %" PRIu64 " ms\n", attr.resparam.max_tmo / 1000);
+		APPL_PRINT(" -num_tmo: %d\n", attr.num_tmo);
+		APPL_PRINT(" -clk_src: %d\n", attr.resparam.clk_src);
+		APPL_PRINT(" -tick Hz: %" PRIu64 " hz\n",
+			   em_timer_get_freq(tmr[i]));
+	}
+
+	APPL_PRINT("\nActive run options:\n");
+	APPL_PRINT(" num timers: %d\n", g_options.num_timers);
+	APPL_PRINT(" num timeouts: %d\n", g_options.num_periodic);
+	if (g_options.res_hz) {
+		APPL_PRINT(" resolution: %lu Hz (%f MHz)\n", g_options.res_hz,
+			   (double)g_options.res_hz / 1000000);
+	} else {
+		APPL_PRINT(" resolution: %lu ns (%fs)\n", g_options.res_ns,
+			   (double)g_options.res_ns / 1000000000);
+	}
+	if (g_options.period_ns == 0)
+		APPL_PRINT(" period: random\n");
+	else
+		APPL_PRINT(" period: %lu ns (%fs%s)\n", g_options.period_ns,
+			   (double)g_options.period_ns / 1000000000,
+			   g_options.period_ns == 0 ? " (random)" : "");
+	if (g_options.first_ns == -1)
+		APPL_PRINT(" first period: random\n");
+	else
+		APPL_PRINT(" first period: %ld ns (%fs%s)\n", g_options.first_ns,
+			   (double)g_options.first_ns / 1000000000,
+			   g_options.first_ns == 0 ? " (=period)" : "");
+	APPL_PRINT(" max period: %luns (%fs)\n", g_options.max_period_ns,
+		   (double)g_options.max_period_ns / 1000000000);
+	APPL_PRINT(" min period: %luns (%fs)\n", g_options.min_period_ns,
+		   (double)g_options.min_period_ns / 1000000000);
+	APPL_PRINT(" csv: %s\n",
+		   g_options.csv == NULL ? "(no)" : g_options.csv);
+	APPL_PRINT(" tracebuffer: %d tmo events (%luKiB)\n",
+		   g_options.tracebuf,
+		   g_options.tracebuf * sizeof(tmo_trace) / 1024);
+	APPL_PRINT(" stop limit: %d tmo events\n", g_options.stoplim);
+	APPL_PRINT(" use NOSKIP: %s\n", g_options.noskip ? "yes" : "no");
+	APPL_PRINT(" profile API: %s\n", g_options.profile ? "yes" : "no");
+	APPL_PRINT(" dispatch prof:%s\n", g_options.dispatch ? "yes" : "no");
+	APPL_PRINT(" time stamps: %s\n", g_options.cpucycles ?
+		   "CPU cycles" : "odp_time()");
+	APPL_PRINT(" work probability:%u%%\n", g_options.work_prop);
+	if (g_options.work_prop) {
+		APPL_PRINT(" min_work: %luns\n", g_options.min_work_ns);
+		APPL_PRINT(" max_work: %luns\n", g_options.max_work_ns);
+	}
+	APPL_PRINT(" bg events: %u\n", g_options.bg_events);
+	eo_ctx->bg_data = NULL;
+	if (g_options.bg_events) {
+		APPL_PRINT(" bg work: %luus\n", g_options.bg_time_ns / 1000);
+		APPL_PRINT(" bg data: %ukiB\n", g_options.bg_size / 1024);
+		APPL_PRINT(" bg chunk: %ukiB (%u blks)\n",
+			   g_options.bg_chunk / 1024,
+			   g_options.bg_size / g_options.bg_chunk);
+		APPL_PRINT(" bg trace: %s\n", g_options.jobs ? "yes" : "no");
+
+		eo_ctx->bg_data = malloc(g_options.bg_size);
+		test_fatal_if(eo_ctx->bg_data == NULL,
+			      "Can't allocate bg work data (%dkiB)!\n",
+			      g_options.bg_size / 1024);
+	}
+	APPL_PRINT(" memzero: ");
+	if (g_options.mz_mb)
+		APPL_PRINT("%uMB %severy %lums\n",
+			   g_options.mz_mb,
+			   g_options.mz_huge ?
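+			   /* mz_huge: the memzero buffer was mmap'ed with MAP_HUGETLB (see do_memzero) */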
"(mmap huge) " : "", + g_options.mz_ns / 1000000UL); + else + APPL_PRINT("no\n"); + + if (g_options.abort != 0) { + APPL_PRINT(" abort after: "); + if (g_options.abort) + APPL_PRINT("%d%s\n", + g_options.abort, g_options.abort < 0 ? "(segfault)" : ""); + else + APPL_PRINT("0 (no)\n"); + } + if (g_options.num_runs != 1) + APPL_PRINT(" delete tmos: %s", g_options.no_del ? "no" : "yes"); + + APPL_PRINT("\nTracing first %d tmo events\n", g_options.tracebuf); + + if (g_options.bg_events) + prefault(eo_ctx->bg_data, g_options.bg_size); + + /* create periodic timeout for heartbeat */ + eo_ctx->heartbeat_tmo = em_tmo_create(m_shm->hb_tmr, EM_TMO_FLAG_PERIODIC, eo_ctx->hb_q); + test_fatal_if(eo_ctx->heartbeat_tmo == EM_TMO_UNDEF, + "Can't allocate heartbeat_tmo!\n"); + + event = em_alloc(sizeof(app_msg_t), EM_EVENT_TYPE_SW, m_shm->pool); + test_fatal_if(event == EM_EVENT_UNDEF, "Can't allocate event (%ldB)!\n", + sizeof(app_msg_t)); + + msg = em_event_pointer(event); + msg->command = CMD_HEARTBEAT; + msg->count = 0; + msg->id = -1; + eo_ctx->hb_hz = em_timer_get_freq(m_shm->hb_tmr); + if (eo_ctx->hb_hz < 10) + APPL_ERROR("WARNING: HB timer hz very low!\n"); + else + APPL_PRINT("HB timer frequency is %lu\n", eo_ctx->hb_hz); + + period = eo_ctx->hb_hz; /* 1s */ + test_fatal_if(period < 1, "timer resolution is too low!\n"); + + /* linux time check */ + test_fatal_if(clock_getres(CLOCK_MONOTONIC, &ts) != 0, + "clock_getres() failed!\n"); + + period = ts.tv_nsec + (ts.tv_sec * 1000000000ULL); + eo_ctx->linux_hz = 1000000000ULL / period; + APPL_PRINT("Linux reports clock running at %" PRIu64 " hz\n", eo_ctx->linux_hz); + APPL_PRINT("ODP says time_global runs at %luHz\n", odp_time_global_res()); + if (!g_options.cpucycles) + eo_ctx->time_hz = odp_time_global_res(); + + /* start heartbeat */ + __atomic_store_n(&eo_ctx->state, STATE_INIT, __ATOMIC_SEQ_CST); + + em_status_t stat = em_tmo_set_periodic(eo_ctx->heartbeat_tmo, 0, eo_ctx->hb_hz, event); + + if (EXTRA_PRINTS && stat != EM_OK) + APPL_PRINT("FAILED to set HB tmo, stat=%d: period=%lu\n", stat, eo_ctx->hb_hz); + test_fatal_if(stat != EM_OK, "Can't activate heartbeat tmo!\n"); + + eo_ctx->test_hz = em_timer_get_freq(m_shm->test_tmr[0]); /* use timer[0] */ + test_fatal_if(eo_ctx->test_hz == 0, + "get_freq() failed, timer:%" PRI_TMR "", m_shm->test_tmr[0]); + + stat = em_dispatch_register_enter_cb(enter_cb); + test_fatal_if(stat != EM_OK, "enter_cb() register failed!"); + stat = em_dispatch_register_exit_cb(exit_cb); + test_fatal_if(stat != EM_OK, "exit_cb() register failed!"); + + srandom(time(NULL)); + if (g_options.max_work_ns > RAND_MAX || + g_options.max_period_ns > RAND_MAX) { + double s = (double)RAND_MAX / (double)eo_ctx->test_hz; + + APPL_PRINT("WARNING: rnd number range is less than max values (up to %.4fs)\n", s); + } + if (EXTRA_PRINTS) + APPL_PRINT("WARNING: extra prints enabled, expect some jitter\n"); + + return EM_OK; +} + +/** + * @private + * + * EO per thread start function. 
+ */ +static em_status_t app_eo_start_local(void *eo_context, em_eo_t eo) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + int core = em_core_id(); + + (void)eo; + + if (EXTRA_PRINTS) + APPL_PRINT("EO local start\n"); + test_fatal_if(core >= MAX_CORES, "Too many cores!"); + eo_ctx->cdat[core].trc = allocate_tracebuf(g_options.tracebuf, sizeof(tmo_trace), + &eo_ctx->cdat[core].trc_size); + test_fatal_if(eo_ctx->cdat[core].trc == NULL, "Failed to allocate trace buffer!"); + eo_ctx->cdat[core].count = 0; + eo_ctx->cdat[core].cancelled = 0; + eo_ctx->cdat[core].jobs_deleted = 0; + eo_ctx->cdat[core].jobs = 0; + + memset(&eo_ctx->cdat[core].rng, 0, sizeof(rnd_state_t)); + initstate_r(time(NULL), eo_ctx->cdat[core].rng.rndstate, RND_STATE_BUF, + &eo_ctx->cdat[core].rng.rdata); + srandom_r(time(NULL), &eo_ctx->cdat[core].rng.rdata); + return EM_OK; +} + +/** + * @private + * + * EO stop function. + */ +static em_status_t app_eo_stop(void *eo_context, em_eo_t eo) +{ + app_eo_ctx_t *const eo_ctx = eo_context; + em_event_t event = EM_EVENT_UNDEF; + em_status_t ret; + + if (EXTRA_PRINTS) + APPL_PRINT("EO stop\n"); + + if (eo_ctx->heartbeat_tmo != EM_TMO_UNDEF) { + em_tmo_delete(eo_ctx->heartbeat_tmo, &event); + eo_ctx->heartbeat_tmo = EM_TMO_UNDEF; + if (event != EM_EVENT_UNDEF) + em_free(event); + } + + /* cancel all test timers in case test didn't complete */ + int dcount = 0; + + for (int i = 0; i < g_options.num_periodic; i++) { + if (eo_ctx->tmo_data[i].handle != EM_TMO_UNDEF) { + event = EM_EVENT_UNDEF; + em_tmo_delete(eo_ctx->tmo_data[i].handle, &event); + eo_ctx->tmo_data[i].handle = EM_TMO_UNDEF; + if (event != EM_EVENT_UNDEF) + em_free(event); + dcount++; + } + } + if (dcount) + APPL_PRINT("NOTE: deleted %d still active tmos\n", dcount); + + ret = em_eo_remove_queue_all_sync(eo, EM_TRUE); /* remove and delete */ + test_fatal_if(ret != EM_OK, + "EO remove queue all:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); + + ret = em_atomic_group_delete(((app_eo_ctx_t *)eo_context)->agrp); + test_fatal_if(ret != EM_OK, + "EO remove atomic grp:%" PRI_STAT " EO:%" PRI_EO "", ret, eo); + + if (!g_options.info_only) { + ret = em_dispatch_unregister_enter_cb(enter_cb); + test_fatal_if(ret != EM_OK, "enter_cb() unregister:%" PRI_STAT, ret); + ret = em_dispatch_unregister_exit_cb(exit_cb); + test_fatal_if(ret != EM_OK, "exit_cb() unregister:%" PRI_STAT, ret); + } + + if (eo_ctx->bg_data != NULL) + free(eo_ctx->bg_data); + eo_ctx->bg_data = NULL; + if (eo_ctx->mz_data != NULL) { + if (g_options.mz_huge) + munmap(eo_ctx->mz_data, g_options.mz_mb * 1024UL * 1024UL); + else + free(eo_ctx->mz_data); + + eo_ctx->mz_data = NULL; + } + + return EM_OK; +} + +/** + * @private + * + * EO stop local function. 
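+ *
+ * Frees the per-core trace buffer allocated in app_eo_start_local().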
+ */
+static em_status_t app_eo_stop_local(void *eo_context, em_eo_t eo)
+{
+	int core = em_core_id();
+	app_eo_ctx_t *const eo_ctx = eo_context;
+
+	(void)eo;
+
+	if (EXTRA_PRINTS)
+		APPL_PRINT("EO local stop\n");
+	free_tracebuf(eo_ctx->cdat[core].trc, eo_ctx->cdat[core].trc_size);
+	eo_ctx->cdat[core].trc = NULL;
+	return EM_OK;
+}
+
+/**
+ * @private
+ *
+ * EO receive function.
+ */
+static void app_eo_receive(void *eo_context, em_event_t event,
+			   em_event_type_t type, em_queue_t queue,
+			   void *q_context)
+{
+	app_eo_ctx_t *const eo_ctx = eo_context;
+	int reuse = 0;
+	static int last_count;
+
+	(void)q_context;
+
+	if (type == EM_EVENT_TYPE_SW) {
+		app_msg_t *msgin = (app_msg_t *)em_event_pointer(event);
+
+		switch (msgin->command) {
+		case CMD_TMO:
+			reuse = handle_periodic(eo_ctx, event);
+			break;
+
+		case CMD_HEARTBEAT: /* uses atomic queue */
+			handle_heartbeat(eo_ctx, event);
+			last_count = msgin->count;
+			reuse = 1;
+			break;
+
+		case CMD_BGWORK:
+			reuse = do_bg_work(event, eo_ctx);
+			break;
+
+		case CMD_DONE: /* HB atomic queue */ {
+			e_state state = __atomic_load_n(&eo_ctx->state, __ATOMIC_ACQUIRE);
+
+			/* only do this once */
+			if (state == STATE_RUN && queue == eo_ctx->stop_q) {
+				__atomic_store_n(&eo_ctx->state, STATE_COOLOFF, __ATOMIC_SEQ_CST);
+				add_trace(eo_ctx, -1, OP_STATE, linux_time_ns(), STATE_COOLOFF, -1);
+				eo_ctx->last_hbcount = last_count;
+				eo_ctx->stopped = get_time();
+				APPL_PRINT("Core %d reported DONE\n", msgin->id);
+			}
+		}
+		break;
+
+		default:
+			test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid command!\n");
+		}
+	} else {
+		test_error(EM_ERROR_SET_FATAL(0xDEAD), 0xBEEF, "Invalid event type!\n");
+	}
+
+	if (!reuse)
+		em_free(event);
+}
+
+int main(int argc, char *argv[])
+{
+	/* pick app-specific arguments after '--' */
+	int i;
+
+	APPL_PRINT("EM periodic timer test %s\n\n", VERSION);
+
+	for (i = 1; i < argc; i++) {
+		if (!strcmp(argv[i], "--"))
+			break;
+	}
+	if (i < argc) {
+		if (!parse_my_args(i, argc, argv)) {
+			APPL_PRINT("Invalid application arguments\n");
+			return 1;
+		}
+	}
+
+	return cm_setup(argc, argv);
+}
diff --git a/robot-tests/README.md b/robot-tests/README.md
index 4a935cb2..1fe8b303 100644
--- a/robot-tests/README.md
+++ b/robot-tests/README.md
@@ -10,7 +10,7 @@ sudo pip3 install robotframework
 ## Manually run individual robot tests
 ```bash
-robot --variable APPLICATION:/ --variable CORE_MASK: --variable APPLICATION_MODE: /.robot
+robot --variable APPLICATION:/ --variable TASKSET_CORES: --variable CORE_MASK: --variable APPLICATION_MODE: /.robot
 e.g
-$ robot --variable APPLICATION:/home/username/EM/em-odp/build/programs/example/hello/hello --variable CORE_MASK:0xFE --variable APPLICATION_MODE:t /home/username/EM/em-odp/robot-tests/hello.robot
+$ robot --variable APPLICATION:/home/username/EM/em-odp/build/programs/example/hello/hello --variable TASKSET_CORES:1-7 --variable CORE_MASK:0xFE --variable APPLICATION_MODE:t /home/username/EM/em-odp/robot-tests/hello.robot
 ```
diff --git a/robot-tests/common.resource b/robot-tests/common.resource
index 5a31f523..2947efc2 100644
--- a/robot-tests/common.resource
+++ b/robot-tests/common.resource
@@ -41,6 +41,25 @@ Kill Any Hanging Applications
     [Documentation]    Kill any hanging applications
     Terminate All Processes    kill=true
+Check Pool Statistics
+    [Documentation]    Check that the pool statistics printed just before program exit
+    ...    match the given regex pattern.
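+    ...    Usage (as in 'Run EM-ODP Test' below): Check Pool Statistics    output=${output}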
+ + # output: the output from an EM-ODP program + [Arguments] ${output} + + # Match only the pool statistic printed just before program exit + ${last_pool_statistic} = Get Regexp Matches ${output.stdout} + ... ^Status before delete:(.|\n)*Done - exit$(?m) + + # Should have only one match + ${num_match} = Get Length ${last_pool_statistic} + Should Be Equal As Integers ${num_match} 1 + + FOR ${line} IN @{POOL_STATISTICS_MATCH} + Should Match Regexp ${last_pool_statistic}[0] ${line} + END + Run EM-ODP Test [Documentation] Run em-odp application in the background, send SIGINT ... signal to the application process after sleeping the given sleep_time, @@ -56,7 +75,7 @@ Run EM-ODP Test Log To Console \n # Run application with given arguments - ${app_handle} = Process.Start Process ${APPLICATION} + ${app_handle} = Process.Start Process taskset -c ${TASKSET_CORES} ${APPLICATION} ... @{CM_ARGS} ... stderr=STDOUT ... shell=True @@ -91,6 +110,4 @@ Run EM-ODP Test Should Not Match Regexp ${output.stdout} ${line} END - FOR ${line} IN @{POOL_STATISTICS_MATCH} - Should Match Regexp ${output.stdout} ${line} - END + Check Pool Statistics output=${output} diff --git a/robot-tests/example/emcli.robot b/robot-tests/example/emcli.robot index 8c09a3e0..5a44f971 100644 --- a/robot-tests/example/emcli.robot +++ b/robot-tests/example/emcli.robot @@ -26,13 +26,13 @@ Test Emcli [TAGS] ${core_mask} ${application_mode} # Run hello application with given arguments - ${app_handle} = Process.Start Process ${APPLICATION} + ${app_handle} = Process.Start Process taskset -c ${TASKSET_CORES} ${APPLICATION} ... @{CM_ARGS} ... stderr=STDOUT ... shell=True ... stdout=${TEMPDIR}/stdout.txt - Sleep 6s + Sleep 10s Process Should Be Running ${app_handle} # Open telnet connection @@ -80,9 +80,7 @@ Test Emcli Should Not Match Regexp ${output.stdout} ${line} END - FOR ${line} IN @{POOL_STATISTICS_MATCH} - Should Match Regexp ${output.stdout} ${line} - END + Check Pool Statistics output=${output} *** Keywords *** @@ -258,7 +256,7 @@ Test Pool Print # em_pool_print --name appl_pool_1 ${name_regex} = Catenate SEPARATOR= - ... ^\\s{2}0x2\\s+appl_pool_1\\s+pkt\\s+00\\s+00\\s+04\\s+ + ... ^\\s{2}0x[A-Fa-f0-9]+\\s+appl_pool_1\\s+pkt\\s+00\\s+00\\s+04\\s+ ... 0:\\[sz=256 n=16384\\(\\d+\\/\\d+\\) \\$=128\\]\\s+ ... 1:\\[sz=512 n=1024\\(\\d+\\/\\d+\\) \\$=64\\]\\s+ ... 2:\\[sz=1024 n=1024\\(\\d+\\/\\d+\\) \\$=32\\]\\s+ diff --git a/robot-tests/example/startup_pools.robot b/robot-tests/example/startup_pools.robot new file mode 100644 index 00000000..fee1f377 --- /dev/null +++ b/robot-tests/example/startup_pools.robot @@ -0,0 +1,178 @@ +*** Comments *** +Copyright (c) 2020-2022, Nokia Solutions and Networks +All rights reserved. +SPDX-License-Identifier: BSD-3-Clause + + +*** Settings *** +Documentation Test Startup Pools with hello application +Library OperatingSystem +Resource ../common.resource +Test Setup Set Log Level TRACE +Test Teardown Kill Any Hanging Applications + + +*** Variables *** +${HELLO_PRINT} = SEPARATOR= +... Hello world from EO [AB]\!\\s+My queue is 0x[0-9|A-F|a-f]+.\\s+I'm on co +... re [0-9]+.\\s+Event seq is [0-9]+. + +@{REGEX_MATCH} = +... EM-startup_pools config: +... startup_pools.num: 1 +... startup_pools.conf\\[0\\].name: default +... startup_pools.conf\\[0\\].pool: 1 +... startup_pools.conf\\[0\\].pool_cfg.event_type: EM_EVENT_TYPE_SW +... startup_pools.conf\\[0\\].pool_cfg.align_offset.in_use: true +... startup_pools.conf\\[0\\].pool_cfg.align_offset.value: 0 +... 
startup_pools.conf\\[0\\].pool_cfg.user_area.in_use: true +... startup_pools.conf\\[0\\].pool_cfg.user_area.size: 0 +... startup_pools.conf\\[0\\].pool_cfg.pkt.headroom.in_use: false +... startup_pools.conf\\[0\\].pool_cfg.pkt.headroom.value: 0 +... startup_pools.conf\\[0\\].pool_cfg.num_subpools: 4 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[0\\].size: 256 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[0\\].num: 16384 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[0\\].cache_size: 64 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[1\\].size: 512 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[1\\].num: 1024 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[1\\].cache_size: 32 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[2\\].size: 1024 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[2\\].num: 1024 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[2\\].cache_size: 16 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[3\\].size: 2048 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[3\\].num: 1024 +... startup_pools.conf\\[0\\].pool_cfg.subpools\\[3\\].cache_size: 8 +... ${HELLO_PRINT} +... Done\\s*-\\s*exit + +# For prints that are over 120 characters +${NUM_CONF_NOT_MATCH_PRINT} = SEPARATOR= +... The number of pool configuration\\(s\\) given in\n +... 'startup_pools.conf':2 does not match number of\n +... startup_pools specified in 'startup_pools.num': 3 + +${NUM_SUBPOOLS_NOT_MATCH_PRINT} = SEPARATOR= +... The number of subpool configuration given\n +... in 'startup_pools.conf\\[0\\].pool_cfg.subpools' does not matc +... h 'startup_pools.conf\\[0\\].pool_cfg.num_subpools'. + +${NO_SUBPOOLS_SIZE_PRINT} = SEPARATOR= +... Option 'startup_pools.conf\\[0\\].pool_cfg.subpools\\[0\\].size' no +... t found or wrong type. + +${NO_SUBPOOLS_NUM_PRINT} = SEPARATOR= +... Option 'startup_pools.conf\\[0\\].pool_cfg.subpools\\[0\\].num' no +... t found or wrong type. + +${INVALID_ALIGN_OFFSET_PRINT} = SEPARATOR= +... Invalid 'startup_pools.conf\\[0\\].pool_cfg.align_offset.value': 3\n +... Max align_offset is \\d+ and it must be power of 2 + +${INVALID_PKT_HEADROOM_PRINT} = SEPARATOR= +... 'startup_pools.conf\\[0\\].pool_cfg.pkt.headroom.value' 256 too larg +... e \\(max=128\\) + +${INVALID_NAME_PRINT} = SEPARATOR= +... 'startup_pools.conf\\[0\\].name' has wrong data type\\(expect string\\) + +${INVALID_OR_NO_IN_USE_PRINT} = SEPARATOR= +... 'startup_pools.conf\\[0\\].pool_cfg.align_offset.in_use' not found or wrong type + +${NO_VALUE_PRINT} = SEPARATOR= +... 'startup_pools.conf\\[0\\].pool_cfg.align_offset.value' not found or wront type + +# Output for each test startup_pools conf +&{CONF_OUT} = +... bad_num.conf=Number of startup_pools 64 is too large or too small +... no-conf.conf=Conf option 'startup_pools.conf' not found +... num-conf-not-match.conf=${NUM_CONF_NOT_MATCH_PRINT} +... no-pool-cfg.conf=Conf option 'startup_pools.conf\\[0\\].pool_cfg' not found +... no-event-type.conf='startup_pools.conf\\[0\\].pool_cfg.event_type' not found. +... no-num-subpools.conf='startup_pools.conf\\[0\\].pool_cfg.num_subpools' not found. +... no-subpools.conf='startup_pools.conf\\[0\\].pool_cfg.subpools' not found. +... num-subpools-not-match.conf=${NUM_SUBPOOLS_NOT_MATCH_PRINT} +... no-subpools-size.conf=${NO_SUBPOOLS_SIZE_PRINT} +... no-subpools-num.conf=${NO_SUBPOOLS_NUM_PRINT} +... default-name-non-default-id.conf=Default name "default" with non-default ID 2 +... default-id-non-default-name.conf=Default pool ID 1 with non-default name "test" +... 
invalid-align-offset.conf=${INVALID_ALIGN_OFFSET_PRINT}
+...    invalid-user-area.conf=Event user area too large: 512
+...    invalid-pkt-headroom.conf=${INVALID_PKT_HEADROOM_PRINT}
+...    invalid-name.conf=${INVALID_NAME_PRINT}
+...    no-align-offset-in-use-value.conf=${INVALID_OR_NO_IN_USE_PRINT}
+...    no-align-offset-in-use.conf=${INVALID_OR_NO_IN_USE_PRINT}
+...    no-align-offset-value.conf=${NO_VALUE_PRINT}
+
+# Directory where all confs to be tested are stored
+${CONF_DIR} =    ${CURDIR}${/}test-startup-pools-confs
+
+
+*** Test Cases ***
+Test Invalid Startup Pool Confs
+    [Documentation]    Test all invalid 'startup_pools' options and verify that
+    ...    they fail as expected.
+    [TAGS]    ${CORE_MASK}    ${APPLICATION_MODE}
+
+    FOR    ${conf}    IN    @{CONF_OUT}
+        # Set up huge pages
+        Run    ${EXECDIR}${/}scripts${/}gitlab-ci${/}huge-pages-setup.sh
+
+        # Include the 'startup_pools' conf into em-odp.conf
+        # sed syntax explained: $ matches the last line, a is the append command
+        Run    sed -i -e '$a@include "${CONF_DIR}/${conf}"' %{EM_CONFIG_FILE}
+
+        # Run hello program with given arguments
+        ${output} =    Process.Run Process    ${APPLICATION}
+        ...    @{CM_ARGS}
+        ...    stderr=STDOUT
+        ...    shell=True
+        ...    stdout=${TEMPDIR}/stdout.txt
+
+        # Delete the 'startup_pools' option that has just been tested
+        Run    sed -i '$ d' %{EM_CONFIG_FILE}
+
+        # Check output
+        Should Match Regexp    ${output.stdout}    ${CONF_OUT}[${conf}]
+    END
+
+Test Default Startup Pool Conf
+    [Documentation]    Test the default 'startup_pools' option that is commented
+    ...    out in em-odp.conf. The default pool configuration should override the
+    ...    one passed to em_init(), namely, em_conf_t::default_pool_cfg.
+    [TAGS]    ${CORE_MASK}    ${APPLICATION_MODE}
+
+    # Set up huge pages
+    Run    ${EXECDIR}${/}scripts${/}gitlab-ci${/}huge-pages-setup.sh
+
+    # Uncomment option 'startup_pools'
+    Run    sed -i '/^#startup_pools:\\s{/,/^#}/s/^#//g' %{EM_CONFIG_FILE}
+
+    # Run hello program, should work as usual, except that the startup_pools
+    # config should be printed out
+    Run Keyword And Continue On Failure    Run EM-ODP Test    sleep_time=30
+    ...    regex_match=${REGEX_MATCH}
+
+    # Comment option 'startup_pools' back out
+    Run    sed -i '/^startup_pools:\\s{/,/^}/s/^/#/g' %{EM_CONFIG_FILE}
+
+Test Non-default Startup Pool Conf
+    [Documentation]    Run hello program with a valid, non-default pool
+    ...    configuration configured via 'startup_pools' in em-odp.conf
+    [TAGS]    ${CORE_MASK}    ${APPLICATION_MODE}
+
+    # Set up huge pages
+    Run    ${EXECDIR}${/}scripts${/}gitlab-ci${/}huge-pages-setup.sh
+
+    # Include valid non-default pool configurations into em-odp.conf
+    Run    sed -i -e '$a@include "${CONF_DIR}/non-default-pools.conf"' %{EM_CONFIG_FILE}
+
+    # Run hello program; it should work as usual, except that the program now
+    # has more pools: the default pool configured through the parameter passed
+    # to em_init(), appl_pool_1, and the five pools configured in the file
+    # non-default-pools.conf
+    @{seven_pools} =    Create List    EM Event Pools: 7
+    Run Keyword And Continue On Failure    Run EM-ODP Test    sleep_time=30
+    ...    regex_match=${seven_pools}
+
+    # Delete the 'startup_pools' setting that has just been tested
+    Run    sed -i '$ d' %{EM_CONFIG_FILE}
diff --git a/robot-tests/example/test-startup-pools-confs/bad_num.conf b/robot-tests/example/test-startup-pools-confs/bad_num.conf
new file mode 100644
index 00000000..d8c84693
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/bad_num.conf
@@ -0,0 +1,6 @@
+
+startup_pools: {
+	# Number of pools to be created exceeds the maximum number of EM pools,
+	# defined as EM_CONFIG_POOLS
+	num = 64
+}
diff --git a/robot-tests/example/test-startup-pools-confs/default-id-non-default-name.conf b/robot-tests/example/test-startup-pools-confs/default-id-non-default-name.conf
new file mode 100644
index 00000000..b226a8f6
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/default-id-non-default-name.conf
@@ -0,0 +1,16 @@
+startup_pools: {
+	num = 1
+	conf:({
+		# Default pool ID with non-default name
+		name = "test"
+		pool = 1
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/default-name-non-default-id.conf b/robot-tests/example/test-startup-pools-confs/default-name-non-default-id.conf
new file mode 100644
index 00000000..0a0940ae
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/default-name-non-default-id.conf
@@ -0,0 +1,16 @@
+startup_pools: {
+	num = 1
+	conf:({
+		# Default name with non-default pool ID
+		name = "default"
+		pool = 2
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/invalid-align-offset.conf b/robot-tests/example/test-startup-pools-confs/invalid-align-offset.conf
new file mode 100644
index 00000000..ee17c1e9
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/invalid-align-offset.conf
@@ -0,0 +1,17 @@
+startup_pools: {
+	num = 1
+	conf:({
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			align_offset: {
+				in_use = true
+				value = 3 # Invalid, must be a power of 2
+			}
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/invalid-name.conf b/robot-tests/example/test-startup-pools-confs/invalid-name.conf
new file mode 100644
index 00000000..e4e48c06
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/invalid-name.conf
@@ -0,0 +1,15 @@
+startup_pools: {
+	num = 1
+	conf:({
+		# Optional option given the wrong data type: name should be a string
+		name = 1
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/invalid-pkt-headroom.conf b/robot-tests/example/test-startup-pools-confs/invalid-pkt-headroom.conf
new file mode 100644
index 00000000..18701cf1
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/invalid-pkt-headroom.conf
@@ -0,0 +1,19 @@
+startup_pools: {
+	num = 1
+	conf:({
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_PACKET"
+			pkt: {
+				headroom: {
+					in_use = true
+					value = 256 # Too large
+				}
+			}
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/invalid-user-area.conf b/robot-tests/example/test-startup-pools-confs/invalid-user-area.conf
new file mode 100644
index 00000000..4446fa5c
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/invalid-user-area.conf
@@ -0,0 +1,17 @@
+startup_pools: {
+	num = 1
+	conf:({
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			user_area: {
+				in_use = true
+				size = 512 # Too large
+			}
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/no-align-offset-in-use-value.conf b/robot-tests/example/test-startup-pools-confs/no-align-offset-in-use-value.conf
new file mode 100644
index 00000000..7ea89de5
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/no-align-offset-in-use-value.conf
@@ -0,0 +1,16 @@
+startup_pools: {
+	num = 1
+	conf:({
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			# align_offset is given but neither in_use nor value is provided
+			align_offset: {
+			}
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/no-align-offset-in-use.conf b/robot-tests/example/test-startup-pools-confs/no-align-offset-in-use.conf
new file mode 100644
index 00000000..37f19bdf
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/no-align-offset-in-use.conf
@@ -0,0 +1,17 @@
+startup_pools: {
+	num = 1
+	conf:({
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			align_offset: {
+				# in_use missing
+				value = 0
+			}
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/no-align-offset-value.conf b/robot-tests/example/test-startup-pools-confs/no-align-offset-value.conf
new file mode 100644
index 00000000..40439e15
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/no-align-offset-value.conf
@@ -0,0 +1,17 @@
+startup_pools: {
+	num = 1
+	conf:({
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			align_offset: {
+				in_use = true
+				# value is missing
+			}
+			num_subpools = 1
+			subpools: ({
+				size = 256
+				num = 16384
+			})
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/no-conf.conf b/robot-tests/example/test-startup-pools-confs/no-conf.conf
new file mode 100644
index 00000000..2bec1c00
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/no-conf.conf
@@ -0,0 +1,4 @@
+startup_pools: {
+	num = 2
+	# Mandatory conf is missing
+}
diff --git a/robot-tests/example/test-startup-pools-confs/no-event-type.conf b/robot-tests/example/test-startup-pools-confs/no-event-type.conf
new file mode 100644
index 00000000..0e4e310d
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/no-event-type.conf
@@ -0,0 +1,7 @@
+startup_pools: {
+	num = 1
+	conf:({
+		# Mandatory event_type is missing
+		pool_cfg: {}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/no-num-subpools.conf b/robot-tests/example/test-startup-pools-confs/no-num-subpools.conf
new file mode 100644
index 00000000..5ba168d2
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/no-num-subpools.conf
@@ -0,0 +1,9 @@
+startup_pools: {
+	num = 1
+	conf:({
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			# Mandatory num_subpools is missing
+		}
+	})
+}
diff --git a/robot-tests/example/test-startup-pools-confs/no-pool-cfg.conf b/robot-tests/example/test-startup-pools-confs/no-pool-cfg.conf
new file mode 100644
index 00000000..99a94aae
--- /dev/null
+++ b/robot-tests/example/test-startup-pools-confs/no-pool-cfg.conf
@@ -0,0 +1,5 @@
+startup_pools: {
+	num = 1
+	# Mandatory pool_cfg is missing
+	conf:({})
+}
diff --git
a/robot-tests/example/test-startup-pools-confs/no-subpools-num.conf b/robot-tests/example/test-startup-pools-confs/no-subpools-num.conf new file mode 100644 index 00000000..e7c7dd39 --- /dev/null +++ b/robot-tests/example/test-startup-pools-confs/no-subpools-num.conf @@ -0,0 +1,13 @@ +startup_pools: { + num = 1 + conf:({ + pool_cfg: { + event_type = "EM_EVENT_TYPE_SW" + num_subpools = 1 + # Mandatory num inside subpools is missing + subpools: ({ + size = 256 + }) + } + }) +} diff --git a/robot-tests/example/test-startup-pools-confs/no-subpools-size.conf b/robot-tests/example/test-startup-pools-confs/no-subpools-size.conf new file mode 100644 index 00000000..6b63dd18 --- /dev/null +++ b/robot-tests/example/test-startup-pools-confs/no-subpools-size.conf @@ -0,0 +1,11 @@ +startup_pools: { + num = 1 + conf:({ + pool_cfg: { + event_type = "EM_EVENT_TYPE_SW" + num_subpools = 2 + # Mandatory size inside subpools is missing + subpools: ({}, {}) + } + }) +} diff --git a/robot-tests/example/test-startup-pools-confs/no-subpools.conf b/robot-tests/example/test-startup-pools-confs/no-subpools.conf new file mode 100644 index 00000000..2ec77ce5 --- /dev/null +++ b/robot-tests/example/test-startup-pools-confs/no-subpools.conf @@ -0,0 +1,10 @@ +startup_pools: { + num = 1 + conf:({ + pool_cfg: { + event_type = "EM_EVENT_TYPE_SW" + num_subpools = 2 + # Mandatory subpools is missing + } + }) +} diff --git a/robot-tests/example/test-startup-pools-confs/non-default-pools.conf b/robot-tests/example/test-startup-pools-confs/non-default-pools.conf new file mode 100644 index 00000000..9e20e923 --- /dev/null +++ b/robot-tests/example/test-startup-pools-confs/non-default-pools.conf @@ -0,0 +1,84 @@ +# Five non-default pool configurations +startup_pools: { + num = 5 + conf:({ + name = "startup-pool-1" + pool_cfg: { + event_type = "EM_EVENT_TYPE_SW" + num_subpools = 1 + subpools: ({ + size = 1024 + num = 1024 + }) + } + }, + { + name = "startup-pool-2" + pool_cfg: { + event_type = "EM_EVENT_TYPE_PACKET" + num_subpools = 2 + subpools: ({ + size = 256 + num = 16384 + }, + { + size = 1024 + num = 1024 + }) + } + }, + { + #name = "startup-pool-3" (no name given) + pool_cfg: { + event_type = "EM_EVENT_TYPE_SW" + num_subpools = 3 + subpools: ({ + size = 256 + num = 16384 + }, + { + size = 512 + num = 1024 + }, + { + size = 1024 + num = 1024 + }) + } + }, + { + #name = "startup-pool-4" (no name given) + pool_cfg: { + event_type = "EM_EVENT_TYPE_PACKET" + num_subpools = 4 + subpools: ({ + size = 256 + num = 16384 + }, + { + size = 512 + num = 1024 + }, + { + size = 1024 + num = 1024 + }, + { + size = 2048 + num = 1024 + }) + } + }, + { + name = "startup-pool-5" + pool_cfg: { + event_type = "EM_EVENT_TYPE_VECTOR" + num_subpools = 1 + subpools: ({ + size = 32 + num = 1024 + }) + } + } + ) +} diff --git a/robot-tests/example/test-startup-pools-confs/num-conf-not-match.conf b/robot-tests/example/test-startup-pools-confs/num-conf-not-match.conf new file mode 100644 index 00000000..8beca07f --- /dev/null +++ b/robot-tests/example/test-startup-pools-confs/num-conf-not-match.conf @@ -0,0 +1,6 @@ +startup_pools: { + # Number of pools to be created (num = 3) and the number of configurations + # given inside conf does not match + num = 3 + conf: ({},{}) +} diff --git a/robot-tests/example/test-startup-pools-confs/num-subpools-not-match.conf b/robot-tests/example/test-startup-pools-confs/num-subpools-not-match.conf new file mode 100644 index 00000000..971dcf1a --- /dev/null +++ 
b/robot-tests/example/test-startup-pools-confs/num-subpools-not-match.conf
@@ -0,0 +1,12 @@
+startup_pools: {
+	num = 1
+	conf:({
+		pool_cfg: {
+			event_type = "EM_EVENT_TYPE_SW"
+			# num_subpools (= 2) does not match the number of subpool
+			# configurations given inside subpools (= 1)
+			num_subpools = 2
+			subpools: ({})
+		}
+	})
+}
diff --git a/robot-tests/performance/loop_refs.robot b/robot-tests/performance/loop_refs.robot
new file mode 100644
index 00000000..b3397439
--- /dev/null
+++ b/robot-tests/performance/loop_refs.robot
@@ -0,0 +1,29 @@
+*** Comments ***
+Copyright (c) 2020-2023, Nokia Solutions and Networks
+All rights reserved.
+SPDX-License-Identifier: BSD-3-Clause
+
+
+*** Settings ***
+Documentation       Test Loop Refs -c ${CORE_MASK} -${APPLICATION_MODE}
+Resource            ../common.resource
+Test Setup          Set Log Level    TRACE
+Test Teardown       Kill Any Hanging Applications
+
+
+*** Variables ***
+${FIRST_REGEX} =    SEPARATOR=
+...    cycles/event:\\s*[0-9]+\\.[0-9]+\\s*Mevents/s/core:\\s*[0-9]+\\.[0-9]+
+...    \\s*[0-9]+\\s*MHz\\s*core[0-9]+\\s*[0-9]+
+
+@{REGEX_MATCH} =
+...    ${FIRST_REGEX}
+...    Done\\s*-\\s*exit
+
+
+*** Test Cases ***
+Test Loop Refs
+    [Documentation]    loop_refs -c ${CORE_MASK} -${APPLICATION_MODE}
+    [TAGS]    ${CORE_MASK}    ${APPLICATION_MODE}
+
+    Run EM-ODP Test    sleep_time=60    regex_match=${REGEX_MATCH}
diff --git a/scripts/em_odp_check b/scripts/em_odp_check
index 27b1787e..4a01e6a7 100755
--- a/scripts/em_odp_check
+++ b/scripts/em_odp_check
@@ -19,7 +19,9 @@ IGNORE+='FUNCTION_ARGUMENTS,'
 IGNORE+='MACRO_ARG_REUSE,'
 IGNORE+='SPDX_LICENSE_TAG,'
 IGNORE+='C99_COMMENT_TOLERANCE,' # C99-style comments '//' reported as error
-IGNORE+='PREFER_FALLTHROUGH'
+IGNORE+='PREFER_FALLTHROUGH,'
+IGNORE+='SSCANF_TO_KSTRTO'
 $DIR/cleanfile $1 2> /dev/null
-$DIR/checkpatch.pl -f --no-tree --ignore $IGNORE --mailback --strict --terse --no-summary --show-types $1 2> /dev/null
+$DIR/checkpatch.pl -f --no-tree --ignore $IGNORE --mailback --strict --terse \
+	--no-summary --show-types $1 2> /dev/null
diff --git a/scripts/robot_test.sh b/scripts/robot_test.sh
index 03398fa0..c3f237eb 100755
--- a/scripts/robot_test.sh
+++ b/scripts/robot_test.sh
@@ -26,16 +26,17 @@ apps["event_group"]=programs/example/event_group/event_group
 apps["fractal"]=programs/example/fractal/fractal
 apps["hello"]=programs/example/hello/hello
 apps["ordered"]=programs/example/queue/ordered
-apps["queue_group"]=programs/example/queue_group/queue_group
 apps["queue_types_ag"]=programs/example/queue/queue_types_ag
 apps["queue_types_local"]=programs/example/queue/queue_types_local
+apps["queue_group"]=programs/example/queue_group/queue_group
 apps["timer_hello"]=programs/example/add-ons/timer_hello
 apps["timer_test"]=programs/example/add-ons/timer_test

 # Performance Apps
 apps["atomic_processing_end"]=programs/performance/atomic_processing_end
-apps["loop_multircv"]=programs/performance/loop_multircv
 apps["loop"]=programs/performance/loop
+apps["loop_multircv"]=programs/performance/loop_multircv
+apps["loop_refs"]=programs/performance/loop_refs
 apps["pairs"]=programs/performance/pairs
 apps["queue_groups"]=programs/performance/queue_groups
 apps["queues_local"]=programs/performance/queues_local
@@ -50,9 +51,9 @@ odp_conf="odp/config/odp-linux-generic.conf"
 sed -i 's/cpu_mhz\s*=.*/cpu_mhz = 2800/' "${odp_conf}"
 # - set system.cpu_mhz_max = 2800
 sed -i 's/cpu_mhz_max\s*=.*/cpu_mhz_max = 2800/' "${odp_conf}"
-# - set timer.inline = 1
+# - set timer.inline = 1: Use inline timer implementation
 sed -i 's/inline\s*=.*/inline = 1/' "${odp_conf}"
-# - set
inline_thread_type = 1 +# - set timer.inline_thread_type = 1: Only worker threads process non-private timer pools sed -i 's/inline_thread_type\s*=.*/inline_thread_type = 1/' "${odp_conf}" em_conf="config/em-odp.conf" @@ -89,6 +90,7 @@ for app in "${!apps[@]}"; do EM_CONFIG_FILE="${em_conf}" \ robot \ --variable APPLICATION:"${apps[${app}]}" \ + --variable TASKSET_CORES:"0-1" \ --variable CORE_MASK:"${core_masks[$i]}" \ --variable APPLICATION_MODE:"${modes[$j]}" \ --log NONE \ diff --git a/scripts/style_check.py b/scripts/style_check.py index b9da9ddd..63234695 100755 --- a/scripts/style_check.py +++ b/scripts/style_check.py @@ -1,67 +1,67 @@ -#!/usr/bin/env python - -import os -import sys -import threading - -# Conjure repo root dir. Presume that parent of script dir is repo root folder. -ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) - -# Style check script command -C_CHECK = ROOT_DIR + "/scripts/em_odp_check " - -# File extensions to check -EXT = ('.c', '.h') - -# Start checking from these folders -CHECK_DIRS = ["include", "src", "programs"] - -# Filter out these directories -IGNORE_DIRS = [] - -# Set absolute paths to check dirs -CHECK_DIRS = [os.path.join(ROOT_DIR, dir) for dir in CHECK_DIRS] - -# Multithread safe function to run the check script for file in file_list -def run_checks(): - global rc - - while file_list: - file = file_list.pop() - - # Option to run different check script for different files - # if file.endswith(('.c', '.h')): - cmd = C_CHECK + file - - if os.system(cmd) != 0: - rc = 1 - -rc = 0 -file_list = [] -threads = [] - -# Collect and add all files to be checked to file_list -for check_dir in CHECK_DIRS: - for root, dirs, files in os.walk(check_dir): - if not any(path in root for path in IGNORE_DIRS): - for file in files: - if file.endswith(EXT): - file_list.append(os.path.join(root, file)) - - -# Run checks on all files in file_list with multiple threads -for i in range(5): - t = threading.Thread(target=run_checks) - threads.append(t) - t.start() - -# Wait for threads -for t in threads: - t.join() - -if rc == 1: - print("Style errors found.") -else: - print("Style check OK!") - -sys.exit(rc) +#!/usr/bin/env python + +import os +import sys +import threading + +# Conjure repo root dir. Presume that parent of script dir is repo root folder. 
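+# (All paths below derive from ROOT_DIR, so the script can be run from any directory.)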
+ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)) + +# Style check script command +C_CHECK = ROOT_DIR + "/scripts/em_odp_check " + +# File extensions to check +EXT = ('.c', '.h') + +# Start checking from these folders +CHECK_DIRS = ["include", "src", "programs"] + +# Filter out these directories +IGNORE_DIRS = [] + +# Set absolute paths to check dirs +CHECK_DIRS = [os.path.join(ROOT_DIR, dir) for dir in CHECK_DIRS] + +# Multithread safe function to run the check script for file in file_list +def run_checks(): + global rc + + while file_list: + file = file_list.pop() + + # Option to run different check script for different files + # if file.endswith(('.c', '.h')): + cmd = C_CHECK + file + + if os.system(cmd) != 0: + rc = 1 + +rc = 0 +file_list = [] +threads = [] + +# Collect and add all files to be checked to file_list +for check_dir in CHECK_DIRS: + for root, dirs, files in os.walk(check_dir): + if not any(path in root for path in IGNORE_DIRS): + for file in files: + if file.endswith(EXT): + file_list.append(os.path.join(root, file)) + + +# Run checks on all files in file_list with multiple threads +for i in range(15): + t = threading.Thread(target=run_checks) + threads.append(t) + t.start() + +# Wait for threads +for t in threads: + t.join() + +if rc == 1: + print("Style errors found.") +else: + print("Style check OK!") + +sys.exit(rc) diff --git a/src/Makefile.am b/src/Makefile.am index 5bce3fe0..de430a31 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -41,6 +41,7 @@ $(top_builddir)/include/event_machine/api/event_machine_version.h emhelperincludedir = $(includedir)/event_machine/helper emhelperinclude_HEADERS = \ +$(top_srcdir)/include/event_machine/helper/event_machine_debug.h \ $(top_srcdir)/include/event_machine/helper/event_machine_helper.h emaddonsincludedir = $(includedir)/event_machine/add-ons @@ -131,6 +132,7 @@ em_error_types.h \ \ em_event.c \ em_event.h \ +em_event_inline.h \ em_event_types.h \ em_event_state.c \ em_event_state.h \ @@ -171,6 +173,7 @@ em_queue_types.h \ \ em_queue_group.c \ em_queue_group.h \ +em_queue_inline.h \ em_queue_group_types.h \ \ em_sync_api_types.h \ diff --git a/src/add-ons/event_timer/em_timer.c b/src/add-ons/event_timer/em_timer.c index cc855b60..b492a5f3 100644 --- a/src/add-ons/event_timer/em_timer.c +++ b/src/add-ons/event_timer/em_timer.c @@ -1,60 +1,58 @@ -/* - * Copyright (c) 2017, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#include "em_include.h" -#include - -#include "em_timer.h" - -em_status_t timer_init(timer_storage_t *const tmrs) -{ - int i; - - for (i = 0; i < EM_ODP_MAX_TIMERS; i++) - tmrs->timer[i].idx = i; - - odp_ticketlock_init(&tmrs->timer_lock); - - return EM_OK; -} - -em_status_t timer_init_local(void) -{ - return EM_OK; -} - -em_status_t timer_term_local(void) -{ - return EM_OK; -} - -em_status_t timer_term(void) -{ - return EM_OK; -} +/* + * Copyright (c) 2017, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#include "em_include.h" +#include + +#include "em_timer.h" + +em_status_t timer_init(timer_storage_t *const tmrs) +{ + for (int i = 0; i < EM_ODP_MAX_TIMERS; i++) + tmrs->timer[i].idx = i; + + odp_ticketlock_init(&tmrs->timer_lock); + + return EM_OK; +} + +em_status_t timer_init_local(void) +{ + return EM_OK; +} + +em_status_t timer_term_local(void) +{ + return EM_OK; +} + +em_status_t timer_term(void) +{ + return EM_OK; +} diff --git a/src/add-ons/event_timer/event_machine_timer.c b/src/add-ons/event_timer/event_machine_timer.c index 292d28b0..ecd53f8e 100644 --- a/src/add-ons/event_timer/event_machine_timer.c +++ b/src/add-ons/event_timer/event_machine_timer.c @@ -1,1020 +1,1053 @@ -/* - * Copyright (c) 2016, Nokia Solutions and Networks - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * --------------------------------------------------------------------- - * Some notes about the implementation: - * - * EM timer add-on API is close to ODP timer, but there are issues - * making this code a bit more complex than it could be: - * - * 1) no periodic timer in ODP - * 2) unless using the pre-defined timeout event there is no way to access - * all necessary information runtime to implement a periodic timer - * - * Point 2 is solved by creating a timeout pool. When user allocates - * EM timeout, a new minimum size buffer is allocated to store all the needed - * information. Timer handle is a pointer to such buffer so all data is - * available via the handle (ack() is the most problematic case). This does - * create performance penalty, but so far it looks like the penalty is not - * too large and does simplify the code otherwise. Also timeouts could be - * pre-allocated as the API separates creation and arming. - * Most of the syncronization is handled by ODP timer, a ticketlock is used - * for high level management API. - * - */ -#include "em_include.h" -#include -#include "em_timer.h" - -/* timer handle = index + 1 (UNDEF 0) */ -#define TMR_I2H(x) ((em_timer_t)(uintptr_t)((x) + 1)) -#define TMR_H2I(x) ((int)((uintptr_t)(x) - 1)) - -static inline int is_queue_valid_type(em_timer_t tmr, const queue_elem_t *q_elem) -{ - unsigned int tmridx = (unsigned int)TMR_H2I(tmr); - - /* implementation specific */ - if (em_shm->timers.timer[tmridx].plain_q_ok && q_elem->type == EM_QUEUE_TYPE_UNSCHEDULED) - return 1; - /* EM assumes scheduled always supported */ - return (q_elem->type == EM_QUEUE_TYPE_ATOMIC || - q_elem->type == EM_QUEUE_TYPE_PARALLEL || - q_elem->type == EM_QUEUE_TYPE_PARALLEL_ORDERED) ? 
1 : 0; - - /* LOCAL or OUTPUT queues not supported */ -} - -static inline int is_timer_valid(em_timer_t tmr) -{ - unsigned int i; - const timer_storage_t *const tmrs = &em_shm->timers; - - if (unlikely(tmr == EM_TIMER_UNDEF)) - return 0; - - i = (unsigned int)TMR_H2I(tmr); - if (unlikely(i >= EM_ODP_MAX_TIMERS)) - return 0; - - if (unlikely(tmrs->timer[i].odp_tmr_pool == ODP_TIMER_POOL_INVALID || - tmrs->timer[i].tmo_pool == ODP_POOL_INVALID)) - return 0; - return 1; -} - -static inline em_status_t handle_ack_noskip(em_event_t next_tmo_ev, - event_hdr_t *ev_hdr, - em_queue_t queue) -{ - if (esv_enabled()) - evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__NOSKIP); - - em_status_t err = em_send(next_tmo_ev, queue); - - if (unlikely(err != EM_OK)) - err = INTERNAL_ERROR(err, EM_ESCOPE_TMO_ACK, "Tmo ACK: noskip em_send fail"); - return err; /* EM_OK or send-failure */ -} - -static inline void handle_ack_skip(em_tmo_t tmo) -{ - uint64_t odpt = odp_timer_current_tick(tmo->odp_timer_pool); - uint64_t skips; - - if (odpt > tmo->last_tick) /* late, over next period */ - skips = ((odpt - tmo->last_tick) / tmo->period) + 1; - else - skips = 1; /* not yet over next period, but late for setting */ - - tmo->last_tick += skips * tmo->period; - TMR_DBG_PRINT("%lu skips * %lu ticks => new tgt %lu\n", - skips, tmo->period, tmo->last_tick); - if (EM_TIMER_TMO_STATS) - tmo->stats.num_period_skips += skips; -} - -static inline bool check_tmo_flags(em_tmo_flag_t flags) -{ - /* Check for valid tmo flags (oneshot OR periodic mainly) */ - if (unlikely(!(flags & (EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC)))) - return false; - - if (unlikely((flags & EM_TMO_FLAG_ONESHOT) && (flags & EM_TMO_FLAG_PERIODIC))) - return false; - - if (EM_CHECK_LEVEL > 1) { - em_tmo_flag_t inv_flags = ~(EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC | - EM_TMO_FLAG_NOSKIP); - if (unlikely(flags & inv_flags)) - return false; - } - return true; -} - -static inline bool check_timer_attr(const em_timer_attr_t *tmr_attr) -{ - if (unlikely(tmr_attr == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_CREATE, - "NULL ptr given"); - return false; - } - if (unlikely(tmr_attr->__internal_check != EM_CHECK_INIT_CALLED)) { - INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE, - "em_timer_attr_t not initialized"); - return false; - } - if (unlikely(tmr_attr->resparam.res_ns && tmr_attr->resparam.res_hz)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE, - "Only res_ns OR res_hz allowed"); - return false; - } - return true; -} - -void em_timer_attr_init(em_timer_attr_t *tmr_attr) -{ - if (unlikely(EM_CHECK_LEVEL > 0 && tmr_attr == NULL)) - return; /* just ignore NULL here */ - - /* strategy: first put default resolution, then validate based on that */ - tmr_attr->resparam.res_ns = EM_ODP_TIMER_RESOL_DEF_NS; - tmr_attr->resparam.res_hz = 0; - tmr_attr->resparam.clk_src = EM_TIMER_CLKSRC_DEFAULT; - tmr_attr->flags = EM_TIMER_FLAG_DEFAULT; - - odp_timer_clk_src_t odp_clksrc; - odp_timer_capability_t odp_capa; - odp_timer_res_capability_t odp_res_capa; - int err; - - err = timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_ATTR_INIT, - "Unsupported EM-timer clock source:%d", - tmr_attr->resparam.clk_src); - return; - } - err = odp_timer_capability(odp_clksrc, &odp_capa); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, - "Timer capability: ret %d, odp-clksrc:%d", - err, odp_clksrc); - return; - } 
- - memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t)); - odp_res_capa.res_ns = tmr_attr->resparam.res_ns; - err = odp_timer_res_capability(odp_clksrc, &odp_res_capa); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, - "Timer res capability: ret %d, odp-clksrc:%d, res %lu", - err, odp_clksrc, tmr_attr->resparam.res_ns); - return; - } - - TMR_DBG_PRINT("res %lu -> ODP says min %lu, max %lu\n", - tmr_attr->resparam.res_ns, odp_res_capa.min_tmo, - odp_res_capa.max_tmo); - - tmr_attr->num_tmo = EM_ODP_DEFAULT_TMOS; - if (odp_capa.max_timers && odp_capa.max_timers < EM_ODP_DEFAULT_TMOS) - tmr_attr->num_tmo = odp_capa.max_timers; - - tmr_attr->resparam.min_tmo = odp_res_capa.min_tmo; - tmr_attr->resparam.max_tmo = odp_res_capa.max_tmo; - tmr_attr->name[0] = 0; /* timer_create will add default (no index available here) */ - tmr_attr->__internal_check = EM_CHECK_INIT_CALLED; -} - -em_status_t em_timer_capability(em_timer_capability_t *capa, em_timer_clksrc_t clk_src) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(capa == NULL)) { - EM_LOG(EM_LOG_DBG, "%s(): NULL capa ptr!\n", __func__); - return EM_ERR_BAD_POINTER; - } - - odp_timer_clk_src_t odp_clksrc; - odp_timer_capability_t odp_capa; - - if (unlikely(timer_clksrc_em2odp(clk_src, &odp_clksrc) || - odp_timer_capability(odp_clksrc, &odp_capa))) { - EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src); - return EM_ERR_BAD_ARG; - } - - capa->max_timers = odp_capa.max_pools < EM_ODP_MAX_TIMERS ? - odp_capa.max_pools : EM_ODP_MAX_TIMERS; - capa->max_num_tmo = odp_capa.max_timers; - capa->max_res.clk_src = clk_src; - capa->max_res.res_ns = odp_capa.max_res.res_ns; - capa->max_res.res_hz = odp_capa.max_res.res_hz; - capa->max_res.min_tmo = odp_capa.max_res.min_tmo; - capa->max_res.max_tmo = odp_capa.max_res.max_tmo; - capa->max_tmo.clk_src = clk_src; - capa->max_tmo.res_ns = odp_capa.max_tmo.res_ns; - capa->max_tmo.res_hz = odp_capa.max_tmo.res_hz; - capa->max_tmo.min_tmo = odp_capa.max_tmo.min_tmo; - capa->max_tmo.max_tmo = odp_capa.max_tmo.max_tmo; - return EM_OK; -} - -em_status_t em_timer_res_capability(em_timer_res_param_t *res, em_timer_clksrc_t clk_src) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(res == NULL)) { - EM_LOG(EM_LOG_DBG, "%s: NULL ptr res\n", __func__); - return EM_ERR_BAD_POINTER; - } - - odp_timer_clk_src_t odp_clksrc; - odp_timer_res_capability_t odp_res_capa; - int err; - - err = timer_clksrc_em2odp(clk_src, &odp_clksrc); - if (unlikely(err)) { - EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src); - return EM_ERR_BAD_ARG; - } - memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t)); - odp_res_capa.res_ns = res->res_ns; - odp_res_capa.res_hz = res->res_hz; - odp_res_capa.max_tmo = res->max_tmo; /* ODP will check if both were set */ - err = odp_timer_res_capability(odp_clksrc, &odp_res_capa); - if (unlikely(err)) { - EM_LOG(EM_LOG_DBG, "%s: ODP res_capability failed (ret %d)!\n", __func__, err); - return EM_ERR_BAD_ARG; - } - res->min_tmo = odp_res_capa.min_tmo; - res->max_tmo = odp_res_capa.max_tmo; - res->res_ns = odp_res_capa.res_ns; - res->res_hz = odp_res_capa.res_hz; - res->clk_src = clk_src; - return EM_OK; -} - -em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr) -{ - if (EM_CHECK_LEVEL > 0) { - if (check_timer_attr(tmr_attr) == false) - return EM_TIMER_UNDEF; - } - - odp_timer_pool_param_t odp_tpool_param; - odp_timer_clk_src_t odp_clksrc; - - odp_timer_pool_param_init(&odp_tpool_param); - odp_tpool_param.res_ns = 
tmr_attr->resparam.res_ns; - odp_tpool_param.res_hz = tmr_attr->resparam.res_hz; - odp_tpool_param.min_tmo = tmr_attr->resparam.min_tmo; - odp_tpool_param.max_tmo = tmr_attr->resparam.max_tmo; - odp_tpool_param.num_timers = tmr_attr->num_tmo; - odp_tpool_param.priv = tmr_attr->flags & EM_TIMER_FLAG_PRIVATE ? 1 : 0; - if (unlikely(timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE, - "Unsupported EM-timer clock source:%d", - tmr_attr->resparam.clk_src); - return EM_TIMER_UNDEF; - } - odp_tpool_param.clk_src = odp_clksrc; - - /* check queue type support */ - odp_timer_capability_t capa; - - if (unlikely(odp_timer_capability(odp_clksrc, &capa))) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, - "ODP timer capa failed for clk:%d", - tmr_attr->resparam.clk_src); - return EM_TIMER_UNDEF; - } - if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */ - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, - "ODP does not support scheduled q for clk:%d", - tmr_attr->resparam.clk_src); - return EM_TIMER_UNDEF; - } - - /* buffer pool for tmos */ - odp_pool_param_t odp_pool_param; - - odp_pool_param_init(&odp_pool_param); - odp_pool_param.type = ODP_POOL_BUFFER; - odp_pool_param.buf.size = sizeof(em_timer_timeout_t); - odp_pool_param.buf.align = ODP_CACHE_LINE_SIZE; - if (odp_pool_param.buf.cache_size > EM_ODP_TIMER_CACHE) - odp_pool_param.buf.cache_size = EM_ODP_TIMER_CACHE; - TMR_DBG_PRINT("local tmo pool cache %d\n", odp_pool_param.buf.cache_size); - - /* local pool caching may cause out of buffers situation on a core. Adjust, - * but not waste too much memory - */ - uint32_t num = tmr_attr->num_tmo + ((em_core_count() - 1) * odp_pool_param.buf.cache_size); - - if (tmr_attr->num_tmo < num) { - TMR_DBG_PRINT("Adjusted pool size %d->%d due to local caching (%d)\n", - tmr_attr->num_tmo, num, odp_pool_param.buf.cache_size); - } - odp_pool_param.buf.num = num; - - /* - * Find a free timer-slot. - * This linear search should not be a performance problem with only a few timers - * available especially when these are typically created at startup. 
- */ - int i; - event_timer_t *timer; - - odp_ticketlock_lock(&em_shm->timers.timer_lock); - - for (i = 0; i < EM_ODP_MAX_TIMERS; i++) { - timer = &em_shm->timers.timer[i]; - if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID) /* marks used entry */ - continue; - - char timer_pool_name[ODP_TIMER_POOL_NAME_LEN]; - char tmo_pool_name[ODP_POOL_NAME_LEN]; - const char *name = tmr_attr->name; - - if (tmr_attr->name[0] == '\0') { /* replace NULL with default */ - snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN, - "EM-timer-%d", timer->idx); /* idx initialized by timer_init */ - name = timer_pool_name; - } - - TMR_DBG_PRINT("Creating ODP tmr pool: clk %d, res_ns %lu, res_hz %lu\n", - odp_tpool_param.clk_src, odp_tpool_param.res_ns, - odp_tpool_param.res_hz); - timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param); - if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) - goto error_locked; - TMR_DBG_PRINT("Created timer: %s with idx: %d\n", name, timer->idx); - - snprintf(tmo_pool_name, ODP_POOL_NAME_LEN, "Tmo-pool-%d", timer->idx); - timer->tmo_pool = odp_pool_create(tmo_pool_name, &odp_pool_param); - if (unlikely(timer->tmo_pool == ODP_POOL_INVALID)) - goto error_locked; - TMR_DBG_PRINT("Created ODP-pool: %s for %d timeouts\n", - tmo_pool_name, odp_pool_param.buf.num); - - timer->flags = tmr_attr->flags; - timer->plain_q_ok = capa.queue_type_plain; - odp_timer_pool_start(); - break; - } - - odp_ticketlock_unlock(&em_shm->timers.timer_lock); - - if (unlikely(i >= EM_ODP_MAX_TIMERS)) { - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_CREATE, - "No more timers available"); - return EM_TIMER_UNDEF; - } - TMR_DBG_PRINT("ret %" PRI_TMR "\n", TMR_I2H(i)); - return TMR_I2H(i); - -error_locked: - /* odp_ticketlock_lock(&timer_shm->tlock) */ - - /* 'timer' set in loop */ - if (timer->tmo_pool != ODP_POOL_INVALID) - odp_pool_destroy(timer->tmo_pool); - if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID) - odp_timer_pool_destroy(timer->odp_tmr_pool); - timer->tmo_pool = ODP_POOL_INVALID; - timer->odp_tmr_pool = ODP_TIMER_POOL_INVALID; - odp_ticketlock_unlock(&em_shm->timers.timer_lock); - - TMR_DBG_PRINT("ERR odp tmr pool in: clk %u, res %lu, min %lu, max %lu, num %u\n", - odp_tpool_param.clk_src, odp_tpool_param.res_ns, - odp_tpool_param.min_tmo, odp_tpool_param.max_tmo, odp_tpool_param.num_timers); - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, "Timer pool create failed"); - return EM_TIMER_UNDEF; -} - -em_status_t em_timer_delete(em_timer_t tmr) -{ - timer_storage_t *const tmrs = &em_shm->timers; - int i = TMR_H2I(tmr); - - odp_ticketlock_lock(&tmrs->timer_lock); - /* take lock before checking so nothing can change */ - if (unlikely(!is_timer_valid(tmr))) { - odp_ticketlock_unlock(&tmrs->timer_lock); - return INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TIMER_DELETE, - "Invalid timer:%" PRI_TMR "", tmr); - } - - odp_pool_destroy(tmrs->timer[i].tmo_pool); - tmrs->timer[i].tmo_pool = ODP_POOL_INVALID; - odp_timer_pool_destroy(tmrs->timer[i].odp_tmr_pool); - tmrs->timer[i].odp_tmr_pool = ODP_TIMER_POOL_INVALID; - - odp_ticketlock_unlock(&tmrs->timer_lock); - return EM_OK; -} - -em_timer_tick_t em_timer_current_tick(em_timer_t tmr) -{ - const timer_storage_t *const tmrs = &em_shm->timers; - int i = TMR_H2I(tmr); - - if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) - return 0; - - return odp_timer_current_tick(tmrs->timer[i].odp_tmr_pool); -} - -em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue) -{ - int i = TMR_H2I(tmr); - odp_timer_pool_t odptmr; 
- const queue_elem_t *const q_elem = queue_elem_get(queue); - odp_buffer_t tmo_buf; - - if (EM_CHECK_LEVEL > 0) { - if (unlikely(!is_timer_valid(tmr))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_CREATE, - "Invalid timer:%" PRI_TMR "", tmr); - return EM_TMO_UNDEF; - } - if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": inv.Q:%" PRI_QUEUE "", - tmr, queue); - return EM_TMO_UNDEF; - } - if (unlikely(!is_queue_valid_type(tmr, q_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": inv.Q (type):%" PRI_QUEUE "", - tmr, queue); - return EM_TMO_UNDEF; - } - if (unlikely(!check_tmo_flags(flags))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": inv. tmo-flags:0x%x", - tmr, flags); - return EM_TMO_UNDEF; - } - } - - tmo_buf = odp_buffer_alloc(em_shm->timers.timer[i].tmo_pool); - if (unlikely(tmo_buf == ODP_BUFFER_INVALID)) { - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": tmo pool exhausted", tmr); - return EM_TMO_UNDEF; - } - - em_timer_timeout_t *tmo = odp_buffer_addr(tmo_buf); - - odptmr = em_shm->timers.timer[i].odp_tmr_pool; - tmo->odp_timer = odp_timer_alloc(odptmr, q_elem->odp_queue, NULL); - if (unlikely(tmo->odp_timer == ODP_TIMER_INVALID)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CREATE, - "Tmr:%" PRI_TMR ": odp_timer_alloc() failed", - tmr); - return EM_TMO_UNDEF; - } - - /* OK, init state */ - tmo->period = 0; - tmo->odp_timer_pool = odptmr; - tmo->odp_buffer = tmo_buf; - tmo->flags = flags; - tmo->queue = queue; - if (EM_TIMER_TMO_STATS) - memset(&tmo->stats, 0, sizeof(em_tmo_stats_t)); - odp_atomic_init_u32(&tmo->state, EM_TMO_STATE_IDLE); - TMR_DBG_PRINT("ODP tmo %ld allocated\n", (unsigned long)tmo->odp_timer); - return tmo; -} - -em_status_t em_tmo_delete(em_tmo_t tmo, em_event_t *cur_event) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF, EM_ERR_BAD_ID, - EM_ESCOPE_TMO_DELETE, "Invalid tmo"); - RETURN_ERROR_IF(cur_event == NULL, EM_ERR_BAD_POINTER, - EM_ESCOPE_TMO_DELETE, "NULL pointer given"); - } - if (EM_CHECK_LEVEL > 1) { - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_DELETE, - "Invalid tmo state:%d", tmo_state); - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE, - "Invalid tmo buffer"); - } - - TMR_DBG_PRINT("ODP tmo %ld\n", (unsigned long)tmo->odp_timer); - - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_UNKNOWN); - - odp_event_t odp_evt = odp_timer_free(tmo->odp_timer); - em_event_t tmo_ev = EM_EVENT_UNDEF; - - if (odp_evt != ODP_EVENT_INVALID) - tmo_ev = event_odp2em(odp_evt); - - odp_buffer_t tmp = tmo->odp_buffer; - - tmo->odp_timer = ODP_TIMER_INVALID; - tmo->odp_buffer = ODP_BUFFER_INVALID; - odp_buffer_free(tmp); - - if (esv_enabled() && tmo_ev != EM_EVENT_UNDEF) - tmo_ev = evstate_em2usr(tmo_ev, event_to_hdr(tmo_ev), EVSTATE__TMO_DELETE); - - *cur_event = tmo_ev; - return EM_OK; -} - -em_status_t em_tmo_set_abs(em_tmo_t tmo, em_timer_tick_t ticks_abs, - em_event_t tmo_ev) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF, - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS, - "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", - tmo, tmo_ev); - RETURN_ERROR_IF(tmo->flags & EM_TMO_FLAG_PERIODIC, - EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_ABS, - "Cannot set periodic tmo, use 
_set_periodic()"); - } - if (EM_CHECK_LEVEL > 1) { - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_ABS, - "Invalid tmo state:%d", tmo_state); - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS, - "Invalid tmo buffer"); - } - - event_hdr_t *ev_hdr = NULL; - odp_event_t odp_ev = event_em2odp(tmo_ev); - bool esv_ena = esv_enabled(); - odp_timer_start_t startp; - - if (esv_ena) { - ev_hdr = event_to_hdr(tmo_ev); - evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS); - } - - /* set tmo active and arm with absolute time */ - startp.tick_type = ODP_TIMER_TICK_ABS; - startp.tick = ticks_abs; - startp.tmo_ev = odp_ev; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); - int ret = odp_timer_start(tmo->odp_timer, &startp); - - if (unlikely(ret != ODP_TIMER_SUCCESS)) { - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - if (esv_ena) - evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS__FAIL); - if (ret == ODP_TIMER_TOOLATE) - return EM_ERR_TOOFAR; - else if (ret == ODP_TIMER_TOOEARLY) - return EM_ERR_TOONEAR; - return INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_SET_ABS, - "odp_timer_start():%d", ret); - } - TMR_DBG_PRINT("OK\n"); - return EM_OK; -} - -em_status_t em_tmo_set_rel(em_tmo_t tmo, em_timer_tick_t ticks_rel, - em_event_t tmo_ev) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF, - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_REL, - "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", - tmo, tmo_ev); - } - if (EM_CHECK_LEVEL > 1) { - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_REL, - "Invalid tmo state:%d", tmo_state); - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_REL, - "Invalid tmo buffer"); - } - - event_hdr_t *ev_hdr = NULL; - odp_event_t odp_ev = event_em2odp(tmo_ev); - bool esv_ena = esv_enabled(); - odp_timer_start_t startp; - - if (esv_ena) { - ev_hdr = event_to_hdr(tmo_ev); - evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL); - } - - /* set tmo active and arm with relative time */ - tmo->period = ticks_rel; - if (unlikely(tmo->flags & EM_TMO_FLAG_PERIODIC)) { - tmo->last_tick = odp_timer_current_tick(tmo->odp_timer_pool) + - ticks_rel; - } - TMR_DBG_PRINT("last_tick %lu\n", tmo->last_tick); - startp.tick_type = ODP_TIMER_TICK_REL; - startp.tick = ticks_rel; - startp.tmo_ev = odp_ev; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); - int ret = odp_timer_start(tmo->odp_timer, &startp); - - if (unlikely(ret != ODP_TIMER_SUCCESS)) { - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - if (esv_ena) - evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL__FAIL); - return INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_SET_REL, - "odp_timer_set_rel():%d", ret); - } - TMR_DBG_PRINT("OK\n"); - return EM_OK; -} - -em_status_t em_tmo_set_periodic(em_tmo_t tmo, - em_timer_tick_t start_abs, - em_timer_tick_t period, - em_event_t tmo_ev) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF, - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC, - "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", - tmo, tmo_ev); - RETURN_ERROR_IF(!(tmo->flags & EM_TMO_FLAG_PERIODIC), - EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC, - "Not periodic tmo"); - } - if (EM_CHECK_LEVEL 
> 1) { - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC, - "Invalid tmo state:%d", tmo_state); - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC, - "Invalid tmo buffer"); - } - - event_hdr_t *ev_hdr = NULL; - odp_event_t odp_ev = event_em2odp(tmo_ev); - bool esv_ena = esv_enabled(); - odp_timer_start_t startp; - - if (esv_ena) { - ev_hdr = event_to_hdr(tmo_ev); - evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC); - } - - TMR_DBG_PRINT("start %lu, period %lu\n", start_abs, period); - - tmo->period = period; - if (start_abs == 0) - start_abs = odp_timer_current_tick(tmo->odp_timer_pool) + period; - tmo->last_tick = start_abs; - TMR_DBG_PRINT("last_tick %lu, now %lu\n", tmo->last_tick, - odp_timer_current_tick(tmo->odp_timer_pool)); - - /* set tmo active and arm with absolute time */ - startp.tick_type = ODP_TIMER_TICK_ABS; - startp.tick = start_abs; - startp.tmo_ev = odp_ev; - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); - int ret = odp_timer_start(tmo->odp_timer, &startp); - - if (unlikely(ret != ODP_TIMER_SUCCESS)) { - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - if (esv_ena) - evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC__FAIL); - TMR_DBG_PRINT("diff to tmo %ld\n", - (int64_t)tmo->last_tick - - (int64_t)odp_timer_current_tick(tmo->odp_timer_pool)); - if (ret == ODP_TIMER_TOOLATE) - return EM_ERR_TOOFAR; - else if (ret == ODP_TIMER_TOOEARLY) - return EM_ERR_TOONEAR; - return INTERNAL_ERROR(EM_ERR_LIB_FAILED, - EM_ESCOPE_TMO_SET_PERIODIC, - "odp_timer_start():%d", ret); - } - return EM_OK; -} - -em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF, EM_ERR_BAD_ID, - EM_ESCOPE_TMO_CANCEL, "Invalid tmo"); - RETURN_ERROR_IF(cur_event == NULL, EM_ERR_BAD_POINTER, - EM_ESCOPE_TMO_CANCEL, "NULL pointer"); - } - if (EM_CHECK_LEVEL > 1) { - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - - RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL, - "Invalid tmo state:%d", tmo_state); - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL, - "Invalid tmo buffer"); - } - - TMR_DBG_PRINT("ODP tmo %ld\n", (unsigned long)tmo->odp_timer); - - /* cancel and set tmo idle */ - odp_event_t odp_ev = ODP_EVENT_INVALID; - - /* this will stop periodic latest at next ack */ - odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); - int ret = odp_timer_cancel(tmo->odp_timer, &odp_ev); - - if (ret != 0) { - *cur_event = EM_EVENT_UNDEF; - if (EM_CHECK_LEVEL > 1) { - RETURN_ERROR_IF(odp_ev != ODP_EVENT_INVALID, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL, - "Bug? 
ODP timer cancel fail but return event!"); - } - return EM_ERR_BAD_STATE; /* too late to cancel or already canceled */ - } - - em_event_t tmo_ev = event_odp2em(odp_ev); - - if (esv_enabled()) - tmo_ev = evstate_em2usr(tmo_ev, event_to_hdr(tmo_ev), EVSTATE__TMO_CANCEL); - - *cur_event = tmo_ev; - return EM_OK; -} - -em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev) -{ - if (EM_CHECK_LEVEL > 0) { - RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || - next_tmo_ev == EM_EVENT_UNDEF, - EM_ERR_BAD_ID, EM_ESCOPE_TMO_ACK, - "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", - tmo, next_tmo_ev); - RETURN_ERROR_IF(!(tmo->flags & EM_TMO_FLAG_PERIODIC), - EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_ACK, - "Tmo ACK: Not a periodic tmo"); - } - - if (EM_TIMER_TMO_STATS) - tmo->stats.num_acks++; - - em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); - /* - * If tmo cancelled: - * Return an error so the application can free the given event. - */ - if (tmo_state == EM_TMO_STATE_IDLE) /* canceled, no errorhandler */ - return EM_ERR_CANCELED; - - RETURN_ERROR_IF(tmo_state != EM_TMO_STATE_ACTIVE, - EM_ERR_BAD_STATE, EM_ESCOPE_TMO_ACK, - "Tmo ACK: invalid tmo state:%d", tmo_state); - - if (EM_CHECK_LEVEL > 1) { - RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), - EM_ERR_BAD_ID, EM_ESCOPE_TMO_ACK, - "Tmo ACK: invalid tmo buffer"); - } - - event_hdr_t *ev_hdr = NULL; - odp_event_t odp_ev = event_em2odp(next_tmo_ev); - bool esv_ena = esv_enabled(); - - if (esv_ena) { - ev_hdr = event_to_hdr(next_tmo_ev); - evstate_usr2em(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK); - } - - /* - * The periodic timer will silently stop if ack fails! Attempt to - * handle exceptions and if the tmo cannot be renewed, call - * the errorhandler so the application may recover. - */ - tmo->last_tick += tmo->period; /* maintain absolute time */ - int ret; - int tries = EM_TIMER_ACK_TRIES; - em_status_t err; - odp_timer_start_t startp; - - startp.tick_type = ODP_TIMER_TICK_ABS; - startp.tmo_ev = odp_ev; - - /* try to set tmo EM_TIMER_ACK_TRIES times */ - do { - /* ask new timeout for next period */ - startp.tick = tmo->last_tick; - ret = odp_timer_start(tmo->odp_timer, &startp); - /* - * Calling ack() was delayed over next period if 'ret' is - * ODP_TIMER_TOOEARLY, i.e. now in past. 
Other errors - * should not happen, fatal for this tmo - */ - if (likely(ret != ODP_TIMER_TOOEARLY)) { - if (ret != ODP_TIMER_SUCCESS) { - TMR_DBG_PRINT("ODP return %d\n" - "tmo tgt/tick now %lu/%lu\n", - ret, tmo->last_tick, - odp_timer_current_tick(tmo->odp_timer_pool)); - } - break; - } - - /* ODP_TIMER_TOOEARLY: ack() delayed beyond next time slot */ - if (EM_TIMER_TMO_STATS) - tmo->stats.num_late_ack++; - TMR_DBG_PRINT("late, tgt/now %lu/%lu\n", tmo->last_tick, - odp_timer_current_tick(tmo->odp_timer_pool)); - - if (tmo->flags & EM_TMO_FLAG_NOSKIP) /* not allowed to skip, next immediately */ - return handle_ack_noskip(next_tmo_ev, ev_hdr, tmo->queue); - - /* skip already passed periods */ - handle_ack_skip(tmo); - - tries--; - if (unlikely(tries < 1)) { - err = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, - EM_ESCOPE_TMO_ACK, - "Tmo ACK: too many retries:%u", - EM_TIMER_ACK_TRIES); - goto ack_err; - } - } while (ret != ODP_TIMER_SUCCESS); - - if (unlikely(ret != ODP_TIMER_SUCCESS)) { - err = INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK, - "Tmo ACK: failed to renew tmo (odp ret %d)", - ret); - goto ack_err; - } - return EM_OK; - -ack_err: - if (esv_ena) - evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__FAIL); - return err; -} - -int em_timer_get_all(em_timer_t *tmr_list, int max) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(tmr_list == NULL || max < 1)) - return 0; - - int num = 0; - - odp_ticketlock_lock(&em_shm->timers.timer_lock); - for (int i = 0; i < EM_ODP_MAX_TIMERS; i++) { - if (em_shm->timers.timer[i].odp_tmr_pool != ODP_TIMER_POOL_INVALID) { - tmr_list[num] = TMR_I2H(i); - num++; - if (num >= max) - break; - } - } - odp_ticketlock_unlock(&em_shm->timers.timer_lock); - - return num; -} - -em_status_t em_timer_get_attr(em_timer_t tmr, em_timer_attr_t *tmr_attr) -{ - odp_timer_pool_info_t poolinfo; - int i = TMR_H2I(tmr); - int ret; - size_t sz; - - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(!is_timer_valid(tmr) || tmr_attr == NULL, - EM_ERR_BAD_ID, EM_ESCOPE_TIMER_GET_ATTR, - "Inv.args: timer:%" PRI_TMR " tmr_attr:%p", - tmr, tmr_attr); - - /* get current values from ODP */ - ret = odp_timer_pool_info(em_shm->timers.timer[i].odp_tmr_pool, &poolinfo); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_GET_ATTR, - "ODP timer pool info failed"); - - tmr_attr->resparam.res_ns = poolinfo.param.res_ns; - tmr_attr->resparam.max_tmo = poolinfo.param.max_tmo; - tmr_attr->resparam.min_tmo = poolinfo.param.min_tmo; - tmr_attr->num_tmo = poolinfo.param.num_timers; - tmr_attr->flags = em_shm->timers.timer[i].flags; - timer_clksrc_odp2em(poolinfo.param.clk_src, &tmr_attr->resparam.clk_src); - sz = sizeof(tmr_attr->name); - strncpy(tmr_attr->name, poolinfo.name, sz - 1); - tmr_attr->name[sz - 1] = '\0'; - - return EM_OK; -} - -uint64_t em_timer_get_freq(em_timer_t tmr) -{ - const timer_storage_t *const tmrs = &em_shm->timers; - - if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TIMER_GET_FREQ, - "Invalid timer:%" PRI_TMR "", tmr); - return 0; - } - - return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, - 1000ULL * 1000ULL * 1000ULL); /* 1 sec */ -} - -uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks) -{ - const timer_storage_t *const tmrs = &em_shm->timers; - - if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TIMER_TICK_TO_NS, - "Invalid timer:%" PRI_TMR "", tmr); - return 0; - } - return odp_timer_tick_to_ns(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, 
(uint64_t)ticks); -} - -em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns) -{ - const timer_storage_t *const tmrs = &em_shm->timers; - - if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TIMER_NS_TO_TICK, - "Invalid timer:%" PRI_TMR "", tmr); - return 0; - } - return (em_timer_tick_t)odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ns); -} - -em_tmo_state_t em_tmo_get_state(em_tmo_t tmo) -{ - if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo"); - return EM_TMO_STATE_UNKNOWN; - } - if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo buffer"); - return EM_TMO_STATE_UNKNOWN; - } - return odp_atomic_load_acq_u32(&tmo->state); -} - -em_status_t em_tmo_get_stats(em_tmo_t tmo, em_tmo_stats_t *stat) -{ - if (EM_CHECK_LEVEL > 0) { - if (unlikely(tmo == EM_TMO_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATS, "Invalid tmo"); - return EM_ERR_BAD_ID; - } - if (unlikely(tmo->odp_timer == ODP_TIMER_INVALID)) { - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_STATS, - "tmo deleted?"); - return EM_ERR_BAD_STATE; - } - } - - if (EM_TIMER_TMO_STATS) { - if (stat) - *stat = tmo->stats; - } else { - return EM_ERR_NOT_IMPLEMENTED; - } - return EM_OK; -} +/* + * Copyright (c) 2016, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * --------------------------------------------------------------------- + * Some notes about the implementation: + * + * EM timer add-on API is close to ODP timer, but there are issues + * making this code a bit more complex than it could be: + * + * 1) no periodic timer in ODP + * 2) unless using the pre-defined timeout event, there is no way to access + * all necessary information at runtime to implement a periodic timer + * + * Point 2 is solved by creating a timeout pool.
When user allocates + * an EM timeout, a new minimum size buffer is allocated to store all the needed + * information. Timer handle is a pointer to such buffer so all data is + * available via the handle (ack() is the most problematic case). This does + * create a performance penalty, but so far it looks like the penalty is not + * too large and it does simplify the code otherwise. Also, timeouts can be + * pre-allocated, as the API separates creation and arming. + * Most of the synchronization is handled by ODP timer; a ticketlock is used + * for the high-level management API. + * + */ +#include "em_include.h" +#include + +#include "em_timer.h" + +/* timer handle = index + 1 (UNDEF 0) */ +#define TMR_I2H(x) ((em_timer_t)(uintptr_t)((x) + 1)) +#define TMR_H2I(x) ((int)((uintptr_t)(x) - 1)) + +static inline int is_queue_valid_type(em_timer_t tmr, const queue_elem_t *q_elem) +{ + unsigned int tmridx = (unsigned int)TMR_H2I(tmr); + + /* implementation specific */ + if (em_shm->timers.timer[tmridx].plain_q_ok && q_elem->type == EM_QUEUE_TYPE_UNSCHEDULED) + return 1; + /* EM assumes scheduled always supported */ + return (q_elem->type == EM_QUEUE_TYPE_ATOMIC || + q_elem->type == EM_QUEUE_TYPE_PARALLEL || + q_elem->type == EM_QUEUE_TYPE_PARALLEL_ORDERED) ? 1 : 0; + + /* LOCAL or OUTPUT queues not supported */ +} + +static inline int is_timer_valid(em_timer_t tmr) +{ + unsigned int i; + const timer_storage_t *const tmrs = &em_shm->timers; + + if (unlikely(tmr == EM_TIMER_UNDEF)) + return 0; + + i = (unsigned int)TMR_H2I(tmr); + if (unlikely(i >= EM_ODP_MAX_TIMERS)) + return 0; + + if (unlikely(tmrs->timer[i].odp_tmr_pool == ODP_TIMER_POOL_INVALID || + tmrs->timer[i].tmo_pool == ODP_POOL_INVALID)) + return 0; + return 1; +} + +static inline em_status_t handle_ack_noskip(em_event_t next_tmo_ev, + event_hdr_t *ev_hdr, + em_queue_t queue) +{ + if (esv_enabled()) + evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__NOSKIP); + + em_status_t err = em_send(next_tmo_ev, queue); + + if (unlikely(err != EM_OK)) + err = INTERNAL_ERROR(err, EM_ESCOPE_TMO_ACK, "Tmo ACK: noskip em_send fail"); + + return err; /* EM_OK or send-failure */ +} + +static inline void handle_ack_skip(em_tmo_t tmo) +{ + uint64_t odpt = odp_timer_current_tick(tmo->odp_timer_pool); + uint64_t skips; + + if (odpt > tmo->last_tick) /* late, over next period */ + skips = ((odpt - tmo->last_tick) / tmo->period) + 1; + else + skips = 1; /* not yet over next period, but late for setting */ + + tmo->last_tick += skips * tmo->period; + TMR_DBG_PRINT("%lu skips * %lu ticks => new tgt %lu\n", + skips, tmo->period, tmo->last_tick); + if (EM_TIMER_TMO_STATS) + tmo->stats.num_period_skips += skips; +} + +static inline bool check_tmo_flags(em_tmo_flag_t flags) +{ + /* Check for valid tmo flags (oneshot OR periodic mainly) */ + if (unlikely(!(flags & (EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC)))) + return false; + + if (unlikely((flags & EM_TMO_FLAG_ONESHOT) && (flags & EM_TMO_FLAG_PERIODIC))) + return false; + + if (EM_CHECK_LEVEL > 1) { + em_tmo_flag_t inv_flags = ~(EM_TMO_FLAG_ONESHOT | EM_TMO_FLAG_PERIODIC | + EM_TMO_FLAG_NOSKIP); + if (unlikely(flags & inv_flags)) + return false; + } + return true; +} + +static inline bool check_timer_attr(const em_timer_attr_t *tmr_attr) +{ + if (unlikely(tmr_attr == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_TIMER_CREATE, + "NULL ptr given"); + return false; + } + if (unlikely(tmr_attr->__internal_check != EM_CHECK_INIT_CALLED)) { + INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_TIMER_CREATE, +
"em_timer_attr_t not initialized"); + return false; + } + if (unlikely(tmr_attr->resparam.res_ns && tmr_attr->resparam.res_hz)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE, + "Only res_ns OR res_hz allowed"); + return false; + } + return true; +} + +void em_timer_attr_init(em_timer_attr_t *tmr_attr) +{ + if (unlikely(EM_CHECK_LEVEL > 0 && tmr_attr == NULL)) + return; /* just ignore NULL here */ + + /* strategy: first put default resolution, then validate based on that */ + tmr_attr->resparam.res_ns = EM_ODP_TIMER_RESOL_DEF_NS; + tmr_attr->resparam.res_hz = 0; + tmr_attr->resparam.clk_src = EM_TIMER_CLKSRC_DEFAULT; + tmr_attr->flags = EM_TIMER_FLAG_DEFAULT; + + odp_timer_clk_src_t odp_clksrc; + odp_timer_capability_t odp_capa; + odp_timer_res_capability_t odp_res_capa; + int err; + + err = timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_ATTR_INIT, + "Unsupported EM-timer clock source:%d", + tmr_attr->resparam.clk_src); + return; + } + err = odp_timer_capability(odp_clksrc, &odp_capa); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, + "Timer capability: ret %d, odp-clksrc:%d", + err, odp_clksrc); + return; + } + + memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t)); + odp_res_capa.res_ns = tmr_attr->resparam.res_ns; + err = odp_timer_res_capability(odp_clksrc, &odp_res_capa); + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_ATTR_INIT, + "Timer res capability: ret %d, odp-clksrc:%d, res %lu", + err, odp_clksrc, tmr_attr->resparam.res_ns); + return; + } + + TMR_DBG_PRINT("res %lu -> ODP says min %lu, max %lu\n", + tmr_attr->resparam.res_ns, odp_res_capa.min_tmo, + odp_res_capa.max_tmo); + + tmr_attr->num_tmo = EM_ODP_DEFAULT_TMOS; + if (odp_capa.max_timers && odp_capa.max_timers < EM_ODP_DEFAULT_TMOS) + tmr_attr->num_tmo = odp_capa.max_timers; + + tmr_attr->resparam.min_tmo = odp_res_capa.min_tmo; + tmr_attr->resparam.max_tmo = odp_res_capa.max_tmo; + tmr_attr->name[0] = 0; /* timer_create will add default (no index available here) */ + tmr_attr->__internal_check = EM_CHECK_INIT_CALLED; +} + +em_status_t em_timer_capability(em_timer_capability_t *capa, em_timer_clksrc_t clk_src) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(capa == NULL)) { + EM_LOG(EM_LOG_DBG, "%s(): NULL capa ptr!\n", __func__); + return EM_ERR_BAD_POINTER; + } + + odp_timer_clk_src_t odp_clksrc; + odp_timer_capability_t odp_capa; + + if (unlikely(timer_clksrc_em2odp(clk_src, &odp_clksrc) || + odp_timer_capability(odp_clksrc, &odp_capa))) { + EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src); + return EM_ERR_BAD_ARG; + } + + capa->max_timers = odp_capa.max_pools < EM_ODP_MAX_TIMERS ? 
+ odp_capa.max_pools : EM_ODP_MAX_TIMERS; + capa->max_num_tmo = odp_capa.max_timers; + capa->max_res.clk_src = clk_src; + capa->max_res.res_ns = odp_capa.max_res.res_ns; + capa->max_res.res_hz = odp_capa.max_res.res_hz; + capa->max_res.min_tmo = odp_capa.max_res.min_tmo; + capa->max_res.max_tmo = odp_capa.max_res.max_tmo; + capa->max_tmo.clk_src = clk_src; + capa->max_tmo.res_ns = odp_capa.max_tmo.res_ns; + capa->max_tmo.res_hz = odp_capa.max_tmo.res_hz; + capa->max_tmo.min_tmo = odp_capa.max_tmo.min_tmo; + capa->max_tmo.max_tmo = odp_capa.max_tmo.max_tmo; + return EM_OK; +} + +em_status_t em_timer_res_capability(em_timer_res_param_t *res, em_timer_clksrc_t clk_src) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(res == NULL)) { + EM_LOG(EM_LOG_DBG, "%s: NULL ptr res\n", __func__); + return EM_ERR_BAD_POINTER; + } + + odp_timer_clk_src_t odp_clksrc; + odp_timer_res_capability_t odp_res_capa; + int err; + + err = timer_clksrc_em2odp(clk_src, &odp_clksrc); + if (unlikely(err)) { + EM_LOG(EM_LOG_DBG, "%s: Not supported clk_src %d\n", __func__, clk_src); + return EM_ERR_BAD_ARG; + } + memset(&odp_res_capa, 0, sizeof(odp_timer_res_capability_t)); + odp_res_capa.res_ns = res->res_ns; + odp_res_capa.res_hz = res->res_hz; + odp_res_capa.max_tmo = res->max_tmo; /* ODP will check if both were set */ + err = odp_timer_res_capability(odp_clksrc, &odp_res_capa); + if (unlikely(err)) { + EM_LOG(EM_LOG_DBG, "%s: ODP res_capability failed (ret %d)!\n", __func__, err); + return EM_ERR_BAD_ARG; + } + res->min_tmo = odp_res_capa.min_tmo; + res->max_tmo = odp_res_capa.max_tmo; + res->res_ns = odp_res_capa.res_ns; + res->res_hz = odp_res_capa.res_hz; + res->clk_src = clk_src; + return EM_OK; +} + +em_timer_t em_timer_create(const em_timer_attr_t *tmr_attr) +{ + if (EM_CHECK_LEVEL > 0) { + if (check_timer_attr(tmr_attr) == false) + return EM_TIMER_UNDEF; + } + + odp_timer_pool_param_t odp_tpool_param; + odp_timer_clk_src_t odp_clksrc; + + odp_timer_pool_param_init(&odp_tpool_param); + odp_tpool_param.res_ns = tmr_attr->resparam.res_ns; + odp_tpool_param.res_hz = tmr_attr->resparam.res_hz; + odp_tpool_param.min_tmo = tmr_attr->resparam.min_tmo; + odp_tpool_param.max_tmo = tmr_attr->resparam.max_tmo; + odp_tpool_param.num_timers = tmr_attr->num_tmo; + odp_tpool_param.priv = tmr_attr->flags & EM_TIMER_FLAG_PRIVATE ? 
1 : 0; + if (unlikely(timer_clksrc_em2odp(tmr_attr->resparam.clk_src, &odp_clksrc))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_CREATE, + "Unsupported EM-timer clock source:%d", + tmr_attr->resparam.clk_src); + return EM_TIMER_UNDEF; + } + odp_tpool_param.clk_src = odp_clksrc; + + /* check queue type support */ + odp_timer_capability_t capa; + + if (unlikely(odp_timer_capability(odp_clksrc, &capa))) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, + "ODP timer capa failed for clk:%d", + tmr_attr->resparam.clk_src); + return EM_TIMER_UNDEF; + } + if (unlikely(!capa.queue_type_sched)) { /* must support scheduled queues */ + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, + "ODP does not support scheduled q for clk:%d", + tmr_attr->resparam.clk_src); + return EM_TIMER_UNDEF; + } + + /* buffer pool for tmos */ + odp_pool_param_t odp_pool_param; + + odp_pool_param_init(&odp_pool_param); + odp_pool_param.type = ODP_POOL_BUFFER; + odp_pool_param.buf.size = sizeof(em_timer_timeout_t); + odp_pool_param.buf.align = ODP_CACHE_LINE_SIZE; + if (odp_pool_param.buf.cache_size > EM_ODP_TIMER_CACHE) + odp_pool_param.buf.cache_size = EM_ODP_TIMER_CACHE; + TMR_DBG_PRINT("local tmo pool cache %d\n", odp_pool_param.buf.cache_size); + + /* local pool caching may cause an out-of-buffers situation on a core. Adjust, + * but do not waste too much memory + */ + uint32_t num = tmr_attr->num_tmo + ((em_core_count() - 1) * odp_pool_param.buf.cache_size); + + if (tmr_attr->num_tmo < num) { + TMR_DBG_PRINT("Adjusted pool size %d->%d due to local caching (%d)\n", + tmr_attr->num_tmo, num, odp_pool_param.buf.cache_size); + } + odp_pool_param.buf.num = num; + + /* + * Find a free timer-slot. + * This linear search should not be a performance problem with only a few timers + * available, especially when these are typically created at startup.
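+ * The search and the pool creation below run with the timers ticketlock held, + * so concurrent em_timer_create() calls are serialized and cannot claim the + * same free slot.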
+ */ + int i; + event_timer_t *timer; + + odp_ticketlock_lock(&em_shm->timers.timer_lock); + + for (i = 0; i < EM_ODP_MAX_TIMERS; i++) { + timer = &em_shm->timers.timer[i]; + if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID) /* marks used entry */ + continue; + + char timer_pool_name[ODP_TIMER_POOL_NAME_LEN]; + char tmo_pool_name[ODP_POOL_NAME_LEN]; + const char *name = tmr_attr->name; + + if (tmr_attr->name[0] == '\0') { /* replace NULL with default */ + snprintf(timer_pool_name, ODP_TIMER_POOL_NAME_LEN, + "EM-timer-%d", timer->idx); /* idx initialized by timer_init */ + name = timer_pool_name; + } + + TMR_DBG_PRINT("Creating ODP tmr pool: clk %d, res_ns %lu, res_hz %lu\n", + odp_tpool_param.clk_src, odp_tpool_param.res_ns, + odp_tpool_param.res_hz); + timer->odp_tmr_pool = odp_timer_pool_create(name, &odp_tpool_param); + if (unlikely(timer->odp_tmr_pool == ODP_TIMER_POOL_INVALID)) + goto error_locked; + TMR_DBG_PRINT("Created timer: %s with idx: %d\n", name, timer->idx); + + snprintf(tmo_pool_name, ODP_POOL_NAME_LEN, "Tmo-pool-%d", timer->idx); + timer->tmo_pool = odp_pool_create(tmo_pool_name, &odp_pool_param); + if (unlikely(timer->tmo_pool == ODP_POOL_INVALID)) + goto error_locked; + TMR_DBG_PRINT("Created ODP-pool: %s for %d timeouts\n", + tmo_pool_name, odp_pool_param.buf.num); + + timer->flags = tmr_attr->flags; + timer->plain_q_ok = capa.queue_type_plain; + odp_timer_pool_start(); + break; + } + + odp_ticketlock_unlock(&em_shm->timers.timer_lock); + + if (unlikely(i >= EM_ODP_MAX_TIMERS)) { + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TIMER_CREATE, + "No more timers available"); + return EM_TIMER_UNDEF; + } + TMR_DBG_PRINT("ret %" PRI_TMR "\n", TMR_I2H(i)); + return TMR_I2H(i); + +error_locked: + /* odp_ticketlock_lock(&timer_shm->tlock) */ + + /* 'timer' set in loop */ + if (timer->tmo_pool != ODP_POOL_INVALID) + odp_pool_destroy(timer->tmo_pool); + if (timer->odp_tmr_pool != ODP_TIMER_POOL_INVALID) + odp_timer_pool_destroy(timer->odp_tmr_pool); + timer->tmo_pool = ODP_POOL_INVALID; + timer->odp_tmr_pool = ODP_TIMER_POOL_INVALID; + odp_ticketlock_unlock(&em_shm->timers.timer_lock); + + TMR_DBG_PRINT("ERR odp tmr pool in: clk %u, res %lu, min %lu, max %lu, num %u\n", + odp_tpool_param.clk_src, odp_tpool_param.res_ns, + odp_tpool_param.min_tmo, odp_tpool_param.max_tmo, odp_tpool_param.num_timers); + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_CREATE, "Timer pool create failed"); + return EM_TIMER_UNDEF; +} + +em_status_t em_timer_delete(em_timer_t tmr) +{ + timer_storage_t *const tmrs = &em_shm->timers; + int i = TMR_H2I(tmr); + + odp_ticketlock_lock(&tmrs->timer_lock); + /* take lock before checking so nothing can change */ + if (unlikely(!is_timer_valid(tmr))) { + odp_ticketlock_unlock(&tmrs->timer_lock); + return INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_DELETE, + "Invalid timer:%" PRI_TMR "", tmr); + } + + odp_pool_destroy(tmrs->timer[i].tmo_pool); + tmrs->timer[i].tmo_pool = ODP_POOL_INVALID; + odp_timer_pool_destroy(tmrs->timer[i].odp_tmr_pool); + tmrs->timer[i].odp_tmr_pool = ODP_TIMER_POOL_INVALID; + + odp_ticketlock_unlock(&tmrs->timer_lock); + return EM_OK; +} + +em_timer_tick_t em_timer_current_tick(em_timer_t tmr) +{ + const timer_storage_t *const tmrs = &em_shm->timers; + int i = TMR_H2I(tmr); + + if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) + return 0; + + return odp_timer_current_tick(tmrs->timer[i].odp_tmr_pool); +} + +em_tmo_t em_tmo_create(em_timer_t tmr, em_tmo_flag_t flags, em_queue_t queue) +{ + const queue_elem_t *const q_elem = 
queue_elem_get(queue); + + if (EM_CHECK_LEVEL > 0) { + if (unlikely(!is_timer_valid(tmr))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Invalid timer:%" PRI_TMR "", tmr); + return EM_TMO_UNDEF; + } + if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": inv.Q:%" PRI_QUEUE "", + tmr, queue); + return EM_TMO_UNDEF; + } + if (unlikely(!is_queue_valid_type(tmr, q_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": inv.Q (type):%" PRI_QUEUE "", + tmr, queue); + return EM_TMO_UNDEF; + } + if (unlikely(!check_tmo_flags(flags))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": inv. tmo-flags:0x%x", + tmr, flags); + return EM_TMO_UNDEF; + } + } + + int i = TMR_H2I(tmr); + odp_buffer_t tmo_buf = odp_buffer_alloc(em_shm->timers.timer[i].tmo_pool); + + if (unlikely(tmo_buf == ODP_BUFFER_INVALID)) { + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": tmo pool exhausted", tmr); + return EM_TMO_UNDEF; + } + + em_timer_timeout_t *tmo = odp_buffer_addr(tmo_buf); + odp_timer_pool_t odptmr = em_shm->timers.timer[i].odp_tmr_pool; + + tmo->odp_timer = odp_timer_alloc(odptmr, q_elem->odp_queue, NULL); + if (unlikely(tmo->odp_timer == ODP_TIMER_INVALID)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CREATE, + "Tmr:%" PRI_TMR ": odp_timer_alloc() failed", tmr); + odp_buffer_free(tmo_buf); /* free tmo buf before return */ + return EM_TMO_UNDEF; + } + + /* OK, init state */ + tmo->period = 0; + tmo->odp_timer_pool = odptmr; + tmo->odp_buffer = tmo_buf; + tmo->flags = flags; + tmo->queue = queue; + if (EM_TIMER_TMO_STATS) + memset(&tmo->stats, 0, sizeof(em_tmo_stats_t)); + odp_atomic_init_u32(&tmo->state, EM_TMO_STATE_IDLE); + TMR_DBG_PRINT("ODP tmo %ld allocated\n", (unsigned long)tmo->odp_timer); + + return tmo; +} + +em_status_t em_tmo_delete(em_tmo_t tmo, em_event_t *cur_event) +{ + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || cur_event == NULL, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_DELETE, + "Invalid args: tmo:%" PRI_TMO " cur_event:%p", + tmo, cur_event); + } + if (EM_CHECK_LEVEL > 1) { + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE, + "Invalid tmo buffer"); + + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_DELETE, + "Invalid tmo state:%d", tmo_state); + } + if (EM_CHECK_LEVEL > 2) { + RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID, + EM_ERR_BAD_ID, EM_ESCOPE_TMO_DELETE, + "Invalid tmo odp_timer"); + } + + TMR_DBG_PRINT("ODP tmo %ld\n", (unsigned long)tmo->odp_timer); + + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_UNKNOWN); + + odp_event_t odp_evt = odp_timer_free(tmo->odp_timer); + odp_buffer_t tmp = tmo->odp_buffer; + em_event_t tmo_ev = EM_EVENT_UNDEF; + + tmo->odp_timer = ODP_TIMER_INVALID; + tmo->odp_buffer = ODP_BUFFER_INVALID; + odp_buffer_free(tmp); + + if (odp_evt != ODP_EVENT_INVALID) { + RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && !odp_event_is_valid(odp_evt), + EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_DELETE, + "Invalid tmo event"); + + tmo_ev = event_odp2em(odp_evt); + if (esv_enabled()) + tmo_ev = evstate_em2usr(tmo_ev, event_to_hdr(tmo_ev), + EVSTATE__TMO_DELETE); + } + + *cur_event = tmo_ev; + return EM_OK; +} + +em_status_t em_tmo_set_abs(em_tmo_t tmo, 
em_timer_tick_t ticks_abs, + em_event_t tmo_ev) +{ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + (tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF), + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_ABS, + "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", + tmo, tmo_ev); + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS, + "Invalid tmo buffer"); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + (tmo->flags & EM_TMO_FLAG_PERIODIC), + EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_ABS, + "Cannot set periodic tmo, use _set_periodic()"); + if (EM_CHECK_LEVEL > 1) { + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_ABS, + "Invalid tmo state:%d", tmo_state); + } + RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && + tmo->odp_timer == ODP_TIMER_INVALID, + EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_ABS, + "Invalid tmo odp_timer"); + + event_hdr_t *ev_hdr = NULL; + odp_event_t odp_ev = event_em2odp(tmo_ev); + bool esv_ena = esv_enabled(); + odp_timer_start_t startp; + + if (esv_ena) { + ev_hdr = event_to_hdr(tmo_ev); + evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS); + } + + /* set tmo active and arm with absolute time */ + startp.tick_type = ODP_TIMER_TICK_ABS; + startp.tick = ticks_abs; + startp.tmo_ev = odp_ev; + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); + int ret = odp_timer_start(tmo->odp_timer, &startp); + + if (unlikely(ret != ODP_TIMER_SUCCESS)) { + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); + if (esv_ena) + evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_ABS__FAIL); + if (ret == ODP_TIMER_TOO_FAR) + return EM_ERR_TOOFAR; + else if (ret == ODP_TIMER_TOO_NEAR) + return EM_ERR_TOONEAR; + return INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_SET_ABS, + "odp_timer_start():%d", ret); + } + TMR_DBG_PRINT("OK\n"); + return EM_OK; +} + +em_status_t em_tmo_set_rel(em_tmo_t tmo, em_timer_tick_t ticks_rel, + em_event_t tmo_ev) +{ + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_REL, + "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", + tmo, tmo_ev); + } + if (EM_CHECK_LEVEL > 1) { + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_REL, + "Invalid tmo buffer"); + + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_REL, + "Invalid tmo state:%d", tmo_state); + } + + event_hdr_t *ev_hdr = NULL; + odp_event_t odp_ev = event_em2odp(tmo_ev); + bool esv_ena = esv_enabled(); + odp_timer_start_t startp; + + if (esv_ena) { + ev_hdr = event_to_hdr(tmo_ev); + evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL); + } + + /* set tmo active and arm with relative time */ + tmo->period = ticks_rel; + if (unlikely(tmo->flags & EM_TMO_FLAG_PERIODIC)) { + tmo->last_tick = odp_timer_current_tick(tmo->odp_timer_pool) + + ticks_rel; + } + TMR_DBG_PRINT("last_tick %lu\n", tmo->last_tick); + startp.tick_type = ODP_TIMER_TICK_REL; + startp.tick = ticks_rel; + startp.tmo_ev = odp_ev; + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); + int ret = odp_timer_start(tmo->odp_timer, &startp); + + if (unlikely(ret != ODP_TIMER_SUCCESS)) { + odp_atomic_store_rel_u32(&tmo->state, 
EM_TMO_STATE_IDLE); + if (esv_ena) + evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_REL__FAIL); + return INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_SET_REL, + "odp_timer_start():%d", ret); + } + TMR_DBG_PRINT("OK\n"); + return EM_OK; +} + +em_status_t em_tmo_set_periodic(em_tmo_t tmo, + em_timer_tick_t start_abs, + em_timer_tick_t period, + em_event_t tmo_ev) +{ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + (tmo == EM_TMO_UNDEF || tmo_ev == EM_EVENT_UNDEF), + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_SET_PERIODIC, + "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", + tmo, tmo_ev); + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_SET_PERIODIC, + "Invalid tmo buffer"); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC), + EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_SET_PERIODIC, + "Not periodic tmo"); + if (EM_CHECK_LEVEL > 1) { + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_SET_PERIODIC, + "Invalid tmo state:%d", tmo_state); + } + + event_hdr_t *ev_hdr = NULL; + odp_event_t odp_ev = event_em2odp(tmo_ev); + bool esv_ena = esv_enabled(); + odp_timer_start_t startp; + + if (esv_ena) { + ev_hdr = event_to_hdr(tmo_ev); + evstate_usr2em(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC); + } + + TMR_DBG_PRINT("start %lu, period %lu\n", start_abs, period); + + tmo->period = period; + if (start_abs == 0) + start_abs = odp_timer_current_tick(tmo->odp_timer_pool) + period; + tmo->last_tick = start_abs; + TMR_DBG_PRINT("last_tick %lu, now %lu\n", tmo->last_tick, + odp_timer_current_tick(tmo->odp_timer_pool)); + + /* set tmo active and arm with absolute time */ + startp.tick_type = ODP_TIMER_TICK_ABS; + startp.tick = start_abs; + startp.tmo_ev = odp_ev; + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_ACTIVE); + int ret = odp_timer_start(tmo->odp_timer, &startp); + + if (unlikely(ret != ODP_TIMER_SUCCESS)) { + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); + if (esv_ena) + evstate_usr2em_revert(tmo_ev, ev_hdr, EVSTATE__TMO_SET_PERIODIC__FAIL); + TMR_DBG_PRINT("diff to tmo %ld\n", + (int64_t)tmo->last_tick - + (int64_t)odp_timer_current_tick(tmo->odp_timer_pool)); + if (ret == ODP_TIMER_TOO_FAR) + return EM_ERR_TOOFAR; + else if (ret == ODP_TIMER_TOO_NEAR) + return EM_ERR_TOONEAR; + return INTERNAL_ERROR(EM_ERR_LIB_FAILED, + EM_ESCOPE_TMO_SET_PERIODIC, + "odp_timer_start():%d", ret); + } + return EM_OK; +} + +em_status_t em_tmo_cancel(em_tmo_t tmo, em_event_t *cur_event) +{ + if (EM_CHECK_LEVEL > 0) { + RETURN_ERROR_IF(tmo == EM_TMO_UNDEF || cur_event == NULL, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_CANCEL, + "Invalid args: tmo:%" PRI_TMO " cur_event:%p", + tmo, cur_event); + } + if (EM_CHECK_LEVEL > 1) { + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(!odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL, + "Invalid tmo buffer"); + + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + + RETURN_ERROR_IF(tmo_state == EM_TMO_STATE_UNKNOWN, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL, + "Invalid tmo state:%d", tmo_state); + RETURN_ERROR_IF(tmo->odp_timer == ODP_TIMER_INVALID, + EM_ERR_BAD_ID, EM_ESCOPE_TMO_CANCEL, + "Invalid tmo odp_timer"); + } + + TMR_DBG_PRINT("ODP tmo %ld\n", (unsigned long)tmo->odp_timer); + + /* cancel and set tmo idle */ + odp_event_t odp_ev = 
ODP_EVENT_INVALID; + + /* this will stop periodic latest at next ack */ + odp_atomic_store_rel_u32(&tmo->state, EM_TMO_STATE_IDLE); + int ret = odp_timer_cancel(tmo->odp_timer, &odp_ev); + + if (ret != 0) { + *cur_event = EM_EVENT_UNDEF; + if (EM_CHECK_LEVEL > 1) { + RETURN_ERROR_IF(odp_ev != ODP_EVENT_INVALID, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_CANCEL, + "Bug? ODP timer cancel fail but return event!"); + } + return EM_ERR_BAD_STATE; /* too late to cancel or already canceled */ + } + + /* + * Cancel successful (ret == 0): odp_ev contains the canceled tmo event + */ + + if (EM_CHECK_LEVEL > 2) { + RETURN_ERROR_IF(!odp_event_is_valid(odp_ev), + EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_CANCEL, + "Invalid tmo event"); + } + + em_event_t tmo_ev = event_odp2em(odp_ev); + + if (esv_enabled()) + tmo_ev = evstate_em2usr(tmo_ev, event_to_hdr(tmo_ev), EVSTATE__TMO_CANCEL); + + *cur_event = tmo_ev; + return EM_OK; +} + +em_status_t em_tmo_ack(em_tmo_t tmo, em_event_t next_tmo_ev) +{ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + (tmo == EM_TMO_UNDEF || next_tmo_ev == EM_EVENT_UNDEF), + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_ACK, + "Inv.args: tmo:%" PRI_TMO " ev:%" PRI_EVENT "", + tmo, next_tmo_ev); + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_ACK, + "Tmo ACK: invalid tmo buffer"); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !(tmo->flags & EM_TMO_FLAG_PERIODIC), + EM_ERR_BAD_CONTEXT, EM_ESCOPE_TMO_ACK, + "Tmo ACK: Not a periodic tmo"); + + if (EM_TIMER_TMO_STATS) + tmo->stats.num_acks++; + + em_tmo_state_t tmo_state = odp_atomic_load_acq_u32(&tmo->state); + /* + * If tmo cancelled: + * Return an error so the application can free the given event. + */ + if (tmo_state == EM_TMO_STATE_IDLE) /* canceled, no errorhandler */ + return EM_ERR_CANCELED; + + RETURN_ERROR_IF(tmo_state != EM_TMO_STATE_ACTIVE, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_ACK, + "Tmo ACK: invalid tmo state:%d", tmo_state); + + event_hdr_t *ev_hdr = NULL; + odp_event_t odp_ev = event_em2odp(next_tmo_ev); + bool esv_ena = esv_enabled(); + + if (esv_ena) { + ev_hdr = event_to_hdr(next_tmo_ev); + evstate_usr2em(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK); + } + + /* + * The periodic timer will silently stop if ack fails! Attempt to + * handle exceptions and if the tmo cannot be renewed, call + * the errorhandler so the application may recover. + */ + tmo->last_tick += tmo->period; /* maintain absolute time */ + int ret; + int tries = EM_TIMER_ACK_TRIES; + em_status_t err; + odp_timer_start_t startp; + + startp.tick_type = ODP_TIMER_TICK_ABS; + startp.tmo_ev = odp_ev; + + /* try to set tmo EM_TIMER_ACK_TRIES times */ + do { + /* ask new timeout for next period */ + startp.tick = tmo->last_tick; + ret = odp_timer_start(tmo->odp_timer, &startp); + /* + * Calling ack() was delayed over next period if 'ret' is + * ODP_TIMER_TOO_NEAR, i.e. now in past. 
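+		 * For example (illustrative only): with a period of 10 ticks
+		 * and the next target at tick 110, an ack() that runs at tick
+		 * 125 gets ODP_TIMER_TOO_NEAR. With EM_TMO_FLAG_NOSKIP the
+		 * missed timeouts are delivered immediately (handle_ack_noskip()
+		 * below), otherwise the passed periods are skipped so that the
+		 * next target becomes tick 130 (handle_ack_skip()).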
Other errors + * should not happen, fatal for this tmo + */ + if (likely(ret != ODP_TIMER_TOO_NEAR)) { + if (ret != ODP_TIMER_SUCCESS) { + TMR_DBG_PRINT("ODP return %d\n" + "tmo tgt/tick now %lu/%lu\n", + ret, tmo->last_tick, + odp_timer_current_tick(tmo->odp_timer_pool)); + } + break; + } + + /* ODP_TIMER_TOO_NEAR: ack() delayed beyond next time slot */ + if (EM_TIMER_TMO_STATS) + tmo->stats.num_late_ack++; + TMR_DBG_PRINT("late, tgt/now %lu/%lu\n", tmo->last_tick, + odp_timer_current_tick(tmo->odp_timer_pool)); + + if (tmo->flags & EM_TMO_FLAG_NOSKIP) /* not allowed to skip, next immediately */ + return handle_ack_noskip(next_tmo_ev, ev_hdr, tmo->queue); + + /* skip already passed periods */ + handle_ack_skip(tmo); + + tries--; + if (unlikely(tries < 1)) { + err = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, + EM_ESCOPE_TMO_ACK, + "Tmo ACK: too many retries:%u", + EM_TIMER_ACK_TRIES); + goto ack_err; + } + } while (ret != ODP_TIMER_SUCCESS); + + if (unlikely(ret != ODP_TIMER_SUCCESS)) { + err = INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_TMO_ACK, + "Tmo ACK: failed to renew tmo (odp ret %d)", + ret); + goto ack_err; + } + return EM_OK; + +ack_err: + if (esv_ena) + evstate_usr2em_revert(next_tmo_ev, ev_hdr, EVSTATE__TMO_ACK__FAIL); + return err; +} + +int em_timer_get_all(em_timer_t *tmr_list, int max) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(tmr_list == NULL || max < 1)) + return 0; + + int num = 0; + + odp_ticketlock_lock(&em_shm->timers.timer_lock); + for (int i = 0; i < EM_ODP_MAX_TIMERS; i++) { + if (em_shm->timers.timer[i].odp_tmr_pool != ODP_TIMER_POOL_INVALID) { + tmr_list[num] = TMR_I2H(i); + num++; + if (num >= max) + break; + } + } + odp_ticketlock_unlock(&em_shm->timers.timer_lock); + + return num; +} + +em_status_t em_timer_get_attr(em_timer_t tmr, em_timer_attr_t *tmr_attr) +{ + odp_timer_pool_info_t poolinfo; + int i = TMR_H2I(tmr); + int ret; + size_t sz; + + if (EM_CHECK_LEVEL > 0) + RETURN_ERROR_IF(!is_timer_valid(tmr) || tmr_attr == NULL, + EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_ATTR, + "Inv.args: timer:%" PRI_TMR " tmr_attr:%p", + tmr, tmr_attr); + + /* get current values from ODP */ + ret = odp_timer_pool_info(em_shm->timers.timer[i].odp_tmr_pool, &poolinfo); + RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TIMER_GET_ATTR, + "ODP timer pool info failed"); + + tmr_attr->resparam.res_ns = poolinfo.param.res_ns; + tmr_attr->resparam.max_tmo = poolinfo.param.max_tmo; + tmr_attr->resparam.min_tmo = poolinfo.param.min_tmo; + tmr_attr->num_tmo = poolinfo.param.num_timers; + tmr_attr->flags = em_shm->timers.timer[i].flags; + timer_clksrc_odp2em(poolinfo.param.clk_src, &tmr_attr->resparam.clk_src); + sz = sizeof(tmr_attr->name); + strncpy(tmr_attr->name, poolinfo.name, sz - 1); + tmr_attr->name[sz - 1] = '\0'; + + return EM_OK; +} + +uint64_t em_timer_get_freq(em_timer_t tmr) +{ + const timer_storage_t *const tmrs = &em_shm->timers; + + if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_GET_FREQ, + "Invalid timer:%" PRI_TMR "", tmr); + return 0; + } + + return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, + 1000ULL * 1000ULL * 1000ULL); /* 1 sec */ +} + +uint64_t em_timer_tick_to_ns(em_timer_t tmr, em_timer_tick_t ticks) +{ + const timer_storage_t *const tmrs = &em_shm->timers; + + if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_TICK_TO_NS, + "Invalid timer:%" PRI_TMR "", tmr); + return 0; + } + + return odp_timer_tick_to_ns(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, 
ticks); +} + +em_timer_tick_t em_timer_ns_to_tick(em_timer_t tmr, uint64_t ns) +{ + const timer_storage_t *const tmrs = &em_shm->timers; + + if (EM_CHECK_LEVEL > 0 && !is_timer_valid(tmr)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TIMER_NS_TO_TICK, + "Invalid timer:%" PRI_TMR "", tmr); + return 0; + } + + return odp_timer_ns_to_tick(tmrs->timer[TMR_H2I(tmr)].odp_tmr_pool, ns); +} + +em_tmo_state_t em_tmo_get_state(em_tmo_t tmo) +{ + if (EM_CHECK_LEVEL > 0 && unlikely(tmo == EM_TMO_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo"); + return EM_TMO_STATE_UNKNOWN; + } + if (EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer)) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATE, "Invalid tmo buffer"); + return EM_TMO_STATE_UNKNOWN; + } + + return odp_atomic_load_acq_u32(&tmo->state); +} + +em_status_t em_tmo_get_stats(em_tmo_t tmo, em_tmo_stats_t *stat) +{ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo == EM_TMO_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_TMO_GET_STATS, + "Invalid tmo"); + /* check that tmo buf is valid before accessing other struct members */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !odp_buffer_is_valid(tmo->odp_buffer), + EM_ERR_BAD_ID, EM_ESCOPE_TMO_GET_STATS, + "Invalid tmo buffer"); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && tmo->odp_timer == ODP_TIMER_INVALID, + EM_ERR_BAD_STATE, EM_ESCOPE_TMO_GET_STATS, + "tmo deleted?"); + + if (EM_TIMER_TMO_STATS) { + if (stat) + *stat = tmo->stats; + } else { + return EM_ERR_NOT_IMPLEMENTED; + } + + return EM_OK; +} diff --git a/src/em_atomic_group.c b/src/em_atomic_group.c index d5c70aba..f54eb99e 100644 --- a/src/em_atomic_group.c +++ b/src/em_atomic_group.c @@ -1,453 +1,459 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "em_include.h" - -/** - * Atomic group inits done at global init (once at startup on one core) - */ -em_status_t -atomic_group_init(atomic_group_tbl_t *const atomic_group_tbl, - atomic_group_pool_t *const atomic_group_pool) -{ - atomic_group_elem_t *atomic_group_elem; - const int cores = em_core_count(); - int ret; - - memset(atomic_group_tbl, 0, sizeof(atomic_group_tbl_t)); - memset(atomic_group_pool, 0, sizeof(atomic_group_pool_t)); - env_atomic32_init(&em_shm->atomic_group_count); - - for (int i = 0; i < EM_MAX_ATOMIC_GROUPS; i++) { - em_atomic_group_t agrp = agrp_idx2hdl(i); - atomic_group_elem_t *const agrp_elem = - atomic_group_elem_get(agrp); - - if (unlikely(!agrp_elem)) - return EM_ERR_BAD_POINTER; - - agrp_elem->atomic_group = agrp; /* store handle */ - - /* Init list and lock */ - env_spinlock_init(&agrp_elem->lock); - list_init(&agrp_elem->qlist_head); - env_atomic32_init(&agrp_elem->num_queues); - } - - ret = objpool_init(&atomic_group_pool->objpool, cores); - if (ret != 0) - return EM_ERR_LIB_FAILED; - - for (int i = 0; i < EM_MAX_ATOMIC_GROUPS; i++) { - atomic_group_elem = &atomic_group_tbl->ag_elem[i]; - objpool_add(&atomic_group_pool->objpool, i % cores, - &atomic_group_elem->atomic_group_pool_elem); - } - - return EM_OK; -} - -static inline atomic_group_elem_t * -ag_pool_elem2ag_elem(const objpool_elem_t *const atomic_group_pool_elem) -{ - return (atomic_group_elem_t *)((uintptr_t)atomic_group_pool_elem - - offsetof(atomic_group_elem_t, atomic_group_pool_elem)); -} - -/** - * Dynamic atomic group allocation - */ -em_atomic_group_t -atomic_group_alloc(void) -{ - const atomic_group_elem_t *ag_elem; - const objpool_elem_t *ag_p_elem; - - ag_p_elem = objpool_rem(&em_shm->atomic_group_pool.objpool, - em_core_id()); - - if (unlikely(ag_p_elem == NULL)) - return EM_ATOMIC_GROUP_UNDEF; - - ag_elem = ag_pool_elem2ag_elem(ag_p_elem); - - env_atomic32_inc(&em_shm->atomic_group_count); - return ag_elem->atomic_group; -} - -em_status_t -atomic_group_free(em_atomic_group_t atomic_group) -{ - atomic_group_elem_t *agrp_elem = atomic_group_elem_get(atomic_group); - - if (unlikely(agrp_elem == NULL)) - return EM_ERR_BAD_ID; - - objpool_add(&em_shm->atomic_group_pool.objpool, - agrp_elem->atomic_group_pool_elem.subpool_idx, - &agrp_elem->atomic_group_pool_elem); - - env_atomic32_dec(&em_shm->atomic_group_count); - return EM_OK; -} - -/** - * Called by em_queue_delete() to remove the queue from the atomic group list - */ -void -atomic_group_remove_queue(queue_elem_t *const q_elem) -{ - if (!invalid_atomic_group(q_elem->atomic_group)) { - atomic_group_elem_t *const ag_elem = - atomic_group_elem_get(q_elem->atomic_group); - - atomic_group_rem_queue_list(ag_elem, q_elem); - q_elem->atomic_group = EM_ATOMIC_GROUP_UNDEF; - } -} - -unsigned int -atomic_group_count(void) -{ - return env_atomic32_get(&em_shm->atomic_group_count); -} - -static inline int -ag_local_processing_ended(atomic_group_elem_t *const ag_elem) -{ - em_locm_t *const locm = &em_locm; - - /* - * Check if atomic group processing has ended for this core, meaning - * the application called em_atomic_processing_end() - */ - if (locm->atomic_group_released) { - locm->atomic_group_released = 0; - /* - * Try to acquire the atomic group lock and continue processing. 
- * It is possible that another core has acquired the lock - */ - if (env_spinlock_trylock(&ag_elem->lock)) - return 0; - else - return 1; - } - - return 0; -} - -static inline int -ag_internal_enq(const atomic_group_elem_t *ag_elem, const em_event_t ev_tbl[], - const int num_events, const em_queue_prio_t priority) -{ - odp_event_t odp_ev_tbl[num_events]; - odp_queue_t plain_q; - int ret; - - events_em2odp(ev_tbl, odp_ev_tbl, num_events); - - if (priority == EM_QUEUE_PRIO_HIGHEST) - plain_q = ag_elem->internal_queue.hi_prio; - else - plain_q = ag_elem->internal_queue.lo_prio; - - /* Enqueue events to internal queue */ - ret = odp_queue_enq_multi(plain_q, odp_ev_tbl, num_events); - if (unlikely(ret != num_events)) - return ret > 0 ? ret : 0; - - return num_events; -} - -static inline int -ag_internal_deq(const atomic_group_elem_t *ag_elem, em_event_t ev_tbl[/*out*/], - const int num_events) -{ - /* - * Dequeue odp events directly into ev_tbl[]. - * The function call_eo_receive_fn/multi() will convert to - * EM events with event-generation counts, if ESV is enabled, - * before passing the events to the user EO. - */ - odp_event_t *const ag_ev_tbl = (odp_event_t *const)ev_tbl; - int hi_cnt; - int lo_cnt; - - /* hi-prio events */ - hi_cnt = odp_queue_deq_multi(ag_elem->internal_queue.hi_prio, - ag_ev_tbl/*out*/, num_events); - if (hi_cnt == num_events || hi_cnt < 0) - return hi_cnt; - - /* ...then lo-prio events */ - lo_cnt = odp_queue_deq_multi(ag_elem->internal_queue.lo_prio, - &ag_ev_tbl[hi_cnt]/*out*/, - num_events - hi_cnt); - if (unlikely(lo_cnt < 0)) - return hi_cnt; - - return hi_cnt + lo_cnt; -} - -void -atomic_group_dispatch(em_event_t ev_tbl[], event_hdr_t *const ev_hdr_tbl[], - const int num_events, queue_elem_t *const q_elem) -{ - atomic_group_elem_t *const ag_elem = - atomic_group_elem_get(q_elem->atomic_group); - const em_queue_prio_t priority = q_elem->priority; - int enq_cnt; - - /* Insert the original q_elem pointer into the event header */ - for (int i = 0; i < num_events; i++) - ev_hdr_tbl[i]->q_elem = q_elem; - - /* Enqueue the scheduled events into the atomic group internal queue */ - enq_cnt = ag_internal_enq(ag_elem, ev_tbl, num_events, priority); - - if (unlikely(enq_cnt < num_events)) { - em_free_multi(&ev_tbl[enq_cnt], num_events - enq_cnt); - /* - * Use dispatch escope since this func is called only from - * dispatch_round() => atomic_group_dispatch() - */ - INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_DISPATCH, - "Atomic group:%" PRI_AGRP " internal enqueue:\n" - " num_events:%d enq_cnt:%d", - ag_elem->atomic_group, num_events, enq_cnt); - } - - /* - * Try to acquire the atomic group lock - if not available then some - * other core is already handling the same atomic group. - */ - if (!env_spinlock_trylock(&ag_elem->lock)) - return; - - em_locm_t *const locm = &em_locm; - - /* hint */ - odp_schedule_release_atomic(); - - locm->atomic_group_released = 0; - /* - * Loop until no more events or until atomic processing end. - * Events in the ag_elem->internal_queue:s have been scheduled - * already once and should be dispatched asap. 
- */ - em_event_t deq_ev_tbl[EM_SCHED_AG_MULTI_MAX_BURST]; - event_hdr_t *deq_hdr_tbl[EM_SCHED_AG_MULTI_MAX_BURST]; - - do { - int deq_cnt = ag_internal_deq(ag_elem, deq_ev_tbl /*out*/, - EM_SCHED_AG_MULTI_MAX_BURST); - - if (unlikely(deq_cnt <= 0)) { - env_spinlock_unlock(&ag_elem->lock); - /* return if no more events available */ - return; - } - - locm->event_burst_cnt = deq_cnt; - event_to_hdr_multi(deq_ev_tbl, deq_hdr_tbl /*out*/, deq_cnt); - int tbl_idx = 0; /* index into 'deq_hdr_tbl[]' */ - - /* - * Dispatch in batches of 'batch_cnt' events. - * Each batch contains events from the same atomic queue. - */ - do { - queue_elem_t *const batch_qelem = - deq_hdr_tbl[tbl_idx]->q_elem; - int batch_cnt = 1; - - for (int i = tbl_idx + 1; i < deq_cnt && - deq_hdr_tbl[i]->q_elem == batch_qelem; i++) { - batch_cnt++; - } - - dispatch_events(&deq_ev_tbl[tbl_idx], - &deq_hdr_tbl[tbl_idx], - batch_cnt, batch_qelem); - tbl_idx += batch_cnt; - } while (tbl_idx < deq_cnt); - - } while (!ag_local_processing_ended(ag_elem)); -} - -#define AG_INFO_HDR_STR \ -"Number of atomic groups: %d\n\n" \ -"ID Name Qgrp Q-num\n" \ -"---------------------------------------------------------\n%s\n" - -#define AG_INFO_LEN 58 -#define AG_INFO_FMT "%-10" PRI_AGRP "%-32s%-10" PRI_QGRP "%-5d\n"/*58 characters*/ - -void print_atomic_group_info(void) -{ - unsigned int ag_num; /*atomic group number*/ - const atomic_group_elem_t *ag_elem; - em_atomic_group_t ag_check; - char ag_name[EM_ATOMIC_GROUP_NAME_LEN]; - int len = 0; - int n_print = 0; - - em_atomic_group_t ag = em_atomic_group_get_first(&ag_num); - - /* - * ag_num might not match the actual number of atomic groups returned - * by iterating with func em_atomic_group_get_next() if atomic groups - * are added or removed in parallel by another core. Thus space for 10 - * extra atomic groups is reserved. If more than 10 atomic groups are - * added in parallel by other cores, we print only information of the - * (ag_num + 10) atomic groups. - * - * The extra 1 byte is reserved for the terminating null byte. - */ - const int ag_info_str_len = (ag_num + 10) * AG_INFO_LEN + 1; - char ag_info_str[ag_info_str_len]; - - while (ag != EM_ATOMIC_GROUP_UNDEF) { - ag_elem = atomic_group_elem_get(ag); - - em_atomic_group_get_name(ag, ag_name, sizeof(ag_name)); - - ag_check = em_atomic_group_find(ag_name); - if (unlikely(ag_elem == NULL || ag_check != ag || - !atomic_group_allocated(ag_elem))) { - ag = em_atomic_group_get_next(); - continue; - } - - n_print = snprintf(ag_info_str + len, ag_info_str_len - len, - AG_INFO_FMT, ag, ag_name, ag_elem->queue_group, - env_atomic32_get(&ag_elem->num_queues)); - - /* Not enough space to hold more atomic group info */ - if (n_print >= ag_info_str_len - len) - break; - - len += n_print; - ag = em_atomic_group_get_next(); - } - - /* No atomic group */ - if (len == 0) { - EM_PRINT("No atomic group has been created\n"); - return; - } - - /* - * To prevent printing incomplete information of the last atomic group - * when there is not enough space to hold all atomic group info. 
- */ - ag_info_str[len] = '\0'; - EM_PRINT(AG_INFO_HDR_STR, ag_num, ag_info_str); -} - -#define AG_QUEUE_INFO_HDR_STR \ -"Atomic group %" PRI_AGRP "(%s) has %d queue(s):\n\n" \ -"ID Name Priority Type State Qgrp Ctx\n" \ -"-----------------------------------------------------------------------------------\n" \ -"%s\n" - -#define AG_Q_INFO_LEN 85 -#define AG_Q_INFO_FMT "%-10" PRI_QUEUE "%-32s%-10d%-10s%-9s%-10" PRI_QGRP "%-3c\n" - -void print_atomic_group_queues(em_atomic_group_t ag) -{ - unsigned int q_num; - em_queue_t ag_queue; - const queue_elem_t *q_elem; - char q_name[EM_QUEUE_NAME_LEN]; - int len = 0; - int n_print = 0; - - atomic_group_elem_t *ag_elem = atomic_group_elem_get(ag); - - if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) { - EM_PRINT("Atomic group %" PRI_AGRP "is not created!\n", ag); - return; - } - - ag_queue = em_atomic_group_queue_get_first(&q_num, ag); - - /* - * q_num may not match the number of queues actually returned by iterating - * with em_atomic_group_queue_get_next() if queues are added or removed - * in parallel by another core. Thus space for 10 extra queues is reserved. - * If more than 10 queues are added to this atomic group by other cores - * in parallel, we print only information of the (q_num + 10) queues. - * - * The extra 1 byte is reserved for the terminating null byte. - */ - int q_info_str_len = (q_num + 10) * AG_Q_INFO_LEN + 1; - char q_info_str[q_info_str_len]; - - while (ag_queue != EM_QUEUE_UNDEF) { - q_elem = queue_elem_get(ag_queue); - - if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { - ag_queue = em_atomic_group_queue_get_next(); - continue; - } - - queue_get_name(q_elem, q_name, EM_QUEUE_NAME_LEN - 1); - - n_print = snprintf(q_info_str + len, q_info_str_len - len, - AG_Q_INFO_FMT, ag_queue, q_name, - q_elem->priority, - queue_get_type_str(q_elem->type), - queue_get_state_str(q_elem->state), - q_elem->queue_group, - q_elem->context ? 'Y' : 'N'); - - /* Not enough space to hold more queue info */ - if (n_print >= q_info_str_len - len) - break; - - len += n_print; - ag_queue = em_atomic_group_queue_get_next(); - } - - /* Atomic group has no queue */ - if (!len) { - EM_PRINT("Atomic group %" PRI_AGRP "(%s) has no queue!\n", - ag, ag_elem->name); - return; - } - - /* - * To prevent printing incomplete information of the last queue when - * there is not enough space to hold all queue info. - */ - q_info_str[len] = '\0'; - EM_PRINT(AG_QUEUE_INFO_HDR_STR, ag, ag_elem->name, q_num, q_info_str); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +/** + * Atomic group inits done at global init (once at startup on one core) + */ +em_status_t +atomic_group_init(atomic_group_tbl_t *const atomic_group_tbl, + atomic_group_pool_t *const atomic_group_pool) +{ + atomic_group_elem_t *atomic_group_elem; + const int cores = em_core_count(); + int ret; + + memset(atomic_group_tbl, 0, sizeof(atomic_group_tbl_t)); + memset(atomic_group_pool, 0, sizeof(atomic_group_pool_t)); + env_atomic32_init(&em_shm->atomic_group_count); + + for (int i = 0; i < EM_MAX_ATOMIC_GROUPS; i++) { + em_atomic_group_t agrp = agrp_idx2hdl(i); + atomic_group_elem_t *const agrp_elem = + atomic_group_elem_get(agrp); + + if (unlikely(!agrp_elem)) + return EM_ERR_BAD_POINTER; + + agrp_elem->atomic_group = agrp; /* store handle */ + + /* Init list and lock */ + env_spinlock_init(&agrp_elem->lock); + list_init(&agrp_elem->qlist_head); + env_atomic32_init(&agrp_elem->num_queues); + } + + ret = objpool_init(&atomic_group_pool->objpool, cores); + if (ret != 0) + return EM_ERR_LIB_FAILED; + + for (int i = 0; i < EM_MAX_ATOMIC_GROUPS; i++) { + atomic_group_elem = &atomic_group_tbl->ag_elem[i]; + objpool_add(&atomic_group_pool->objpool, i % cores, + &atomic_group_elem->atomic_group_pool_elem); + } + + return EM_OK; +} + +static inline atomic_group_elem_t * +ag_pool_elem2ag_elem(const objpool_elem_t *const atomic_group_pool_elem) +{ + return (atomic_group_elem_t *)((uintptr_t)atomic_group_pool_elem - + offsetof(atomic_group_elem_t, atomic_group_pool_elem)); +} + +/** + * Dynamic atomic group allocation + */ +em_atomic_group_t +atomic_group_alloc(void) +{ + const atomic_group_elem_t *ag_elem; + const objpool_elem_t *ag_p_elem; + + ag_p_elem = objpool_rem(&em_shm->atomic_group_pool.objpool, + em_core_id()); + + if (unlikely(ag_p_elem == NULL)) + return EM_ATOMIC_GROUP_UNDEF; + + ag_elem = ag_pool_elem2ag_elem(ag_p_elem); + + env_atomic32_inc(&em_shm->atomic_group_count); + return ag_elem->atomic_group; +} + +em_status_t +atomic_group_free(em_atomic_group_t atomic_group) +{ + atomic_group_elem_t *agrp_elem = atomic_group_elem_get(atomic_group); + + if (unlikely(agrp_elem == NULL)) + return EM_ERR_BAD_ID; + + objpool_add(&em_shm->atomic_group_pool.objpool, + agrp_elem->atomic_group_pool_elem.subpool_idx, + &agrp_elem->atomic_group_pool_elem); + + env_atomic32_dec(&em_shm->atomic_group_count); + return EM_OK; +} + +/** + * Called by em_queue_delete() to remove the queue from the atomic group list + */ +void +atomic_group_remove_queue(queue_elem_t *const q_elem) +{ + if (!invalid_atomic_group(q_elem->atomic_group)) { + atomic_group_elem_t *const ag_elem = + atomic_group_elem_get(q_elem->atomic_group); + + atomic_group_rem_queue_list(ag_elem, q_elem); + 
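+		/* the queue no longer belongs to any atomic group */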
q_elem->atomic_group = EM_ATOMIC_GROUP_UNDEF; + } +} + +unsigned int +atomic_group_count(void) +{ + return env_atomic32_get(&em_shm->atomic_group_count); +} + +static inline int +ag_local_processing_ended(atomic_group_elem_t *const ag_elem) +{ + em_locm_t *const locm = &em_locm; + + /* + * Check if atomic group processing has ended for this core, meaning + * the application called em_atomic_processing_end() + */ + if (locm->atomic_group_released) { + locm->atomic_group_released = 0; + /* + * Try to acquire the atomic group lock and continue processing. + * It is possible that another core has acquired the lock + */ + if (env_spinlock_trylock(&ag_elem->lock)) + return 0; + else + return 1; + } + + return 0; +} + +static inline int +ag_internal_enq(const atomic_group_elem_t *ag_elem, const queue_elem_t *q_elem, + const em_event_t ev_tbl[], const int num_events, + const em_queue_prio_t priority) +{ + stash_entry_t entry_tbl[num_events]; + const evhdl_t *const evhdl_tbl = (const evhdl_t *const)ev_tbl; + odp_stash_t stash; + int ret; + + const uint16_t qidx = queue_hdl2idx(q_elem->queue); + + for (int i = 0; i < num_events; i++) { + entry_tbl[i].qidx = qidx; + entry_tbl[i].evptr = evhdl_tbl[i].evptr; + } + + if (priority == EM_QUEUE_PRIO_HIGHEST) + stash = ag_elem->stashes.hi_prio; + else + stash = ag_elem->stashes.lo_prio; + + /* Enqueue events to internal queue */ + ret = odp_stash_put_u64(stash, &entry_tbl[0].u64, num_events); + if (unlikely(ret != num_events)) + return ret > 0 ? ret : 0; + + return num_events; +} + +static inline int +ag_internal_deq(const atomic_group_elem_t *ag_elem, + stash_entry_t entry_tbl[/*out*/], const int num_events) +{ + /* + * The function call_eo_receive_fn/multi() will convert to + * EM events with event-generation counts, if ESV is enabled, + * before passing the events to the user EO. + */ + int32_t hi_cnt; + int32_t lo_cnt; + + /* hi-prio events */ + hi_cnt = odp_stash_get_u64(ag_elem->stashes.hi_prio, + &entry_tbl[0].u64 /*[out]*/, num_events); + if (hi_cnt == num_events || hi_cnt < 0) + return hi_cnt; + + /* ...then lo-prio events */ + lo_cnt = odp_stash_get_u64(ag_elem->stashes.lo_prio, + &entry_tbl[hi_cnt].u64 /*[out]*/, + num_events - hi_cnt); + if (unlikely(lo_cnt < 0)) + return hi_cnt; + + return hi_cnt + lo_cnt; +} + +void atomic_group_dispatch(em_event_t ev_tbl[], const int num_events, + const queue_elem_t *q_elem) +{ + atomic_group_elem_t *const ag_elem = + atomic_group_elem_get(q_elem->atomic_group); + const em_queue_prio_t priority = q_elem->priority; + int enq_cnt; + + /* Enqueue the scheduled events into the atomic group internal queue */ + enq_cnt = ag_internal_enq(ag_elem, q_elem, ev_tbl, num_events, priority); + + if (unlikely(enq_cnt < num_events)) { + em_free_multi(&ev_tbl[enq_cnt], num_events - enq_cnt); + /* + * Use dispatch escope since this func is called only from + * dispatch_round() => atomic_group_dispatch() + */ + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_DISPATCH, + "Atomic group:%" PRI_AGRP " internal enqueue:\n" + " num_events:%d enq_cnt:%d", + ag_elem->atomic_group, num_events, enq_cnt); + } + + /* + * Try to acquire the atomic group lock - if not available then some + * other core is already handling the same atomic group. + */ + if (!env_spinlock_trylock(&ag_elem->lock)) + return; + + em_locm_t *const locm = &em_locm; + + /* hint */ + odp_schedule_release_atomic(); + + locm->atomic_group_released = 0; + /* + * Loop until no more events or until atomic processing end. 
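+	 * Each stash entry packs the queue index together with the event
+	 * pointer into a single u64 (see ag_internal_enq()), which lets the
+	 * loop below re-form per-queue batches after dequeue.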
+ * Events in the ag_elem->stashes have been scheduled + already once and should be dispatched asap. + */ + em_event_t deq_ev_tbl[EM_SCHED_AG_MULTI_MAX_BURST]; + event_hdr_t *deq_evhdr_tbl[EM_SCHED_AG_MULTI_MAX_BURST]; + stash_entry_t entry_tbl[EM_SCHED_AG_MULTI_MAX_BURST]; + + do { + int deq_cnt = ag_internal_deq(ag_elem, entry_tbl /*[out]*/, + EM_SCHED_AG_MULTI_MAX_BURST); + + if (unlikely(deq_cnt <= 0)) { + env_spinlock_unlock(&ag_elem->lock); + /* return if no more events available */ + return; + } + + for (int i = 0; i < deq_cnt; i++) + deq_ev_tbl[i] = (em_event_t)(uintptr_t)entry_tbl[i].evptr; + + locm->event_burst_cnt = deq_cnt; + event_to_hdr_multi(deq_ev_tbl, deq_evhdr_tbl /*out*/, deq_cnt); + int tbl_idx = 0; /* index into ..._tbl[] */ + + /* + * Dispatch in batches of 'batch_cnt' events. + * Each batch contains events from the same atomic queue. + */ + do { + const int qidx = entry_tbl[tbl_idx].qidx; + const em_queue_t queue = queue_idx2hdl(qidx); + queue_elem_t *const batch_qelem = queue_elem_get(queue); + + int batch_cnt = 1; + + for (int i = tbl_idx + 1; i < deq_cnt && + entry_tbl[i].qidx == qidx; i++) { + batch_cnt++; + } + + dispatch_events(&deq_ev_tbl[tbl_idx], + &deq_evhdr_tbl[tbl_idx], + batch_cnt, batch_qelem); + tbl_idx += batch_cnt; + } while (tbl_idx < deq_cnt); + + } while (!ag_local_processing_ended(ag_elem)); +} + +#define AG_INFO_HDR_STR \ +"Number of atomic groups: %d\n\n" \ +"ID Name Qgrp Q-num\n" \ +"---------------------------------------------------------\n%s\n" + +#define AG_INFO_LEN 58 +#define AG_INFO_FMT "%-10" PRI_AGRP "%-32s%-10" PRI_QGRP "%-5d\n"/*58 characters*/ + +void print_atomic_group_info(void) +{ + unsigned int ag_num; /*atomic group number*/ + const atomic_group_elem_t *ag_elem; + em_atomic_group_t ag_check; + char ag_name[EM_ATOMIC_GROUP_NAME_LEN]; + int len = 0; + int n_print = 0; + + em_atomic_group_t ag = em_atomic_group_get_first(&ag_num); + + /* + * ag_num might not match the actual number of atomic groups returned + * by iterating with em_atomic_group_get_next() if atomic groups + * are added or removed in parallel by another core. Thus space for 10 + * extra atomic groups is reserved. If more than 10 atomic groups are + * added in parallel by other cores, we print only information of the + * (ag_num + 10) atomic groups. + * + * The extra 1 byte is reserved for the terminating null byte. + */ + const int ag_info_str_len = (ag_num + 10) * AG_INFO_LEN + 1; + char ag_info_str[ag_info_str_len]; + + while (ag != EM_ATOMIC_GROUP_UNDEF) { + ag_elem = atomic_group_elem_get(ag); + + em_atomic_group_get_name(ag, ag_name, sizeof(ag_name)); + + ag_check = em_atomic_group_find(ag_name); + if (unlikely(ag_elem == NULL || ag_check != ag || + !atomic_group_allocated(ag_elem))) { + ag = em_atomic_group_get_next(); + continue; + } + + n_print = snprintf(ag_info_str + len, ag_info_str_len - len, + AG_INFO_FMT, ag, ag_name, ag_elem->queue_group, + env_atomic32_get(&ag_elem->num_queues)); + + /* Not enough space to hold more atomic group info */ + if (n_print >= ag_info_str_len - len) + break; + + len += n_print; + ag = em_atomic_group_get_next(); + } + + /* No atomic group */ + if (len == 0) { + EM_PRINT("No atomic group has been created\n"); + return; + } + + /* + * To prevent printing incomplete information of the last atomic group + * when there is not enough space to hold all atomic group info.
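+	 * (snprintf() returns the number of characters that would have been
+	 * written, so a return value >= the remaining space means the last
+	 * entry was truncated; 'len' is then left at the previous complete
+	 * entry and the '\0' written below cuts the string there.)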
+ */ + ag_info_str[len] = '\0'; + EM_PRINT(AG_INFO_HDR_STR, ag_num, ag_info_str); +} + +#define AG_QUEUE_INFO_HDR_STR \ +"Atomic group %" PRI_AGRP "(%s) has %d queue(s):\n\n" \ +"ID Name Priority Type State Qgrp Ctx\n" \ +"-----------------------------------------------------------------------------------\n" \ +"%s\n" + +#define AG_Q_INFO_LEN 85 +#define AG_Q_INFO_FMT "%-10" PRI_QUEUE "%-32s%-10d%-10s%-9s%-10" PRI_QGRP "%-3c\n" + +void print_atomic_group_queues(em_atomic_group_t ag) +{ + unsigned int q_num; + em_queue_t ag_queue; + const queue_elem_t *q_elem; + char q_name[EM_QUEUE_NAME_LEN]; + int len = 0; + int n_print = 0; + + atomic_group_elem_t *ag_elem = atomic_group_elem_get(ag); + + if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) { + EM_PRINT("Atomic group %" PRI_AGRP "is not created!\n", ag); + return; + } + + ag_queue = em_atomic_group_queue_get_first(&q_num, ag); + + /* + * q_num may not match the number of queues actually returned by iterating + * with em_atomic_group_queue_get_next() if queues are added or removed + * in parallel by another core. Thus space for 10 extra queues is reserved. + * If more than 10 queues are added to this atomic group by other cores + * in parallel, we print only information of the (q_num + 10) queues. + * + * The extra 1 byte is reserved for the terminating null byte. + */ + int q_info_str_len = (q_num + 10) * AG_Q_INFO_LEN + 1; + char q_info_str[q_info_str_len]; + + while (ag_queue != EM_QUEUE_UNDEF) { + q_elem = queue_elem_get(ag_queue); + + if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { + ag_queue = em_atomic_group_queue_get_next(); + continue; + } + + queue_get_name(q_elem, q_name, EM_QUEUE_NAME_LEN - 1); + + n_print = snprintf(q_info_str + len, q_info_str_len - len, + AG_Q_INFO_FMT, ag_queue, q_name, + q_elem->priority, + queue_get_type_str(q_elem->type), + queue_get_state_str(q_elem->state), + q_elem->queue_group, + q_elem->context ? 'Y' : 'N'); + + /* Not enough space to hold more queue info */ + if (n_print >= q_info_str_len - len) + break; + + len += n_print; + ag_queue = em_atomic_group_queue_get_next(); + } + + /* Atomic group has no queue */ + if (!len) { + EM_PRINT("Atomic group %" PRI_AGRP "(%s) has no queue!\n", + ag, ag_elem->name); + return; + } + + /* + * To prevent printing incomplete information of the last queue when + * there is not enough space to hold all queue info. + */ + q_info_str[len] = '\0'; + EM_PRINT(AG_QUEUE_INFO_HDR_STR, ag, ag_elem->name, q_num, q_info_str); +} diff --git a/src/em_atomic_group.h b/src/em_atomic_group.h index a8e30da6..09db1f52 100644 --- a/src/em_atomic_group.h +++ b/src/em_atomic_group.h @@ -1,148 +1,140 @@ -/* - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * EM internal atomic group functions - * - */ - -#ifndef EM_ATOMIC_GROUP_H_ -#define EM_ATOMIC_GROUP_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#define invalid_atomic_group(atomic_group) \ - ((unsigned int)agrp_hdl2idx((atomic_group)) >= EM_MAX_ATOMIC_GROUPS) - -em_status_t -atomic_group_init(atomic_group_tbl_t *const atomic_group_tbl, - atomic_group_pool_t *const atomic_group_pool); - -em_atomic_group_t -atomic_group_alloc(void); - -em_status_t -atomic_group_free(em_atomic_group_t atomic_group); - -int -atomic_group_available(atomic_group_elem_t *const ag_elem); - -int -atomic_group_again(atomic_group_elem_t *const ag_elem); - -void -atomic_group_remove_queue(queue_elem_t *const q_elem); - -void -atomic_group_dispatch(em_event_t ev_tbl[], event_hdr_t *const ev_hdr_tbl[], - const int num_events, queue_elem_t *const q_elem); - -static inline int -atomic_group_allocated(const atomic_group_elem_t *agrp_elem) -{ - return !objpool_in_pool(&agrp_elem->atomic_group_pool_elem); -} - -static inline int -agrp_hdl2idx(const em_atomic_group_t atomic_group) -{ - return (int)((uintptr_t)atomic_group - 1); -} - -static inline em_atomic_group_t -agrp_idx2hdl(const int atomic_group_index) -{ - return (em_atomic_group_t)(uintptr_t)(atomic_group_index + 1); -} - -static inline atomic_group_elem_t * -atomic_group_elem_get(const em_atomic_group_t atomic_group) -{ - const int ag_idx = agrp_hdl2idx(atomic_group); - atomic_group_elem_t *ag_elem; - - if (unlikely((unsigned int)ag_idx > EM_MAX_ATOMIC_GROUPS - 1)) - return NULL; - - ag_elem = &em_shm->atomic_group_tbl.ag_elem[ag_idx]; - - return ag_elem; -} - -static inline void -atomic_group_add_queue_list(atomic_group_elem_t *const ag_elem, - queue_elem_t *const q_elem) -{ - env_spinlock_lock(&ag_elem->lock); - list_add(&ag_elem->qlist_head, &q_elem->agrp.agrp_node); - env_atomic32_inc(&ag_elem->num_queues); - env_spinlock_unlock(&ag_elem->lock); -} - -static inline void -atomic_group_rem_queue_list(atomic_group_elem_t *const ag_elem, - queue_elem_t *const q_elem) -{ - env_spinlock_lock(&ag_elem->lock); - if (!list_is_empty(&ag_elem->qlist_head)) { - list_rem(&ag_elem->qlist_head, &q_elem->agrp.agrp_node); - env_atomic32_dec(&ag_elem->num_queues); - } - env_spinlock_unlock(&ag_elem->lock); -} - -static inline void -atomic_group_release(void) -{ - em_locm_t *const locm = &em_locm; - atomic_group_elem_t *const agrp_elem = - atomic_group_elem_get(locm->current.sched_q_elem->atomic_group); - - locm->atomic_group_released = 1; - env_spinlock_unlock(&agrp_elem->lock); -} - -unsigned int -atomic_group_count(void); - -/** Print information about all atomic groups */ -void print_atomic_group_info(void); - -/** Print information about all queues of the 
given atomic group */ -void print_atomic_group_queues(em_atomic_group_t ag); - -#ifdef __cplusplus -} -#endif - -#endif /* EM_ATOMIC_GROUP_H_ */ +/* + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * EM internal atomic group functions + * + */ + +#ifndef EM_ATOMIC_GROUP_H_ +#define EM_ATOMIC_GROUP_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define invalid_atomic_group(atomic_group) \ + ((unsigned int)agrp_hdl2idx((atomic_group)) >= EM_MAX_ATOMIC_GROUPS) + +em_status_t +atomic_group_init(atomic_group_tbl_t *const atomic_group_tbl, + atomic_group_pool_t *const atomic_group_pool); + +em_atomic_group_t +atomic_group_alloc(void); + +em_status_t +atomic_group_free(em_atomic_group_t atomic_group); + +void atomic_group_remove_queue(queue_elem_t *const q_elem); + +void atomic_group_dispatch(em_event_t ev_tbl[], const int num_events, + const queue_elem_t *q_elem); + +static inline int +atomic_group_allocated(const atomic_group_elem_t *agrp_elem) +{ + return !objpool_in_pool(&agrp_elem->atomic_group_pool_elem); +} + +static inline int +agrp_hdl2idx(const em_atomic_group_t atomic_group) +{ + return (int)((uintptr_t)atomic_group - 1); +} + +static inline em_atomic_group_t +agrp_idx2hdl(const int atomic_group_index) +{ + return (em_atomic_group_t)(uintptr_t)(atomic_group_index + 1); +} + +static inline atomic_group_elem_t * +atomic_group_elem_get(const em_atomic_group_t atomic_group) +{ + const int ag_idx = agrp_hdl2idx(atomic_group); + atomic_group_elem_t *ag_elem; + + if (unlikely((unsigned int)ag_idx > EM_MAX_ATOMIC_GROUPS - 1)) + return NULL; + + ag_elem = &em_shm->atomic_group_tbl.ag_elem[ag_idx]; + + return ag_elem; +} + +static inline void +atomic_group_add_queue_list(atomic_group_elem_t *const ag_elem, + queue_elem_t *const q_elem) +{ + env_spinlock_lock(&ag_elem->lock); + list_add(&ag_elem->qlist_head, &q_elem->agrp.agrp_node); + env_atomic32_inc(&ag_elem->num_queues); + env_spinlock_unlock(&ag_elem->lock); +} + +static inline void 
+atomic_group_rem_queue_list(atomic_group_elem_t *const ag_elem, + queue_elem_t *const q_elem) +{ + env_spinlock_lock(&ag_elem->lock); + if (!list_is_empty(&ag_elem->qlist_head)) { + list_rem(&ag_elem->qlist_head, &q_elem->agrp.agrp_node); + env_atomic32_dec(&ag_elem->num_queues); + } + env_spinlock_unlock(&ag_elem->lock); +} + +static inline void +atomic_group_release(void) +{ + em_locm_t *const locm = &em_locm; + atomic_group_elem_t *const agrp_elem = + atomic_group_elem_get(locm->current.sched_q_elem->atomic_group); + + locm->atomic_group_released = 1; + env_spinlock_unlock(&agrp_elem->lock); +} + +unsigned int +atomic_group_count(void); + +/** Print information about all atomic groups */ +void print_atomic_group_info(void); + +/** Print information about all queues of the given atomic group */ +void print_atomic_group_queues(em_atomic_group_t ag); + +#ifdef __cplusplus +} +#endif + +#endif /* EM_ATOMIC_GROUP_H_ */ diff --git a/src/em_atomic_group_types.h b/src/em_atomic_group_types.h index c99eede1..3718162e 100644 --- a/src/em_atomic_group_types.h +++ b/src/em_atomic_group_types.h @@ -1,90 +1,90 @@ -/* - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -/** - * @file - * EM internal atomic group types & definitions - * - */ - -#ifndef EM_ATOMIC_GROUP_TYPES_H_ -#define EM_ATOMIC_GROUP_TYPES_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#define EVENT_CACHE_FLUSH 32 - -typedef struct { - /** Atomic group name */ - char name[EM_ATOMIC_GROUP_NAME_LEN]; - /** The atomic group ID (handle) */ - em_atomic_group_t atomic_group; - /** Queue group that the atomic group belongs to */ - em_queue_group_t queue_group; - /** AG pool elem for linking free AGs for AG-alloc */ - objpool_elem_t atomic_group_pool_elem; - /** Internal plain queues for events beloning to this group */ - struct { - /** for high priority events */ - odp_queue_t hi_prio; - /** for events of all other priority levels */ - odp_queue_t lo_prio; - } internal_queue; - - /** Atomic group element lock */ - env_spinlock_t lock ENV_CACHE_LINE_ALIGNED; - /** List of queues (q_elems) that belong to this atomic group */ - list_node_t qlist_head; - /** Number of queues that belong to this atomic group */ - env_atomic32_t num_queues; -} atomic_group_elem_t ENV_CACHE_LINE_ALIGNED; - -/** - * Atomic group table - */ -typedef struct { - /** Atomic group element table */ - atomic_group_elem_t ag_elem[EM_MAX_ATOMIC_GROUPS]; -} atomic_group_tbl_t; - -/** - * Pool of free atomic groups - */ -typedef struct { - objpool_t objpool; -} atomic_group_pool_t; - -#ifdef __cplusplus -} -#endif - -#endif /* EM_ATOMIC_GROUP_TYPES_H_ */ +/* + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/** + * @file + * EM internal atomic group types & definitions + * + */ + +#ifndef EM_ATOMIC_GROUP_TYPES_H_ +#define EM_ATOMIC_GROUP_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define EVENT_CACHE_FLUSH 32 + +typedef struct { + /** Atomic group name */ + char name[EM_ATOMIC_GROUP_NAME_LEN]; + /** The atomic group ID (handle) */ + em_atomic_group_t atomic_group; + /** Queue group that the atomic group belongs to */ + em_queue_group_t queue_group; + /** AG pool elem for linking free AGs for AG-alloc */ + objpool_elem_t atomic_group_pool_elem; + /** Internal stashes for events belonging to this group */ + struct { + /** for high priority events */ + odp_stash_t hi_prio; + /** for events of all other priority levels */ + odp_stash_t lo_prio; + } stashes; + + /** Atomic group element lock */ + env_spinlock_t lock ENV_CACHE_LINE_ALIGNED; + /** List of queues (q_elems) that belong to this atomic group */ + list_node_t qlist_head; + /** Number of queues that belong to this atomic group */ + env_atomic32_t num_queues; +} atomic_group_elem_t ENV_CACHE_LINE_ALIGNED; + +/** + * Atomic group table + */ +typedef struct { + /** Atomic group element table */ + atomic_group_elem_t ag_elem[EM_MAX_ATOMIC_GROUPS]; +} atomic_group_tbl_t; + +/** + * Pool of free atomic groups + */ +typedef struct { + objpool_t objpool; +} atomic_group_pool_t; + +#ifdef __cplusplus +} +#endif + +#endif /* EM_ATOMIC_GROUP_TYPES_H_ */ diff --git a/src/em_chaining.c b/src/em_chaining.c index 31ce22c3..79551d0f 100644 --- a/src/em_chaining.c +++ b/src/em_chaining.c @@ -1,230 +1,256 @@ -/* - * Copyright (c) 2020, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "em_include.h" - -/* em_output_func_t for event-chaining output*/ -int chaining_output(const em_event_t events[], const unsigned int num, - const em_queue_t output_queue, void *output_fn_args); - -/** - * This function is declared as a weak symbol in em_chaining.h, meaning that the - * user can override it during linking with another implementation. - */ -em_status_t -event_send_device(em_event_t event, em_queue_t queue) -{ - internal_queue_t iq = {.queue = queue}; - - (void)event; - return INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_EVENT_SEND_DEVICE, - "No %s() function given!\t" - "device:0x%" PRIx16 " Q-id:0x%" PRIx16 "\n", - __func__, iq.device_id, iq.queue_id); -} - -/** - * This function is declared as a weak symbol in em_chaining.h, meaning that the - * user can override it during linking with another implementation. - */ -int -event_send_device_multi(const em_event_t events[], int num, em_queue_t queue) -{ - internal_queue_t iq = {.queue = queue}; - - (void)events; - (void)num; - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_EVENT_SEND_DEVICE_MULTI, - "No %s() function given!\t" - "device:0x%" PRIx16 " Q-id:0x%" PRIx16 "\n", - __func__, iq.device_id, iq.queue_id); - return 0; -} - -static int -read_config_file(void) -{ - const char *conf_str; - int val = 0; - int ret; - - EM_PRINT("EM Event-Chaining config:\n"); - - /* - * Option: event_chaining.num_order_queues - */ - conf_str = "event_chaining.num_order_queues"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - if (val < 0 || val > MAX_CHAINING_OUTPUT_QUEUES) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d' (max: %d)\n", - conf_str, val, MAX_CHAINING_OUTPUT_QUEUES); - return -1; - } - /* store & print the value */ - em_shm->opt.event_chaining.num_order_queues = val; - EM_PRINT(" %s: %d (max: %d)\n", conf_str, val, - MAX_CHAINING_OUTPUT_QUEUES); - - return 0; -} - -em_status_t -chaining_init(event_chaining_t *event_chaining) -{ - em_queue_conf_t queue_conf; - em_output_queue_conf_t output_conf; - em_queue_t output_queue; - unsigned int i; - - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - event_chaining->num_output_queues = 0; - for (i = 0; i < MAX_CHAINING_OUTPUT_QUEUES; i++) - event_chaining->output_queues[i] = EM_QUEUE_UNDEF; - - memset(&queue_conf, 0, sizeof(queue_conf)); - memset(&output_conf, 0, sizeof(output_conf)); - - queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; - queue_conf.min_events = 0; /* system default */ - queue_conf.conf_len = sizeof(output_conf); - queue_conf.conf = &output_conf; - /* Set output-queue callback function, no args needed */ - output_conf.output_fn = chaining_output; - output_conf.output_fn_args = NULL; - output_conf.args_len = 0; - - const unsigned int num = em_shm->opt.event_chaining.num_order_queues; - unsigned char idx = 0; - - for (i = 0; i < num; i++) { - char name[EM_QUEUE_NAME_LEN]; - - snprintf(name, sizeof(name), "Event-Chaining-Output-%02u", idx); - idx++; - name[sizeof(name) - 1] = '\0'; - - output_queue = em_queue_create(name, EM_QUEUE_TYPE_OUTPUT, - EM_QUEUE_PRIO_UNDEF, - EM_QUEUE_GROUP_UNDEF, - &queue_conf); - if (unlikely(output_queue == EM_QUEUE_UNDEF)) - return EM_ERR_ALLOC_FAILED; - - event_chaining->num_output_queues++; - event_chaining->output_queues[i] = output_queue; - } - - return EM_OK; -} - -em_status_t -chaining_term(const event_chaining_t *event_chaining) -{ - const unsigned int num = 
event_chaining->num_output_queues; - em_queue_t output_queue; - em_status_t stat; - - for (unsigned int i = 0; i < num; i++) { - output_queue = event_chaining->output_queues[i]; - /* delete the output queues associated with event chaining */ - stat = em_queue_delete(output_queue); - if (unlikely(stat != EM_OK)) - return stat; - } - - return EM_OK; -} - -/** - * Output-queue callback function of type 'em_output_func_t' for Event-Chaining. - */ -int -chaining_output(const em_event_t events[], const unsigned int num, - const em_queue_t output_queue, void *output_fn_args) -{ - em_queue_t chaining_queue; - - (void)output_queue; - (void)output_fn_args; - - if (unlikely(num <= 0)) - return 0; - - if (num == 1) { - const event_hdr_t *ev_hdr = event_to_hdr(events[0]); - em_status_t stat; - - chaining_queue = ev_hdr->queue; - stat = event_send_device(events[0], chaining_queue); - if (unlikely(stat != EM_OK)) - return 0; - return 1; - } - - /* - * num > 1: - * Dispatch events in batches. Each batch contains events targeted for - * the same chaining queue. - */ - event_hdr_t *ev_hdrs[num]; - unsigned int idx = 0; /* index into 'events[]' and 'ev_hdrs[]' */ - - event_to_hdr_multi(events, ev_hdrs, num); - - do { - chaining_queue = ev_hdrs[idx]->queue; - int batch_cnt = 1; - int ret; - - for (unsigned int i = idx + 1; i < num && - ev_hdrs[i]->queue == chaining_queue; i++) { - batch_cnt++; - } - - ret = event_send_device_multi(&events[idx], batch_cnt, - chaining_queue); - if (unlikely(ret != batch_cnt)) { - if (ret < 0) - return idx; - return idx + ret; - } - idx += batch_cnt; - } while (idx < num); - - return num; -} +/* + * Copyright (c) 2020, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
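The weak-symbol defaults above only report EM_ERR_NOT_IMPLEMENTED, so an application that actually chains events to another device supplies its own definitions at link time. A minimal sketch of such an override, assuming a hypothetical application transport function my_transport_tx() that is not part of EM or ODP:

	#include <event_machine.h>

	/* Hypothetical application transport towards the peer device */
	int my_transport_tx(em_queue_t dst_queue, const void *data, size_t len);

	/* Overrides the weak default at link time */
	em_status_t event_send_device(em_event_t event, em_queue_t queue)
	{
		const void *data = em_event_pointer(event);
		const size_t len = em_event_get_size(event);

		/* Hand the payload to the transport behind 'queue' */
		if (my_transport_tx(queue, data, len) != 0)
			return EM_ERR_LIB_FAILED;

		/* Sent OK: the event is consumed, free the local copy */
		em_free(event);
		return EM_OK;
	}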
+ */ + +#include "em_include.h" + +/* em_output_func_t for event-chaining output*/ +int chaining_output(const em_event_t events[], const unsigned int num, + const em_queue_t output_queue, void *output_fn_args); + +/** + * This function is declared as a weak symbol in em_chaining.h, meaning that the + * user can override it during linking with another implementation. + */ +em_status_t +event_send_device(em_event_t event, em_queue_t queue) +{ + internal_queue_t iq = {.queue = queue}; + + (void)event; + return INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_EVENT_SEND_DEVICE, + "No %s() function given!\t" + "device:0x%" PRIx16 " Q-id:0x%" PRIx16 "\n", + __func__, iq.device_id, iq.queue_id); +} + +/** + * This function is declared as a weak symbol in em_chaining.h, meaning that the + * user can override it during linking with another implementation. + */ +int +event_send_device_multi(const em_event_t events[], int num, em_queue_t queue) +{ + internal_queue_t iq = {.queue = queue}; + + (void)events; + (void)num; + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_EVENT_SEND_DEVICE_MULTI, + "No %s() function given!\t" + "device:0x%" PRIx16 " Q-id:0x%" PRIx16 "\n", + __func__, iq.device_id, iq.queue_id); + return 0; +} + +static int +read_config_file(void) +{ + const char *conf_str; + int val = 0; + bool val_bool = false; + int ret; + + /* Zero all options first */ + memset(&em_shm->opt.event_chaining, 0, sizeof(em_shm->opt.event_chaining)); + + EM_PRINT("EM Event-Chaining config:\n"); + /* + * Option: event_chaining.order_keep - runtime enable/disable + */ + conf_str = "event_chaining.order_keep"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.event_chaining.order_keep = val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", + val_bool); + + /* Read no more options if ordering is disabled */ + if (!em_shm->opt.event_chaining.order_keep) + return 0; /* Note! */ + + /* Temporary: Event chaining re-ordering not yet supported */ + if (unlikely(em_shm->opt.event_chaining.order_keep)) { + EM_LOG(EM_LOG_ERR, + "Config option %s: %s(%d) currently not supported\n", + conf_str, val_bool ? 
"true" : "false", val_bool); + return -1; + } + + /* + * Option: event_chaining.num_order_queues + * (only read if .order_keep == true above) + */ + conf_str = "event_chaining.num_order_queues"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + if (val < 0 || val > MAX_CHAINING_OUTPUT_QUEUES) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d' (max: %d)\n", + conf_str, val, MAX_CHAINING_OUTPUT_QUEUES); + return -1; + } + /* store & print the value */ + em_shm->opt.event_chaining.num_order_queues = val; + EM_PRINT(" %s: %d (max: %d)\n", conf_str, val, + MAX_CHAINING_OUTPUT_QUEUES); + + return 0; +} + +em_status_t +chaining_init(event_chaining_t *event_chaining) +{ + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + /* Remains '0' if 'event_chaining.order_keep = false' in config file */ + event_chaining->num_output_queues = 0; + + for (unsigned int i = 0; i < MAX_CHAINING_OUTPUT_QUEUES; i++) + event_chaining->output_queues[i] = EM_QUEUE_UNDEF; + + if (!em_shm->opt.event_chaining.order_keep) + return EM_OK; /* don't create output queues for event chaining */ + + /* + * Create EM output queues for event chaining, needed to maintain event + * order during an ordered context + */ + em_queue_conf_t queue_conf; + em_output_queue_conf_t output_conf; + + memset(&queue_conf, 0, sizeof(queue_conf)); + memset(&output_conf, 0, sizeof(output_conf)); + + queue_conf.flags = EM_QUEUE_FLAG_DEFAULT; + queue_conf.min_events = 0; /* system default */ + queue_conf.conf_len = sizeof(output_conf); + queue_conf.conf = &output_conf; + /* Set output-queue callback function, no args needed */ + output_conf.output_fn = chaining_output; + output_conf.output_fn_args = NULL; + output_conf.args_len = 0; + + const unsigned int num = em_shm->opt.event_chaining.num_order_queues; + unsigned char idx = 0; + + for (unsigned int i = 0; i < num; i++) { + char name[EM_QUEUE_NAME_LEN]; + + snprintf(name, sizeof(name), "Event-Chaining-Output-%02u", idx); + idx++; + name[sizeof(name) - 1] = '\0'; + + em_queue_t output_queue = em_queue_create(name, + EM_QUEUE_TYPE_OUTPUT, + EM_QUEUE_PRIO_UNDEF, + EM_QUEUE_GROUP_UNDEF, + &queue_conf); + if (unlikely(output_queue == EM_QUEUE_UNDEF)) + return EM_ERR_ALLOC_FAILED; + + event_chaining->num_output_queues++; + event_chaining->output_queues[i] = output_queue; + } + + return EM_OK; +} + +em_status_t +chaining_term(const event_chaining_t *event_chaining) +{ + /* num = 0 if 'event_chaining.order_keep = false' in config file */ + const unsigned int num = event_chaining->num_output_queues; + + for (unsigned int i = 0; i < num; i++) { + em_queue_t output_queue = event_chaining->output_queues[i]; + /* delete the output queues associated with event chaining */ + em_status_t stat = em_queue_delete(output_queue); + + if (unlikely(stat != EM_OK)) + return stat; + } + + return EM_OK; +} + +/** + * Output-queue callback function of type 'em_output_func_t' for Event-Chaining. + * Only needed when sending during an ordered-context when the EM config file + * option is set to 'event_chaining.order_keep = true'. + */ +int +chaining_output(const em_event_t events[], const unsigned int num, + const em_queue_t output_queue, void *output_fn_args) +{ + /* + * NOTE! + * Temporary: Not supporting the EM config file option + * 'event_chaining.order_keep = true' at the moment, checked during + * chaining_init() -> read_config_file(). 
+ * This function will thus not be called until support added. + */ + em_queue_t chaining_queue = EM_QUEUE_UNDEF; + + (void)output_queue; + (void)output_fn_args; + + if (unlikely(num <= 0)) + return 0; + + if (num == 1) { + em_status_t stat = event_send_device(events[0], chaining_queue); + + if (unlikely(stat != EM_OK)) + return 0; + return 1; + } + + /* + * num > 1: + */ + int ret = event_send_device_multi(events, num, chaining_queue); + + if (unlikely((unsigned int)ret != num)) { + if (ret < 0) + return 0; + else + return ret; + } + + return num; +} diff --git a/src/em_chaining.h b/src/em_chaining.h index a0eba9c9..51008b58 100644 --- a/src/em_chaining.h +++ b/src/em_chaining.h @@ -1,247 +1,203 @@ -/* - * Copyright (c) 2020, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - /** - * @file - * EM event chaining support - */ - -#ifndef EM_CHAINING_H_ -#define EM_CHAINING_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#pragma GCC visibility push(default) -/** - * This function is declared as a weak symbol, indicating that the user should - * override it during linking with another implementation if event chaining is - * used. - */ -__attribute__((weak)) -em_status_t event_send_device(em_event_t event, em_queue_t queue); -/** - * This function is declared as a weak symbol, indicating that the user should - * override it during linking with another implementation if event chaining is - * used. - */ -__attribute__((weak)) -int event_send_device_multi(const em_event_t events[], int num, - em_queue_t queue); -#pragma GCC visibility pop - -/** - * Initialize event chaining during start-up - */ -em_status_t -chaining_init(event_chaining_t *event_chaining); - -/** - * Terminate event chaining during shut-down - */ -em_status_t -chaining_term(const event_chaining_t *event_chaining); - -/** - * Send an event to out of EM (e.g. to another device) via event-chaining and a - * user-provided function 'event_send_device()'. 
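For reference, the options parsed by read_config_file() above map to an event_chaining section in the EM config file. A sketch with illustrative values, assuming the libconfig syntax used by config/em-odp.conf:

	event_chaining: {
		# Use EM output queues to keep event order over chained queues
		# during an ordered scheduling context.
		# Must currently remain 'false': re-ordering support is not yet
		# implemented and 'true' makes chaining_init() fail.
		order_keep = false

		# Number of EM output queues used for ordering; only read when
		# order_keep = true (bounded by MAX_CHAINING_OUTPUT_QUEUES).
		num_order_queues = 8
	}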
- * @see event_send_device() - */ -static inline em_status_t -send_chaining(em_event_t event, event_hdr_t *const ev_hdr, - em_queue_t chaining_queue) -{ - const unsigned int num_outq = em_shm->event_chaining.num_output_queues; - const em_sched_context_type_t sched_ctx_type = - em_locm.current.sched_context_type; - - if (num_outq == 0 || sched_ctx_type != EM_SCHED_CONTEXT_TYPE_ORDERED) { - if (!esv_enabled()) - return event_send_device(event, chaining_queue); - /* - * ESV enabled: - */ - em_status_t status; - - event = evstate_em2usr(event, ev_hdr, EVSTATE__OUTPUT_CHAINING); - status = event_send_device(event, chaining_queue); - if (likely(status == EM_OK)) - return EM_OK; - /* error: */ - event = evstate_em2usr_revert(event, ev_hdr, - EVSTATE__OUTPUT_CHAINING__FAIL); - return status; - } - - /* store destination event-chaining queue */ - ev_hdr->queue = chaining_queue; - - /* always use the same output queue for each chaining queue */ - const internal_queue_t iq = {.queue = chaining_queue}; - em_queue_t output_queue; - queue_elem_t *output_q_elem; - uint32_t idx; - - idx = ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq; - output_queue = em_shm->event_chaining.output_queues[idx]; - output_q_elem = queue_elem_get(output_queue); - - return send_output(event, ev_hdr, output_q_elem); -} - -/** - * Send 'num' events out of EM (e.g. to another device) via event-chaining and a - * user-provided function 'event_send_device_multi()'. - * @see event_send_device_multi() - */ -static inline int -send_chaining_multi(const em_event_t events[], event_hdr_t *const ev_hdrs[], - const int num, em_queue_t chaining_queue) -{ - const unsigned int num_outq = em_shm->event_chaining.num_output_queues; - const em_sched_context_type_t sched_ctx_type = - em_locm.current.sched_context_type; - - if (num_outq == 0 || sched_ctx_type != EM_SCHED_CONTEXT_TYPE_ORDERED) { - if (!esv_enabled()) - return event_send_device_multi(events, num, - chaining_queue); - /* - * ESV enabled: - */ - em_event_t tmp_events[num]; - - /* need copy, don't change "const events[]" */ - for (int i = 0; i < num; i++) - tmp_events[i] = events[i]; - evstate_em2usr_multi(tmp_events/*in/out*/, ev_hdrs, num, - EVSTATE__OUTPUT_CHAINING_MULTI); - int num_sent = event_send_device_multi(tmp_events, num, - chaining_queue); - if (unlikely(num_sent < num && num_sent >= 0)) { - evstate_em2usr_revert_multi(&tmp_events[num_sent]/*in/out*/, - &ev_hdrs[num_sent], num - num_sent, - EVSTATE__OUTPUT_CHAINING_MULTI__FAIL); - } - return num_sent; - } - - /* store destination event chaining queue */ - for (int i = 0; i < num; i++) - ev_hdrs[i]->queue = chaining_queue; - - /* always use the same output queue for each chaining queue */ - const internal_queue_t iq = {.queue = chaining_queue}; - em_queue_t output_queue; - queue_elem_t *output_q_elem; - uint32_t idx; - - idx = ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq; - output_queue = em_shm->event_chaining.output_queues[idx]; - output_q_elem = queue_elem_get(output_queue); - - return send_output_multi(events, ev_hdrs, num, output_q_elem); -} - -/** - * Send an event tagged with an event group out of EM (e.g. to another device) - * via event-chaining and a user-provided function 'event_send_device()'. 
- * @see event_send_device() - */ -static inline em_status_t -send_chaining_egrp(em_event_t event, event_hdr_t *const ev_hdr, - em_queue_t chaining_queue, - const event_group_elem_t *egrp_elem) -{ - if (!egrp_elem) - return send_chaining(event, ev_hdr, chaining_queue); - - em_event_group_t save_egrp; - event_group_elem_t *save_egrp_elem; - int32_t save_egrp_gen; - - /* Send to another DEVICE with an event group */ - save_current_evgrp(&save_egrp, &save_egrp_elem, &save_egrp_gen); - /* - * "Simulate" a dispatch round from evgrp perspective, - * send-device() instead of EO-receive() - */ - event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1); - - em_status_t stat = send_chaining(event, ev_hdr, chaining_queue); - - event_group_count_decrement(1); - restore_current_evgrp(save_egrp, save_egrp_elem, save_egrp_gen); - - return stat; -} - -/** - * Send 'num' events tagged with an event group out of EM (e.g. to another device) - * via event-chaining and a user-provided function 'event_send_device_multi()'. - * @see event_send_device_multi() - */ -static inline int -send_chaining_egrp_multi(const em_event_t events[], event_hdr_t *const ev_hdrs[], - const int num, em_queue_t chaining_queue, - const event_group_elem_t *egrp_elem) -{ - if (!egrp_elem) - return send_chaining_multi(events, ev_hdrs, num, chaining_queue); - - em_event_group_t save_egrp; - event_group_elem_t *save_egrp_elem; - int32_t save_egrp_gen; - - /* Send to another DEVICE with an event group */ - save_current_evgrp(&save_egrp, &save_egrp_elem, &save_egrp_gen); - /* - * "Simulate" dispatch rounds from evgrp perspective, - * send-device() instead of EO-receive(). - * Decrement evgrp-count by 'num' instead of by '1'. - * Note: event_group_set_local() called only once for - * all events. - */ - event_group_set_local(ev_hdrs[0]->egrp, ev_hdrs[0]->egrp_gen, num); - - int num_sent = send_chaining_multi(events, ev_hdrs, num, chaining_queue); - - event_group_count_decrement(num); - restore_current_evgrp(save_egrp, save_egrp_elem, save_egrp_gen); - - return num_sent; -} - -#ifdef __cplusplus -} -#endif - -#endif /* EM_CHAINING_H_ */ +/* + * Copyright (c) 2020, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ /**
+ * @file
+ * EM event chaining support
+ */
+
+#ifndef EM_CHAINING_H_
+#define EM_CHAINING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#pragma GCC visibility push(default)
+/**
+ * This function is declared as a weak symbol, indicating that the user should
+ * override it during linking with another implementation if event chaining is
+ * used.
+ */
+__attribute__((weak))
+em_status_t event_send_device(em_event_t event, em_queue_t queue);
+/**
+ * This function is declared as a weak symbol, indicating that the user should
+ * override it during linking with another implementation if event chaining is
+ * used.
+ */
+__attribute__((weak))
+int event_send_device_multi(const em_event_t events[], int num,
+			    em_queue_t queue);
+#pragma GCC visibility pop
+
+/**
+ * Initialize event chaining during start-up
+ */
+em_status_t
+chaining_init(event_chaining_t *event_chaining);
+
+/**
+ * Terminate event chaining during shut-down
+ */
+em_status_t
+chaining_term(const event_chaining_t *event_chaining);
+
+/**
+ * Send an event out of EM (e.g. to another device) via event-chaining and a
+ * user-provided function 'event_send_device()'.
+ * @see event_send_device()
+ */
+static inline em_status_t
+send_chaining(em_event_t event, em_queue_t chaining_queue)
+{
+	const unsigned int num_outq = em_shm->event_chaining.num_output_queues;
+	const em_sched_context_type_t sched_ctx_type =
+		em_locm.current.sched_context_type;
+
+	if (num_outq == 0 || sched_ctx_type != EM_SCHED_CONTEXT_TYPE_ORDERED)
+		return event_send_device(event, chaining_queue);
+
+	/* always use the same output queue for each chaining queue */
+	const internal_queue_t iq = {.queue = chaining_queue};
+	em_queue_t output_queue;
+	queue_elem_t *output_q_elem;
+	uint32_t idx;
+
+	idx = ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq;
+	output_queue = em_shm->event_chaining.output_queues[idx];
+	output_q_elem = queue_elem_get(output_queue);
+
+	return send_output(event, output_q_elem);
+}
+
+/**
+ * Send 'num' events out of EM (e.g. to another device) via event-chaining and a
+ * user-provided function 'event_send_device_multi()'.
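send_chaining() above relies on the internal_queue_t view of the queue handle to pick a stable output queue per chaining queue. A self-contained sketch of the idea; the real internal_queue_t is EM-internal and the field layout below is an assumption:

	#include <stdint.h>

	/* Assumed layout: an EM queue handle carrying a (device-id, queue-id)
	 * pair, so one handle can address a queue on another device */
	typedef union {
		uint32_t queue;             /* stand-in for the handle value */
		struct {
			uint16_t queue_id;  /* queue within the target device */
			uint16_t device_id; /* id of the target device */
		};
	} iq_sketch_t;

	/* Mirrors the index calculation in send_chaining(): events for the
	 * same chaining queue always map to the same output queue */
	static inline uint32_t
	chaining_outq_idx(iq_sketch_t iq, uint32_t num_outq)
	{
		return ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq;
	}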
+ * @see event_send_device_multi() + */ +static inline int +send_chaining_multi(const em_event_t events[], const int num, + em_queue_t chaining_queue) +{ + const unsigned int num_outq = em_shm->event_chaining.num_output_queues; + const em_sched_context_type_t sched_ctx_type = + em_locm.current.sched_context_type; + + if (num_outq == 0 || sched_ctx_type != EM_SCHED_CONTEXT_TYPE_ORDERED) + return event_send_device_multi(events, num, chaining_queue); + + /* always use the same output queue for each chaining queue */ + const internal_queue_t iq = {.queue = chaining_queue}; + em_queue_t output_queue; + queue_elem_t *output_q_elem; + uint32_t idx; + + idx = ((uint32_t)iq.device_id + (uint32_t)iq.queue_id) % num_outq; + output_queue = em_shm->event_chaining.output_queues[idx]; + output_q_elem = queue_elem_get(output_queue); + + return send_output_multi(events, num, output_q_elem); +} + +/** + * Send an event tagged with an event group out of EM (e.g. to another device) + * via event-chaining and a user-provided function 'event_send_device()'. + * @see event_send_device() + */ +static inline em_status_t +send_chaining_egrp(em_event_t event, event_hdr_t *const ev_hdr, + em_queue_t chaining_queue, + const event_group_elem_t *egrp_elem) +{ + if (!egrp_elem) + return send_chaining(event, chaining_queue); + + em_event_group_t save_egrp; + event_group_elem_t *save_egrp_elem; + int32_t save_egrp_gen; + + /* Send to another DEVICE with an event group */ + save_current_evgrp(&save_egrp, &save_egrp_elem, &save_egrp_gen); + /* + * "Simulate" a dispatch round from evgrp perspective, + * send-device() instead of EO-receive() + */ + event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1); + + em_status_t stat = send_chaining(event, chaining_queue); + + event_group_count_decrement(1); + restore_current_evgrp(save_egrp, save_egrp_elem, save_egrp_gen); + + return stat; +} + +/** + * Send 'num' events tagged with an event group out of EM (e.g. to another device) + * via event-chaining and a user-provided function 'event_send_device_multi()'. + * @see event_send_device_multi() + */ +static inline int +send_chaining_egrp_multi(const em_event_t events[], event_hdr_t *const ev_hdrs[], + const int num, em_queue_t chaining_queue, + const event_group_elem_t *egrp_elem) +{ + if (!egrp_elem) + return send_chaining_multi(events, num, chaining_queue); + + em_event_group_t save_egrp; + event_group_elem_t *save_egrp_elem; + int32_t save_egrp_gen; + + /* Send to another DEVICE with an event group */ + save_current_evgrp(&save_egrp, &save_egrp_elem, &save_egrp_gen); + /* + * "Simulate" dispatch rounds from evgrp perspective, + * send-device() instead of EO-receive(). + * Decrement evgrp-count by 'num' instead of by '1'. + * Note: event_group_set_local() called only once for + * all events. + */ + event_group_set_local(ev_hdrs[0]->egrp, ev_hdrs[0]->egrp_gen, num); + + int num_sent = send_chaining_multi(events, num, chaining_queue); + + event_group_count_decrement(num); + restore_current_evgrp(save_egrp, save_egrp_elem, save_egrp_gen); + + return num_sent; +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_CHAINING_H_ */ diff --git a/src/em_cli.c b/src/em_cli.c index d98808ac..5fc758fb 100644 --- a/src/em_cli.c +++ b/src/em_cli.c @@ -1,1082 +1,1082 @@ -/* Copyright (c) 2021, Nokia - * All rights reserved. 
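A companion sketch to the event_send_device() override shown earlier: a minimal application definition of the weak event_send_device_multi() declared in em_chaining.h above, returning the number of events actually sent, as the default's contract implies:

	/* Overrides the weak default at link time; returns the number of
	 * events actually sent. Builds on the event_send_device() override
	 * sketched earlier. */
	int event_send_device_multi(const em_event_t events[], int num,
				    em_queue_t queue)
	{
		int i;

		for (i = 0; i < num; i++) {
			/* Stop at the first failure so the caller can handle
			 * the unsent remainder */
			if (event_send_device(events[i], queue) != EM_OK)
				break;
		}

		return i;
	}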
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include "em_include.h" - -#if EM_CLI - -#define OPTPARSE_IMPLEMENTATION -#include "misc/optparse.h" - -/* Maximum number of bytes (including terminating null byte) for an EM CLI command */ -#define MAX_CMD_LEN 20 - -/* EM CLI shared memory */ -static em_cli_shm_t *cli_shm; - -__attribute__((format(printf, 2, 3))) -static int cli_log(em_log_level_t level, const char *fmt, ...) -{ - (void)level; - - va_list args; - - va_start(args, fmt); - - int r = odph_cli_log_va(fmt, args); - - va_end(args); - - return r; -} - -static void print_em_info_help(void) -{ - const char *usage = "Usage: em_info_print [OPTION]\n" - "Print EM related information.\n" - "\n" - "Options:\n" - " -a, --all\tPrint all EM info\n" - " -p, --cpu-arch\tPrint cpu architure\n" - " -c, --conf\tPrint default and runtime configurations\n" - " -h, --help\tDisplay this help\n"; - odph_cli_log(usage); -} - -static void print_em_info_all(void) -{ - core_log_fn_set(cli_log); - print_em_info(); - core_log_fn_set(NULL); -} - -static void print_em_info_cpu_arch(void) -{ - core_log_fn_set(cli_log); - print_cpu_arch_info(); - core_log_fn_set(NULL); -} - -static void print_em_info_conf(void) -{ - core_log_fn_set(cli_log); - em_libconfig_print(&em_shm->libconfig); - core_log_fn_set(NULL); -} - -static void cmd_em_info_print(int argc, char *argv[]) -{ - /* All current options accept no argument */ - const int max_args = 1; - - /* When no argument is given, print all EM info */ - if (argc == 0) { - print_em_info_all(); - return; - } else if (argc > max_args) { - odph_cli_log("Error: extra parameter given to command!\n"); - return; - } - - /* Unlike getopt, optparse does not require an argument count as input to - * indicate the number of arguments in argv. Instead, it uses NULL pointer - * to decide the end of argument array argv. - * - * argv here contains only CLI command options. To emulate a real command, - * argv_new is constructed to include command name. 
- */ - argc += 1/*Command name*/ + 1/*Terminating NULL pointer*/; - char *argv_new[argc]; - char cmd[MAX_CMD_LEN] = "em_info_print"; - - argv_new[0] = cmd; - for (int i = 1; i < argc - 1; i++) - argv_new[i] = argv[i - 1]; - argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/ - - int option; - struct optparse_long longopts[] = { - {"all", 'a', OPTPARSE_NONE}, - {"cpu-arch", 'p', OPTPARSE_NONE}, - {"conf", 'c', OPTPARSE_NONE}, - {"help", 'h', OPTPARSE_NONE}, - {0} - }; - struct optparse options; - - optparse_init(&options, argv_new); - options.permute = 0; - while (1) { - option = optparse_long(&options, longopts, NULL); - - if (option == -1) - break; - - switch (option) { - case 'a': - print_em_info_all(); - break; - case 'p': - print_em_info_cpu_arch(); - break; - case 'c': - print_em_info_conf(); - break; - case 'h': - print_em_info_help(); - break; - case '?': - odph_cli_log("Error: %s\n", options.errmsg); - return; - default: - odph_cli_log("Unknown Error\n"); - return; - } - } -} - -static void print_em_pool_all(void) -{ - core_log_fn_set(cli_log); - em_pool_info_print_all(); - core_log_fn_set(NULL); -} - -static void print_em_pool(em_pool_t pool, const char *pool_name) -{ - if (pool == EM_POOL_UNDEF) { - if (pool_name) - odph_cli_log("Error: can't find EM pool %s.\n", pool_name); - else - odph_cli_log("Error: can't find EM pool %" PRI_POOL "\n", pool); - return; - } - - core_log_fn_set(cli_log); - pool_info_print_hdr(1); - pool_info_print(pool); - core_log_fn_set(NULL); -} - -static void print_em_pool_help(void) -{ - const char *usage = "Usage: em_pool_print [OPTION]\n" - "Print EM pool related information\n" - "\n" - "Options:\n" - " -a, --all\tPrint info of all pools\n" - " -i, --id \tPrint info of \n" - " -n, --name \tPrint info of \n" - " -h, --help\tDisplay this help\n"; - - odph_cli_log(usage); -} - -static void cmd_em_pool_print(int argc, char *argv[]) -{ - /* Command em_pool_print takes maximum 2 arguments */ - const int max_args = 2; - - /* When no argument is given, print all pool info */ - if (argc == 0) { - print_em_pool_all(); - return; - } else if (argc > max_args) { - odph_cli_log("Error: extra parameter given to command!\n"); - return; - } - - /* Unlike getopt, optparse does not require an argument count as input to - * indicate the number of arguments in argv. Instead, it uses NULL pointer - * to decide the end of argument array argv. - * - * argv here contains only CLI command options. To emulate a real command, - * argv_new is constructed to include command name. 
- */ - argc += 1/*Cmd str "em_pool_print"*/ + 1/*Terminating NULL pointer*/; - char *argv_new[argc]; - char cmd[MAX_CMD_LEN] = "em_pool_print"; - - argv_new[0] = cmd; - for (int i = 1; i < argc - 1; i++) - argv_new[i] = argv[i - 1]; - argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/ - - em_pool_t pool; - int option; - struct optparse_long longopts[] = { - {"all", 'a', OPTPARSE_NONE}, - {"id", 'i', OPTPARSE_REQUIRED}, - {"name", 'n', OPTPARSE_REQUIRED}, - {"help", 'h', OPTPARSE_NONE}, - {0} - }; - struct optparse options; - - optparse_init(&options, argv_new); - options.permute = 0; - while (1) { - option = optparse_long(&options, longopts, NULL); - if (option == -1) /* No more options */ - break; - - switch (option) { - case 'a': - print_em_pool_all(); - break; - case 'i': - pool = (em_pool_t)(uintptr_t)(int)strtol(options.optarg, NULL, 0); - print_em_pool(pool, NULL); - break; - case 'n': - pool = pool_find(options.optarg); - print_em_pool(pool, options.optarg); - break; - case 'h': - print_em_pool_help(); - break; - case '?': - odph_cli_log("Error: %s\n", options.errmsg); - return; - default: - odph_cli_log("Unknown Error\n"); - return; - } - } -} - -static void print_em_queue_help(void) -{ - const char *usage = "Usage: em_queue_print [OPTION]\n" - "Print EM queue information\n" - "\n" - "Options:\n" - " -c, --capa\tPrint queue capabilities\n" - " -a, --all\tPrint info about all queues\n" - " -h, --help\tDisplay this help\n"; - odph_cli_log(usage); -} - -static void print_em_queue_capa(void) -{ - core_log_fn_set(cli_log); - print_queue_capa(); - core_log_fn_set(NULL); -} - -static void print_em_queue_all(void) -{ - core_log_fn_set(cli_log); - print_queue_info(); - core_log_fn_set(NULL); -} - -static void cmd_em_queue_print(int argc, char *argv[]) -{ - /* All current options accept no argument */ - const int max_args = 1; - - /* When no argument is given, print info about all EM queues */ - if (argc == 0) { - print_em_queue_all(); - return; - } else if (argc > max_args) { - odph_cli_log("Error: extra parameter given to command!\n"); - return; - } - - /* Unlike getopt, optparse does not require an argument count as input to - * indicate the number of arguments in argv. Instead, it uses NULL pointer - * to decide the end of argument array argv. - * - * argv here contains only CLI command options. To emulate a real command, - * argv_new is constructed to include command name. 
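The '-i' option handling above turns a CLI id-string into an opaque EM handle. A small sketch of that conversion with a hypothetical helper name: base 0 lets strtol() accept decimal, octal and hex input, and EM handles are pointer-sized, hence the detour through uintptr_t:

	#include <stdint.h>
	#include <stdlib.h>
	#include <event_machine.h>

	/* Hypothetical helper mirroring the casts used by the CLI commands:
	 * "0x2" or "2" -> numeric id -> opaque pointer-based EM handle */
	static em_pool_t
	pool_handle_from_str(const char *str)
	{
		return (em_pool_t)(uintptr_t)(int)strtol(str, NULL, 0);
	}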
- */ - argc += 1/*Cmd str "em_queue_print"*/ + 1/*Terminating NULL pointer*/; - char *argv_new[argc]; - char cmd[MAX_CMD_LEN] = "em_queue_print"; - - argv_new[0] = cmd; - for (int i = 1; i < argc - 1; i++) - argv_new[i] = argv[i - 1]; - argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/ - - int option; - struct optparse_long longopts[] = { - {"capa", 'c', OPTPARSE_NONE}, - {"all", 'a', OPTPARSE_NONE}, - {"help", 'h', OPTPARSE_NONE}, - {0} - }; - struct optparse options; - - optparse_init(&options, argv_new); - options.permute = 0; - while (1) { - option = optparse_long(&options, longopts, NULL); - if (option == -1) /* No more options */ - break; - - switch (option) { - case 'c': - print_em_queue_capa(); - break; - case 'a': - print_em_queue_all(); - break; - case 'h': - print_em_queue_help(); - break; - case '?': - odph_cli_log("Error: %s\n", options.errmsg); - return; - default: - odph_cli_log("Unknown Error\n"); - return; - } - } -} - -static void print_em_qgrp_help(void) -{ - const char *usage = "Usage: em_qgrp_print [OPTION]\n" - "Print EM queue group information\n" - "\n" - "Options:\n" - " -a, --all(default)\tPrint info about all EM queue groups\n" - " -i, --id \tPrint the queue info of \n" - " -n, --name \tPrint the queue info of \n" - " -h, --help\tDisplay this help\n"; - odph_cli_log(usage); -} - -static void print_em_qgrp_all(void) -{ - core_log_fn_set(cli_log); - queue_group_info_print_all(); - core_log_fn_set(NULL); -} - -static void print_em_qgrp_queues(const em_queue_group_t qgrp, const char *name) -{ - if (qgrp == EM_QUEUE_GROUP_UNDEF) { - if (name) - odph_cli_log("Error: can't find queue group %s!\n", name); - else - odph_cli_log("Error: can't find queue group %" PRI_QGRP "!\n", qgrp); - return; - } - - core_log_fn_set(cli_log); - queue_group_queues_print(qgrp); - core_log_fn_set(NULL); -} - -static void cmd_em_qgrp_print(int argc, char *argv[]) -{ - /* em_qgrp_print takes maximum 2 arguments */ - const int max_args = 2; - - /* When no argument is given, print all EM queue group info */ - if (argc == 0) { - print_em_qgrp_all(); - return; - } else if (argc > max_args) { - odph_cli_log("Error: extra parameter given to command!\n"); - return; - } - - /* Unlike getopt, optparse does not require an argument count as input to - * indicate the number of arguments in argv. Instead, it uses NULL pointer - * to decide the end of argument array argv. - * - * argv here contains only CLI command options. To emulate a real command, - * argv_new is constructed to include command name. 
- */ - argc += 1/*Cmd str "em_qgrp_print"*/ + 1/*Terminating NULL pointer*/; - char *argv_new[argc]; - char cmd[MAX_CMD_LEN] = "em_qgrp_print"; - - argv_new[0] = cmd; - for (int i = 1; i < argc - 1; i++) - argv_new[i] = argv[i - 1]; - argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/ - - em_queue_group_t qgrp; - int option; - struct optparse_long longopts[] = { - {"all", 'a', OPTPARSE_NONE}, - {"id", 'i', OPTPARSE_REQUIRED}, - {"name", 'n', OPTPARSE_REQUIRED}, - {"help", 'h', OPTPARSE_NONE}, - {0} - }; - struct optparse options; - - optparse_init(&options, argv_new); - options.permute = 0; - while (1) { - option = optparse_long(&options, longopts, NULL); - - if (option == -1) - break; /* No more options */ - - switch (option) { - case 'a': - print_em_qgrp_all(); - break; - case 'i': - qgrp = (em_queue_group_t)(uintptr_t)(int)strtol(options.optarg, NULL, 0); - print_em_qgrp_queues(qgrp, NULL); - break; - case 'n': - qgrp = em_queue_group_find(options.optarg); - print_em_qgrp_queues(qgrp, options.optarg); - break; - case 'h': - print_em_qgrp_help(); - break; - case '?': - odph_cli_log("Error: %s\n", options.errmsg); - return; - default: - odph_cli_log("Unknown Error\n"); - return; - } - } -} - -static void cmd_em_core_print(int argc, char *argv[]) -{ - (void)argv; - /* Print EM core map */ - if (argc == 0) { - core_log_fn_set(cli_log); - print_core_map_info(); - core_log_fn_set(NULL); - } else { - odph_cli_log("Error: extra parameter given to command!\n"); - } -} - -static void print_em_eo_help(void) -{ - const char *usage = "Usage: em_eo_print [OPTION]\n" - "Print EO information\n" - "\n" - "Options:\n" - " -a, --all\tPrint all EO info\n" - " -i, --id \tPrint info about all queues of \n" - " -n, --name \tPrint info about all queues of \n" - " -h, --help\tDisplay this help\n"; - - odph_cli_log(usage); -} - -static void print_em_eo_all(void) -{ - core_log_fn_set(cli_log); - eo_info_print_all(); - core_log_fn_set(NULL); -} - -static void print_em_eo(const em_eo_t eo, const char *name) -{ - if (eo == EM_EO_UNDEF) { - if (name) - odph_cli_log("Error: can't find EO %s\n", name); - else - odph_cli_log("Error: can't find EO %" PRI_EO "\n", eo); - return; - } - - core_log_fn_set(cli_log); - eo_queue_info_print(eo); - core_log_fn_set(NULL); -} - -static void cmd_em_eo_print(int argc, char *argv[]) -{ - /* em_eo_print takes maximum 2 arguments */ - const int max_args = 2; - - /* When no argument is given, print all eo info */ - if (argc == 0) { - print_em_eo_all(); - return; - } else if (argc > max_args) { - odph_cli_log("Error: extra parameter given to command!\n"); - return; - } - - /* Unlike getopt, optparse does not require an argument count as input to - * indicate the number of arguments in argv. Instead, it uses NULL pointer - * to decide the end of argument array argv. - * - * argv here contains only CLI command options. To emulate a real command, - * argv_new is constructed to include command name. 
- */ - argc += 1/*Cmd str "em_eo_print"*/ + 1/*Terminating NULL pointer*/; - char *argv_new[argc]; - char cmd[MAX_CMD_LEN] = "em_eo_print"; - - argv_new[0] = cmd; - for (int i = 1; i < argc - 1; i++) - argv_new[i] = argv[i - 1]; - argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/ - - em_eo_t eo; - int option; - struct optparse_long longopts[] = { - {"all", 'a', OPTPARSE_NONE}, - {"id", 'i', OPTPARSE_REQUIRED}, - {"name", 'n', OPTPARSE_REQUIRED}, - {"help", 'h', OPTPARSE_NONE}, - {0} - }; - struct optparse options; - - optparse_init(&options, argv_new); - options.permute = 0; - while (1) { - option = optparse_long(&options, longopts, NULL); - if (option == -1) /* No more options */ - break; - - switch (option) { - case 'a': - print_em_eo_all(); - break; - case 'i': - eo = (em_eo_t)(uintptr_t)(int)strtol(options.optarg, NULL, 0); - print_em_eo(eo, NULL); - break; - case 'n': - eo = em_eo_find(options.optarg); - print_em_eo(eo, options.optarg); - break; - case 'h': - print_em_eo_help(); - break; - case '?': - odph_cli_log("Error: %s\n", options.errmsg); - return; - default: - odph_cli_log("Unknown Error\n"); - return; - } - } -} - -static void print_em_agrp_help(void) -{ - const char *usage = "Usage: em_agrp_print [OPTION]\n" - "Print info about atomic groups\n" - "\n" - "Options:\n" - " -a, --all\tPrint info about all atomic groups\n" - " -i, --id \tPrint info about all queues of \n" - " -n, --name \tPrint info about all queues of \n" - " -h, --help\tDisplay this help\n"; - - odph_cli_log(usage); -} - -static void print_em_agrp_all(void) -{ - core_log_fn_set(cli_log); - print_atomic_group_info(); - core_log_fn_set(NULL); -} - -static void print_em_agrp(em_atomic_group_t ag, const char *ag_name) -{ - if (ag == EM_ATOMIC_GROUP_UNDEF) { - if (ag_name) - odph_cli_log("Error: can't find atomic group %s\n", ag_name); - else - odph_cli_log("Error: can't find atomic group %" PRI_AGRP "\n", ag); - return; - } - - core_log_fn_set(cli_log); - print_atomic_group_queues(ag); - core_log_fn_set(NULL); -} - -static void cmd_em_agrp_print(int argc, char *argv[]) -{ - /* em_agrp_print takes maximum 2 arguments */ - const int max_args = 2; - - /* When no argument is given, print info about all atomic groups */ - if (argc == 0) { - print_em_agrp_all(); - return; - } else if (argc > max_args) { - odph_cli_log("Error: extra parameter given to command!\n"); - return; - } - - /* Unlike getopt, optparse does not require an argument count as input to - * indicate the number of arguments in argv. Instead, it uses NULL pointer - * to decide the end of argument array argv. - * - * argv here contains only CLI command options. To emulate a real command, - * argv_new is constructed to include command name. 
- */ - argc += 1/*Cmd name "em_agrp_print"*/ + 1/*Terminating NULL pointer*/; - char *argv_new[argc]; - char cmd[MAX_CMD_LEN] = "em_agrp_print"; - - argv_new[0] = cmd; - for (int i = 1; i < argc - 1; i++) - argv_new[i] = argv[i - 1]; - argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/ - - em_atomic_group_t ag; - int option; - struct optparse_long longopts[] = { - {"all", 'a', OPTPARSE_NONE}, - {"id", 'i', OPTPARSE_REQUIRED}, - {"name", 'n', OPTPARSE_REQUIRED}, - {"help", 'h', OPTPARSE_NONE}, - {0} - }; - struct optparse options; - - optparse_init(&options, argv_new); - options.permute = 0; - - while (1) { - option = optparse_long(&options, longopts, NULL); - - if (option == -1) - break; - - switch (option) { - case 'a': - print_em_agrp_all(); - break; - case 'i': - ag = (em_atomic_group_t)(uintptr_t)(int)strtol(options.optarg, NULL, 0); - print_em_agrp(ag, NULL); - break; - case 'n': - ag = em_atomic_group_find(options.optarg); - print_em_agrp(ag, options.optarg); - break; - case 'h': - print_em_agrp_help(); - break; - case '?': - odph_cli_log("Error: %s\n", options.errmsg); - return; - default: - odph_cli_log("Unknown Error\n"); - return; - } - } -} - -static void cmd_em_egrp_print(int argc, char *argv[]) -{ - (void)argv; - /* When no argument is given, print info about all event groups */ - if (argc == 0) { - core_log_fn_set(cli_log); - event_group_info_print(); - core_log_fn_set(NULL); - } else { - odph_cli_log("Error: extra parameter given to command!\n"); - } -} - -static int cli_register_em_commands(void) -{ - /* Register em commands */ - if (odph_cli_register_command("em_agrp_print", cmd_em_agrp_print, - "[a|i |n |h]")) { - EM_LOG(EM_LOG_ERR, "Registering EM command em_agrp_print failed.\n"); - return -1; - } - - if (odph_cli_register_command("em_eo_print", cmd_em_eo_print, - "[a|i |n |h]")) { - EM_LOG(EM_LOG_ERR, "Registering EM command em_eo_print failed.\n"); - return -1; - } - - if (odph_cli_register_command("em_egrp_print", cmd_em_egrp_print, "")) { - EM_LOG(EM_LOG_ERR, "Registering EM cmd em_egrp_print failed.\n"); - return -1; - } - - if (odph_cli_register_command("em_info_print", cmd_em_info_print, - "[a|p|c|h]")) { - EM_LOG(EM_LOG_ERR, "Registering EM command em_info_print failed.\n"); - return -1; - } - - if (odph_cli_register_command("em_pool_print", cmd_em_pool_print, - "[a|i |n |h]")) { - EM_LOG(EM_LOG_ERR, "Registering EM command em_pool_print failed.\n"); - return -1; - } - - if (odph_cli_register_command("em_queue_print", cmd_em_queue_print, - "[a|c|h]")) { - EM_LOG(EM_LOG_ERR, "Registering EM command em_queue_print failed.\n"); - return -1; - } - - if (odph_cli_register_command("em_qgrp_print", cmd_em_qgrp_print, - "[a|i |n |h]")) { - EM_LOG(EM_LOG_ERR, "Registering EM command em_qgrp_print failed.\n"); - return -1; - } - - if (odph_cli_register_command("em_core_print", cmd_em_core_print, "")) { - EM_LOG(EM_LOG_ERR, "Registering EM command em_core_print failed.\n"); - return -1; - } - - return 0; -} - -static int read_config_file(void) -{ - /* Conf option: cli.enable - runtime enable/disable cli */ - const char *cli_conf = "cli.enable"; - bool cli_enable = false; - int ret = em_libconfig_lookup_bool(&em_shm->libconfig, cli_conf, - &cli_enable); - - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", cli_conf); - return -1; - } - - EM_PRINT("EM CLI config:\n"); - /* store & print the value */ - em_shm->opt.cli.enable = (int)cli_enable; - EM_PRINT(" %s: %s(%d)\n", cli_conf, cli_enable ? 
"true" : "false", - cli_enable); - - cli_conf = "cli.ip_addr"; - ret = em_libconfig_lookup_string(&em_shm->libconfig, cli_conf, - &em_shm->opt.cli.ip_addr); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", cli_conf); - return -1; - } - EM_PRINT(" %s: %s\n", cli_conf, em_shm->opt.cli.ip_addr); - - cli_conf = "cli.port"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, cli_conf, - &em_shm->opt.cli.port); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", cli_conf); - return -1; - } - EM_PRINT(" %s: %d\n", cli_conf, em_shm->opt.cli.port); - - return 0; -} - -static int cli_shm_setup(void) -{ - if (cli_shm != NULL) { - EM_LOG(EM_LOG_ERR, "EM CLI shared memory ptr already set!\n"); - return -1; - } - - /* - * Reserve the CLI shared memory once at start-up. - */ - uint32_t flags = 0; - -#if ODP_VERSION_API_NUM(1, 33, 0) < ODP_VERSION_API - odp_shm_capability_t shm_capa; - int ret = odp_shm_capability(&shm_capa); - - if (ret) { - EM_LOG(EM_LOG_ERR, "shm capability error:%d\n", ret); - return -1; - } - - /* No huge pages needed for the CLI shm */ - if (shm_capa.flags & ODP_SHM_NO_HP) - flags |= ODP_SHM_NO_HP; -#endif - odp_shm_t shm = odp_shm_reserve("em_cli", sizeof(em_cli_shm_t), - ODP_CACHE_LINE_SIZE, flags); - - if (shm == ODP_SHM_INVALID) { - EM_LOG(EM_LOG_ERR, "EM CLI shared memory reservation failed!\n"); - return -1; - } - - cli_shm = odp_shm_addr(shm); - - if (cli_shm == NULL) { - EM_LOG(EM_LOG_ERR, "EM CLI shared memory ptr NULL!\n"); - return -1; - } - - memset(cli_shm, 0, sizeof(em_cli_shm_t)); - - /* Store shm handle, can be used in stop_em_cli() to free the memory */ - cli_shm->this_shm = shm; - - return 0; -} - -static int cli_shm_lookup(void) -{ - odp_shm_t shm; - em_cli_shm_t *shm_addr; - - /* Lookup the EM shared memory on each EM-core */ - shm = odp_shm_lookup("em_cli"); - if (shm == ODP_SHM_INVALID) { - EM_LOG(EM_LOG_ERR, "Shared memory lookup failed!\n"); - return -1; - } - - shm_addr = odp_shm_addr(shm); - if (!shm_addr) { - EM_LOG(EM_LOG_ERR, "Shared memory ptr NULL\n"); - return -1; - } - - if (em_shm->conf.process_per_core && cli_shm == NULL) - cli_shm = shm_addr; - - if (shm_addr != cli_shm) { - EM_LOG(EM_LOG_ERR, "CLI shared memory init fails: cli_shm:%p != shm_addr:%p\n", - cli_shm, shm_addr); - return -1; - } - - return 0; -} - -static int cli_shm_free(void) -{ - if (odp_shm_free(cli_shm->this_shm)) { - EM_LOG(EM_LOG_ERR, "Error: odp_shm_free() failed\n"); - return -1; - } - - /* Set cli_shm = NULL to allow a new call to cli_shm_setup() */ - cli_shm = NULL; - - return 0; -} - -static int cli_thr_fn(__attribute__((__unused__)) void *arg) -{ - init_ext_thread(); - - /* Run CLI server. */ - if (odph_cli_run()) { - EM_LOG(EM_LOG_ERR, "Failed to start CLI server.\n"); - exit(EXIT_FAILURE); - } - - /* em_term_core_cli() */ - return 0; -} - -/* - * Run EM CLI server - * - * When executing this function, the CLI is accepting client connections and - * running commands from a client, if one is connected. - * - * @return EM_OK if successful. 
- */ -static em_status_t run_em_cli(void) -{ - /* Prepare CLI parameters */ - odph_cli_param_t cli_param = {0}; - - odph_cli_param_init(&cli_param); - cli_param.hostname = "EM-ODP"; - cli_param.address = em_shm->opt.cli.ip_addr; - cli_param.port = em_shm->opt.cli.port; - - /* Initialize CLI helper */ - if (odph_cli_init(&cli_param)) { - EM_LOG(EM_LOG_ERR, "Error: odph_cli_init() failed.\n"); - return EM_ERR_LIB_FAILED; - } - - /* Register EM CLI commands */ - if (cli_register_em_commands()) { - EM_LOG(EM_LOG_ERR, "Error: cli_register_em_commands() failed.\n"); - return EM_ERR_LIB_FAILED; - } - - /* Create thread to run CLI server */ - odp_cpumask_t cpumask; - odph_thread_common_param_t thr_common; - odph_thread_param_t thr_param; - odp_instance_t instance; - - if (odp_cpumask_default_control(&cpumask, 1) != 1) { - EM_LOG(EM_LOG_ERR, "Failed to get default CPU mask.\n"); - return EM_ERR_LIB_FAILED; - } - - if (odp_instance(&instance)) { - EM_LOG(EM_LOG_ERR, "Failed to get odp instance.\n"); - return EM_ERR_LIB_FAILED; - } - - odph_thread_common_param_init(&thr_common); - thr_common.instance = instance; - thr_common.cpumask = &cpumask; - thr_common.thread_model = 0; /* 0: Use pthread for the CLI */ - - odph_thread_param_init(&thr_param); - thr_param.thr_type = ODP_THREAD_CONTROL; - thr_param.start = cli_thr_fn; - thr_param.arg = NULL; - - /* Set up EM CLI shared memory */ - if (cli_shm_setup()) { - EM_LOG(EM_LOG_ERR, "Error: cli_shm_setup() failed.\n"); - return EM_ERR_ALLOC_FAILED; - } - - EM_PRINT("Starting CLI server on %s:%d\n", cli_param.address, cli_param.port); - - /* Create EM CLI server thread and store the thread ID to be used in - * stop_em_cli() to wait for the thread to exit. - */ - if (odph_thread_create(&cli_shm->em_cli_thread, &thr_common, - &thr_param, 1) != 1) { - EM_LOG(EM_LOG_ERR, "Failed to create CLI server thread.\n"); - cli_shm_free(); - return -1; - } - - return EM_OK; -} - -/* - * Stop EM CLI server - * - * Stop accepting new client connections and disconnect any connected client. - * - * @return EM_OK if successful. 
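The same odph_cli_register_command() mechanism used by cli_register_em_commands() above is available to applications. A minimal sketch of registering a custom command; the command name and body are hypothetical:

	/* A user CLI command: same signature as the cmd_em_*_print handlers */
	static void cmd_app_stats_print(int argc, char *argv[])
	{
		(void)argc;
		(void)argv;
		odph_cli_log("app stats: ...\n");
	}

	/* Call after odph_cli_init(), e.g. alongside cli_register_em_commands() */
	static int app_register_cli_commands(void)
	{
		if (odph_cli_register_command("app_stats_print",
					      cmd_app_stats_print, "")) {
			EM_LOG(EM_LOG_ERR, "Registering app_stats_print failed\n");
			return -1;
		}

		return 0;
	}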
- */ -static em_status_t stop_em_cli(void) -{ - if (odph_cli_stop()) { - EM_LOG(EM_LOG_ERR, "Failed to stop CLI.\n"); - goto error; - } - - if (odph_thread_join(&cli_shm->em_cli_thread, 1) != 1) { - EM_LOG(EM_LOG_ERR, "Failed to join server thread.\n"); - goto error; - } - - if (odph_cli_term()) { - EM_LOG(EM_LOG_ERR, "Failed to terminate CLI.\n"); - goto error; - } - - cli_shm_free(); - EM_PRINT("\nCLI server terminated!\n"); - - return EM_OK; - -error: - cli_shm_free(); - return EM_ERR_LIB_FAILED; -} - -em_status_t emcli_init(void) -{ - em_status_t stat = EM_OK; - - /* Store libconf options to em_shm */ - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - if (em_shm->opt.cli.enable) { - stat = run_em_cli(); - - if (stat != EM_OK) { - EM_LOG(EM_LOG_ERR, "%s(): run_em_cli() failed:%" PRI_STAT "\n", - __func__, stat); - } - } - - return stat; -} - -em_status_t emcli_init_local(void) -{ - if (!em_shm->opt.cli.enable) - return EM_OK; - - int ret = cli_shm_lookup(); - - if (ret) - return EM_ERR_LIB_FAILED; - - return EM_OK; -} - -em_status_t emcli_term(void) -{ - em_status_t stat = EM_OK; - - if (em_shm->opt.cli.enable) { - stat = stop_em_cli(); - - if (stat != EM_OK) { - EM_LOG(EM_LOG_ERR, "%s(): stop_em_cli() failed:%" PRI_STAT "\n", - __func__, stat); - } - } - - return stat; -} - -em_status_t emcli_term_local(void) -{ - return EM_OK; -} - -#else /* EM_CLI */ -/* Dummy functions for building without odph_cli and libcli support */ -em_status_t emcli_init(void) -{ - return EM_OK; -} - -em_status_t emcli_init_local(void) -{ - return EM_OK; -} - -em_status_t emcli_term(void) -{ - return EM_OK; -} - -em_status_t emcli_term_local(void) -{ - return EM_OK; -} - -#endif /* EM_CLI */ +/* Copyright (c) 2021, Nokia + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "em_include.h" + +#if EM_CLI + +#define OPTPARSE_IMPLEMENTATION +#include "misc/optparse.h" + +/* Maximum number of bytes (including terminating null byte) for an EM CLI command */ +#define MAX_CMD_LEN 20 + +/* EM CLI shared memory */ +static em_cli_shm_t *cli_shm; + +__attribute__((format(printf, 2, 3))) +static int cli_log(em_log_level_t level, const char *fmt, ...) +{ + (void)level; + + va_list args; + + va_start(args, fmt); + + int r = odph_cli_log_va(fmt, args); + + va_end(args); + + return r; +} + +static void print_em_info_help(void) +{ + const char *usage = "Usage: em_info_print [OPTION]\n" + "Print EM related information.\n" + "\n" + "Options:\n" + " -a, --all\tPrint all EM info\n" + " -p, --cpu-arch\tPrint cpu architure\n" + " -c, --conf\tPrint default and runtime configurations\n" + " -h, --help\tDisplay this help\n"; + odph_cli_log(usage); +} + +static void print_em_info_all(void) +{ + core_log_fn_set(cli_log); + print_em_info(); + core_log_fn_set(NULL); +} + +static void print_em_info_cpu_arch(void) +{ + core_log_fn_set(cli_log); + print_cpu_arch_info(); + core_log_fn_set(NULL); +} + +static void print_em_info_conf(void) +{ + core_log_fn_set(cli_log); + em_libconfig_print(&em_shm->libconfig); + core_log_fn_set(NULL); +} + +static void cmd_em_info_print(int argc, char *argv[]) +{ + /* All current options accept no argument */ + const int max_args = 1; + + /* When no argument is given, print all EM info */ + if (argc == 0) { + print_em_info_all(); + return; + } else if (argc > max_args) { + odph_cli_log("Error: extra parameter given to command!\n"); + return; + } + + /* Unlike getopt, optparse does not require an argument count as input to + * indicate the number of arguments in argv. 
Instead, it uses NULL pointer + * to decide the end of argument array argv. + * + * argv here contains only CLI command options. To emulate a real command, + * argv_new is constructed to include command name. + */ + argc += 1/*Command name*/ + 1/*Terminating NULL pointer*/; + char *argv_new[argc]; + char cmd[MAX_CMD_LEN] = "em_info_print"; + + argv_new[0] = cmd; + for (int i = 1; i < argc - 1; i++) + argv_new[i] = argv[i - 1]; + argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/ + + int option; + struct optparse_long longopts[] = { + {"all", 'a', OPTPARSE_NONE}, + {"cpu-arch", 'p', OPTPARSE_NONE}, + {"conf", 'c', OPTPARSE_NONE}, + {"help", 'h', OPTPARSE_NONE}, + {0} + }; + struct optparse options; + + optparse_init(&options, argv_new); + options.permute = 0; + while (1) { + option = optparse_long(&options, longopts, NULL); + + if (option == -1) + break; + + switch (option) { + case 'a': + print_em_info_all(); + break; + case 'p': + print_em_info_cpu_arch(); + break; + case 'c': + print_em_info_conf(); + break; + case 'h': + print_em_info_help(); + break; + case '?': + odph_cli_log("Error: %s\n", options.errmsg); + return; + default: + odph_cli_log("Unknown Error\n"); + return; + } + } +} + +static void print_em_pool_all(void) +{ + core_log_fn_set(cli_log); + em_pool_info_print_all(); + core_log_fn_set(NULL); +} + +static void print_em_pool(em_pool_t pool, const char *pool_name) +{ + if (pool == EM_POOL_UNDEF) { + if (pool_name) + odph_cli_log("Error: can't find EM pool %s.\n", pool_name); + else + odph_cli_log("Error: can't find EM pool %" PRI_POOL "\n", pool); + return; + } + + core_log_fn_set(cli_log); + pool_info_print_hdr(1); + pool_info_print(pool); + core_log_fn_set(NULL); +} + +static void print_em_pool_help(void) +{ + const char *usage = "Usage: em_pool_print [OPTION]\n" + "Print EM pool related information\n" + "\n" + "Options:\n" + " -a, --all\tPrint info of all pools\n" + " -i, --id \tPrint info of \n" + " -n, --name \tPrint info of \n" + " -h, --help\tDisplay this help\n"; + + odph_cli_log(usage); +} + +static void cmd_em_pool_print(int argc, char *argv[]) +{ + /* Command em_pool_print takes maximum 2 arguments */ + const int max_args = 2; + + /* When no argument is given, print all pool info */ + if (argc == 0) { + print_em_pool_all(); + return; + } else if (argc > max_args) { + odph_cli_log("Error: extra parameter given to command!\n"); + return; + } + + /* Unlike getopt, optparse does not require an argument count as input to + * indicate the number of arguments in argv. Instead, it uses NULL pointer + * to decide the end of argument array argv. + * + * argv here contains only CLI command options. To emulate a real command, + * argv_new is constructed to include command name. 
+ */ + argc += 1/*Cmd str "em_pool_print"*/ + 1/*Terminating NULL pointer*/; + char *argv_new[argc]; + char cmd[MAX_CMD_LEN] = "em_pool_print"; + + argv_new[0] = cmd; + for (int i = 1; i < argc - 1; i++) + argv_new[i] = argv[i - 1]; + argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/ + + em_pool_t pool; + int option; + struct optparse_long longopts[] = { + {"all", 'a', OPTPARSE_NONE}, + {"id", 'i', OPTPARSE_REQUIRED}, + {"name", 'n', OPTPARSE_REQUIRED}, + {"help", 'h', OPTPARSE_NONE}, + {0} + }; + struct optparse options; + + optparse_init(&options, argv_new); + options.permute = 0; + while (1) { + option = optparse_long(&options, longopts, NULL); + if (option == -1) /* No more options */ + break; + + switch (option) { + case 'a': + print_em_pool_all(); + break; + case 'i': + pool = (em_pool_t)(uintptr_t)(int)strtol(options.optarg, NULL, 0); + print_em_pool(pool, NULL); + break; + case 'n': + pool = pool_find(options.optarg); + print_em_pool(pool, options.optarg); + break; + case 'h': + print_em_pool_help(); + break; + case '?': + odph_cli_log("Error: %s\n", options.errmsg); + return; + default: + odph_cli_log("Unknown Error\n"); + return; + } + } +} + +static void print_em_queue_help(void) +{ + const char *usage = "Usage: em_queue_print [OPTION]\n" + "Print EM queue information\n" + "\n" + "Options:\n" + " -c, --capa\tPrint queue capabilities\n" + " -a, --all\tPrint info about all queues\n" + " -h, --help\tDisplay this help\n"; + odph_cli_log(usage); +} + +static void print_em_queue_capa(void) +{ + core_log_fn_set(cli_log); + print_queue_capa(); + core_log_fn_set(NULL); +} + +static void print_em_queue_all(void) +{ + core_log_fn_set(cli_log); + print_queue_info(); + core_log_fn_set(NULL); +} + +static void cmd_em_queue_print(int argc, char *argv[]) +{ + /* All current options accept no argument */ + const int max_args = 1; + + /* When no argument is given, print info about all EM queues */ + if (argc == 0) { + print_em_queue_all(); + return; + } else if (argc > max_args) { + odph_cli_log("Error: extra parameter given to command!\n"); + return; + } + + /* Unlike getopt, optparse does not require an argument count as input to + * indicate the number of arguments in argv. Instead, it uses NULL pointer + * to decide the end of argument array argv. + * + * argv here contains only CLI command options. To emulate a real command, + * argv_new is constructed to include command name. 
+ */
+ argc += 1/*Cmd str "em_queue_print"*/ + 1/*Terminating NULL pointer*/;
+ char *argv_new[argc];
+ char cmd[MAX_CMD_LEN] = "em_queue_print";
+
+ argv_new[0] = cmd;
+ for (int i = 1; i < argc - 1; i++)
+ argv_new[i] = argv[i - 1];
+ argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/
+
+ int option;
+ struct optparse_long longopts[] = {
+ {"capa", 'c', OPTPARSE_NONE},
+ {"all", 'a', OPTPARSE_NONE},
+ {"help", 'h', OPTPARSE_NONE},
+ {0}
+ };
+ struct optparse options;
+
+ optparse_init(&options, argv_new);
+ options.permute = 0;
+ while (1) {
+ option = optparse_long(&options, longopts, NULL);
+ if (option == -1) /* No more options */
+ break;
+
+ switch (option) {
+ case 'c':
+ print_em_queue_capa();
+ break;
+ case 'a':
+ print_em_queue_all();
+ break;
+ case 'h':
+ print_em_queue_help();
+ break;
+ case '?':
+ odph_cli_log("Error: %s\n", options.errmsg);
+ return;
+ default:
+ odph_cli_log("Unknown Error\n");
+ return;
+ }
+ }
+}
+
+static void print_em_qgrp_help(void)
+{
+ const char *usage = "Usage: em_qgrp_print [OPTION]\n"
+ "Print EM queue group information\n"
+ "\n"
+ "Options:\n"
+ " -a, --all(default)\tPrint info about all EM queue groups\n"
+ " -i, --id <qgrp id>\tPrint the queue info of <qgrp id>\n"
+ " -n, --name <qgrp name>\tPrint the queue info of <qgrp name>\n"
+ " -h, --help\tDisplay this help\n";
+ odph_cli_log(usage);
+}
+
+static void print_em_qgrp_all(void)
+{
+ core_log_fn_set(cli_log);
+ queue_group_info_print_all();
+ core_log_fn_set(NULL);
+}
+
+static void print_em_qgrp_queues(const em_queue_group_t qgrp, const char *name)
+{
+ if (qgrp == EM_QUEUE_GROUP_UNDEF) {
+ if (name)
+ odph_cli_log("Error: can't find queue group %s!\n", name);
+ else
+ odph_cli_log("Error: can't find queue group %" PRI_QGRP "!\n", qgrp);
+ return;
+ }
+
+ core_log_fn_set(cli_log);
+ queue_group_queues_print(qgrp);
+ core_log_fn_set(NULL);
+}
+
+static void cmd_em_qgrp_print(int argc, char *argv[])
+{
+ /* em_qgrp_print takes a maximum of 2 arguments */
+ const int max_args = 2;
+
+ /* When no argument is given, print all EM queue group info */
+ if (argc == 0) {
+ print_em_qgrp_all();
+ return;
+ } else if (argc > max_args) {
+ odph_cli_log("Error: extra parameter given to command!\n");
+ return;
+ }
+
+ /* Unlike getopt, optparse does not require an argument count as input to
+ * indicate the number of arguments in argv. Instead, it uses a NULL pointer
+ * to mark the end of the argument array argv.
+ *
+ * argv here contains only the CLI command options. To emulate a real
+ * command, argv_new is constructed to also include the command name.
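+ *
+ * Note: the -i/--id option value is parsed below with strtol(..., base 0),
+ * so the queue group handle can be given in decimal, hex ("0x...") or
+ * octal ("0...") notation.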
+ */
+ argc += 1/*Cmd str "em_qgrp_print"*/ + 1/*Terminating NULL pointer*/;
+ char *argv_new[argc];
+ char cmd[MAX_CMD_LEN] = "em_qgrp_print";
+
+ argv_new[0] = cmd;
+ for (int i = 1; i < argc - 1; i++)
+ argv_new[i] = argv[i - 1];
+ argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/
+
+ em_queue_group_t qgrp;
+ int option;
+ struct optparse_long longopts[] = {
+ {"all", 'a', OPTPARSE_NONE},
+ {"id", 'i', OPTPARSE_REQUIRED},
+ {"name", 'n', OPTPARSE_REQUIRED},
+ {"help", 'h', OPTPARSE_NONE},
+ {0}
+ };
+ struct optparse options;
+
+ optparse_init(&options, argv_new);
+ options.permute = 0;
+ while (1) {
+ option = optparse_long(&options, longopts, NULL);
+
+ if (option == -1)
+ break; /* No more options */
+
+ switch (option) {
+ case 'a':
+ print_em_qgrp_all();
+ break;
+ case 'i':
+ qgrp = (em_queue_group_t)(uintptr_t)(int)strtol(options.optarg, NULL, 0);
+ print_em_qgrp_queues(qgrp, NULL);
+ break;
+ case 'n':
+ qgrp = em_queue_group_find(options.optarg);
+ print_em_qgrp_queues(qgrp, options.optarg);
+ break;
+ case 'h':
+ print_em_qgrp_help();
+ break;
+ case '?':
+ odph_cli_log("Error: %s\n", options.errmsg);
+ return;
+ default:
+ odph_cli_log("Unknown Error\n");
+ return;
+ }
+ }
+}
+
+static void cmd_em_core_print(int argc, char *argv[])
+{
+ (void)argv;
+ /* Print EM core map */
+ if (argc == 0) {
+ core_log_fn_set(cli_log);
+ print_core_map_info();
+ core_log_fn_set(NULL);
+ } else {
+ odph_cli_log("Error: extra parameter given to command!\n");
+ }
+}
+
+static void print_em_eo_help(void)
+{
+ const char *usage = "Usage: em_eo_print [OPTION]\n"
+ "Print EO information\n"
+ "\n"
+ "Options:\n"
+ " -a, --all\tPrint all EO info\n"
+ " -i, --id <eo id>\tPrint info about all queues of <eo id>\n"
+ " -n, --name <eo name>\tPrint info about all queues of <eo name>\n"
+ " -h, --help\tDisplay this help\n";
+
+ odph_cli_log(usage);
+}
+
+static void print_em_eo_all(void)
+{
+ core_log_fn_set(cli_log);
+ eo_info_print_all();
+ core_log_fn_set(NULL);
+}
+
+static void print_em_eo(const em_eo_t eo, const char *name)
+{
+ if (eo == EM_EO_UNDEF) {
+ if (name)
+ odph_cli_log("Error: can't find EO %s\n", name);
+ else
+ odph_cli_log("Error: can't find EO %" PRI_EO "\n", eo);
+ return;
+ }
+
+ core_log_fn_set(cli_log);
+ eo_queue_info_print(eo);
+ core_log_fn_set(NULL);
+}
+
+static void cmd_em_eo_print(int argc, char *argv[])
+{
+ /* em_eo_print takes a maximum of 2 arguments */
+ const int max_args = 2;
+
+ /* When no argument is given, print all eo info */
+ if (argc == 0) {
+ print_em_eo_all();
+ return;
+ } else if (argc > max_args) {
+ odph_cli_log("Error: extra parameter given to command!\n");
+ return;
+ }
+
+ /* Unlike getopt, optparse does not require an argument count as input to
+ * indicate the number of arguments in argv. Instead, it uses a NULL pointer
+ * to mark the end of the argument array argv.
+ *
+ * argv here contains only the CLI command options. To emulate a real
+ * command, argv_new is constructed to also include the command name.
+ */
+ argc += 1/*Cmd str "em_eo_print"*/ + 1/*Terminating NULL pointer*/;
+ char *argv_new[argc];
+ char cmd[MAX_CMD_LEN] = "em_eo_print";
+
+ argv_new[0] = cmd;
+ for (int i = 1; i < argc - 1; i++)
+ argv_new[i] = argv[i - 1];
+ argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/
+
+ em_eo_t eo;
+ int option;
+ struct optparse_long longopts[] = {
+ {"all", 'a', OPTPARSE_NONE},
+ {"id", 'i', OPTPARSE_REQUIRED},
+ {"name", 'n', OPTPARSE_REQUIRED},
+ {"help", 'h', OPTPARSE_NONE},
+ {0}
+ };
+ struct optparse options;
+
+ optparse_init(&options, argv_new);
+ options.permute = 0;
+ while (1) {
+ option = optparse_long(&options, longopts, NULL);
+ if (option == -1) /* No more options */
+ break;
+
+ switch (option) {
+ case 'a':
+ print_em_eo_all();
+ break;
+ case 'i':
+ eo = (em_eo_t)(uintptr_t)(int)strtol(options.optarg, NULL, 0);
+ print_em_eo(eo, NULL);
+ break;
+ case 'n':
+ eo = em_eo_find(options.optarg);
+ print_em_eo(eo, options.optarg);
+ break;
+ case 'h':
+ print_em_eo_help();
+ break;
+ case '?':
+ odph_cli_log("Error: %s\n", options.errmsg);
+ return;
+ default:
+ odph_cli_log("Unknown Error\n");
+ return;
+ }
+ }
+}
+
+static void print_em_agrp_help(void)
+{
+ const char *usage = "Usage: em_agrp_print [OPTION]\n"
+ "Print info about atomic groups\n"
+ "\n"
+ "Options:\n"
+ " -a, --all\tPrint info about all atomic groups\n"
+ " -i, --id <ag id>\tPrint info about all queues of <ag id>\n"
+ " -n, --name <ag name>\tPrint info about all queues of <ag name>\n"
+ " -h, --help\tDisplay this help\n";
+
+ odph_cli_log(usage);
+}
+
+static void print_em_agrp_all(void)
+{
+ core_log_fn_set(cli_log);
+ print_atomic_group_info();
+ core_log_fn_set(NULL);
+}
+
+static void print_em_agrp(em_atomic_group_t ag, const char *ag_name)
+{
+ if (ag == EM_ATOMIC_GROUP_UNDEF) {
+ if (ag_name)
+ odph_cli_log("Error: can't find atomic group %s\n", ag_name);
+ else
+ odph_cli_log("Error: can't find atomic group %" PRI_AGRP "\n", ag);
+ return;
+ }
+
+ core_log_fn_set(cli_log);
+ print_atomic_group_queues(ag);
+ core_log_fn_set(NULL);
+}
+
+static void cmd_em_agrp_print(int argc, char *argv[])
+{
+ /* em_agrp_print takes a maximum of 2 arguments */
+ const int max_args = 2;
+
+ /* When no argument is given, print info about all atomic groups */
+ if (argc == 0) {
+ print_em_agrp_all();
+ return;
+ } else if (argc > max_args) {
+ odph_cli_log("Error: extra parameter given to command!\n");
+ return;
+ }
+
+ /* Unlike getopt, optparse does not require an argument count as input to
+ * indicate the number of arguments in argv. Instead, it uses a NULL pointer
+ * to mark the end of the argument array argv.
+ *
+ * argv here contains only the CLI command options. To emulate a real
+ * command, argv_new is constructed to also include the command name.
+ */
+ argc += 1/*Cmd name "em_agrp_print"*/ + 1/*Terminating NULL pointer*/;
+ char *argv_new[argc];
+ char cmd[MAX_CMD_LEN] = "em_agrp_print";
+
+ argv_new[0] = cmd;
+ for (int i = 1; i < argc - 1; i++)
+ argv_new[i] = argv[i - 1];
+ argv_new[argc - 1] = NULL; /*Terminating NULL pointer*/
+
+ em_atomic_group_t ag;
+ int option;
+ struct optparse_long longopts[] = {
+ {"all", 'a', OPTPARSE_NONE},
+ {"id", 'i', OPTPARSE_REQUIRED},
+ {"name", 'n', OPTPARSE_REQUIRED},
+ {"help", 'h', OPTPARSE_NONE},
+ {0}
+ };
+ struct optparse options;
+
+ optparse_init(&options, argv_new);
+ options.permute = 0;
+
+ while (1) {
+ option = optparse_long(&options, longopts, NULL);
+
+ if (option == -1)
+ break;
+
+ switch (option) {
+ case 'a':
+ print_em_agrp_all();
+ break;
+ case 'i':
+ ag = (em_atomic_group_t)(uintptr_t)(int)strtol(options.optarg, NULL, 0);
+ print_em_agrp(ag, NULL);
+ break;
+ case 'n':
+ ag = em_atomic_group_find(options.optarg);
+ print_em_agrp(ag, options.optarg);
+ break;
+ case 'h':
+ print_em_agrp_help();
+ break;
+ case '?':
+ odph_cli_log("Error: %s\n", options.errmsg);
+ return;
+ default:
+ odph_cli_log("Unknown Error\n");
+ return;
+ }
+ }
+}
+
+static void cmd_em_egrp_print(int argc, char *argv[])
+{
+ (void)argv;
+ /* When no argument is given, print info about all event groups */
+ if (argc == 0) {
+ core_log_fn_set(cli_log);
+ event_group_info_print();
+ core_log_fn_set(NULL);
+ } else {
+ odph_cli_log("Error: extra parameter given to command!\n");
+ }
+}
+
+static int cli_register_em_commands(void)
+{
+ /* Register em commands */
+ if (odph_cli_register_command("em_agrp_print", cmd_em_agrp_print,
+ "[a|i <ag id>|n <ag name>|h]")) {
+ EM_LOG(EM_LOG_ERR, "Registering EM command em_agrp_print failed.\n");
+ return -1;
+ }
+
+ if (odph_cli_register_command("em_eo_print", cmd_em_eo_print,
+ "[a|i <eo id>|n <eo name>|h]")) {
+ EM_LOG(EM_LOG_ERR, "Registering EM command em_eo_print failed.\n");
+ return -1;
+ }
+
+ if (odph_cli_register_command("em_egrp_print", cmd_em_egrp_print, "")) {
+ EM_LOG(EM_LOG_ERR, "Registering EM cmd em_egrp_print failed.\n");
+ return -1;
+ }
+
+ if (odph_cli_register_command("em_info_print", cmd_em_info_print,
+ "[a|p|c|h]")) {
+ EM_LOG(EM_LOG_ERR, "Registering EM command em_info_print failed.\n");
+ return -1;
+ }
+
+ if (odph_cli_register_command("em_pool_print", cmd_em_pool_print,
+ "[a|i <pool id>|n <pool name>|h]")) {
+ EM_LOG(EM_LOG_ERR, "Registering EM command em_pool_print failed.\n");
+ return -1;
+ }
+
+ if (odph_cli_register_command("em_queue_print", cmd_em_queue_print,
+ "[a|c|h]")) {
+ EM_LOG(EM_LOG_ERR, "Registering EM command em_queue_print failed.\n");
+ return -1;
+ }
+
+ if (odph_cli_register_command("em_qgrp_print", cmd_em_qgrp_print,
+ "[a|i <qgrp id>|n <qgrp name>|h]")) {
+ EM_LOG(EM_LOG_ERR, "Registering EM command em_qgrp_print failed.\n");
+ return -1;
+ }
+
+ if (odph_cli_register_command("em_core_print", cmd_em_core_print, "")) {
+ EM_LOG(EM_LOG_ERR, "Registering EM command em_core_print failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int read_config_file(void)
+{
+ /* Conf option: cli.enable - runtime enable/disable cli */
+ const char *cli_conf = "cli.enable";
+ bool cli_enable = false;
+ int ret = em_libconfig_lookup_bool(&em_shm->libconfig, cli_conf,
+ &cli_enable);
+
+ if (unlikely(!ret)) {
+ EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", cli_conf);
+ return -1;
+ }
+
+ EM_PRINT("EM CLI config:\n");
+ /* store & print the value */
+ em_shm->opt.cli.enable = (int)cli_enable;
+ EM_PRINT(" %s: %s(%d)\n", cli_conf, cli_enable ?
"true" : "false", + cli_enable); + + cli_conf = "cli.ip_addr"; + ret = em_libconfig_lookup_string(&em_shm->libconfig, cli_conf, + &em_shm->opt.cli.ip_addr); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", cli_conf); + return -1; + } + EM_PRINT(" %s: %s\n", cli_conf, em_shm->opt.cli.ip_addr); + + cli_conf = "cli.port"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, cli_conf, + &em_shm->opt.cli.port); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", cli_conf); + return -1; + } + EM_PRINT(" %s: %d\n", cli_conf, em_shm->opt.cli.port); + + return 0; +} + +static int cli_shm_setup(void) +{ + if (cli_shm != NULL) { + EM_LOG(EM_LOG_ERR, "EM CLI shared memory ptr already set!\n"); + return -1; + } + + /* + * Reserve the CLI shared memory once at start-up. + */ + uint32_t flags = 0; + +#if ODP_VERSION_API_NUM(1, 33, 0) < ODP_VERSION_API + odp_shm_capability_t shm_capa; + int ret = odp_shm_capability(&shm_capa); + + if (ret) { + EM_LOG(EM_LOG_ERR, "shm capability error:%d\n", ret); + return -1; + } + + /* No huge pages needed for the CLI shm */ + if (shm_capa.flags & ODP_SHM_NO_HP) + flags |= ODP_SHM_NO_HP; +#endif + odp_shm_t shm = odp_shm_reserve("em_cli", sizeof(em_cli_shm_t), + ODP_CACHE_LINE_SIZE, flags); + + if (shm == ODP_SHM_INVALID) { + EM_LOG(EM_LOG_ERR, "EM CLI shared memory reservation failed!\n"); + return -1; + } + + cli_shm = odp_shm_addr(shm); + + if (cli_shm == NULL) { + EM_LOG(EM_LOG_ERR, "EM CLI shared memory ptr NULL!\n"); + return -1; + } + + memset(cli_shm, 0, sizeof(em_cli_shm_t)); + + /* Store shm handle, can be used in stop_em_cli() to free the memory */ + cli_shm->this_shm = shm; + + return 0; +} + +static int cli_shm_lookup(void) +{ + odp_shm_t shm; + em_cli_shm_t *shm_addr; + + /* Lookup the EM shared memory on each EM-core */ + shm = odp_shm_lookup("em_cli"); + if (shm == ODP_SHM_INVALID) { + EM_LOG(EM_LOG_ERR, "Shared memory lookup failed!\n"); + return -1; + } + + shm_addr = odp_shm_addr(shm); + if (!shm_addr) { + EM_LOG(EM_LOG_ERR, "Shared memory ptr NULL\n"); + return -1; + } + + if (em_shm->conf.process_per_core && cli_shm == NULL) + cli_shm = shm_addr; + + if (shm_addr != cli_shm) { + EM_LOG(EM_LOG_ERR, "CLI shared memory init fails: cli_shm:%p != shm_addr:%p\n", + cli_shm, shm_addr); + return -1; + } + + return 0; +} + +static int cli_shm_free(void) +{ + if (odp_shm_free(cli_shm->this_shm)) { + EM_LOG(EM_LOG_ERR, "Error: odp_shm_free() failed\n"); + return -1; + } + + /* Set cli_shm = NULL to allow a new call to cli_shm_setup() */ + cli_shm = NULL; + + return 0; +} + +static int cli_thr_fn(__attribute__((__unused__)) void *arg) +{ + init_ext_thread(); + + /* Run CLI server. */ + if (odph_cli_run()) { + EM_LOG(EM_LOG_ERR, "Failed to start CLI server.\n"); + exit(EXIT_FAILURE); + } + + /* em_term_core_cli() */ + return 0; +} + +/* + * Run EM CLI server + * + * When executing this function, the CLI is accepting client connections and + * running commands from a client, if one is connected. + * + * @return EM_OK if successful. 
+ */
+static em_status_t
+run_em_cli(void)
+{
+ /* Prepare CLI parameters */
+ odph_cli_param_t cli_param = {0};
+
+ odph_cli_param_init(&cli_param);
+ cli_param.hostname = "EM-ODP";
+ cli_param.address = em_shm->opt.cli.ip_addr;
+ cli_param.port = (uint16_t)em_shm->opt.cli.port;
+
+ /* Initialize CLI helper */
+ if (odph_cli_init(&cli_param)) {
+ EM_LOG(EM_LOG_ERR, "Error: odph_cli_init() failed.\n");
+ return EM_ERR_LIB_FAILED;
+ }
+
+ /* Register EM CLI commands */
+ if (cli_register_em_commands()) {
+ EM_LOG(EM_LOG_ERR, "Error: cli_register_em_commands() failed.\n");
+ return EM_ERR_LIB_FAILED;
+ }
+
+ /* Create thread to run CLI server */
+ odp_cpumask_t cpumask;
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param;
+ odp_instance_t instance;
+
+ if (odp_cpumask_default_control(&cpumask, 1) != 1) {
+ EM_LOG(EM_LOG_ERR, "Failed to get default CPU mask.\n");
+ return EM_ERR_LIB_FAILED;
+ }
+
+ if (odp_instance(&instance)) {
+ EM_LOG(EM_LOG_ERR, "Failed to get odp instance.\n");
+ return EM_ERR_LIB_FAILED;
+ }
+
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = instance;
+ thr_common.cpumask = &cpumask;
+ thr_common.thread_model = 0; /* 0: Use pthread for the CLI */
+
+ odph_thread_param_init(&thr_param);
+ thr_param.thr_type = ODP_THREAD_CONTROL;
+ thr_param.start = cli_thr_fn;
+ thr_param.arg = NULL;
+
+ /* Set up EM CLI shared memory */
+ if (cli_shm_setup()) {
+ EM_LOG(EM_LOG_ERR, "Error: cli_shm_setup() failed.\n");
+ return EM_ERR_ALLOC_FAILED;
+ }
+
+ EM_PRINT("Starting CLI server on %s:%d\n", cli_param.address, cli_param.port);
+
+ /* Create EM CLI server thread and store the thread ID to be used in
+ * stop_em_cli() to wait for the thread to exit.
+ */
+ if (odph_thread_create(&cli_shm->em_cli_thread, &thr_common,
+ &thr_param, 1) != 1) {
+ EM_LOG(EM_LOG_ERR, "Failed to create CLI server thread.\n");
+ cli_shm_free();
+ return EM_ERR_LIB_FAILED;
+ }
+
+ return EM_OK;
+}
+
+/*
+ * Stop EM CLI server
+ *
+ * Stop accepting new client connections and disconnect any connected client.
+ *
+ * @return EM_OK if successful.
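+ *
+ * Called from emcli_term() at EM termination when the CLI is enabled:
+ * the CLI server is stopped, the server thread created in run_em_cli()
+ * is joined and the CLI shared memory is freed.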
+ */ +static em_status_t stop_em_cli(void) +{ + if (odph_cli_stop()) { + EM_LOG(EM_LOG_ERR, "Failed to stop CLI.\n"); + goto error; + } + + if (odph_thread_join(&cli_shm->em_cli_thread, 1) != 1) { + EM_LOG(EM_LOG_ERR, "Failed to join server thread.\n"); + goto error; + } + + if (odph_cli_term()) { + EM_LOG(EM_LOG_ERR, "Failed to terminate CLI.\n"); + goto error; + } + + cli_shm_free(); + EM_PRINT("\nCLI server terminated!\n"); + + return EM_OK; + +error: + cli_shm_free(); + return EM_ERR_LIB_FAILED; +} + +em_status_t emcli_init(void) +{ + em_status_t stat = EM_OK; + + /* Store libconf options to em_shm */ + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + if (em_shm->opt.cli.enable) { + stat = run_em_cli(); + + if (stat != EM_OK) { + EM_LOG(EM_LOG_ERR, "%s(): run_em_cli() failed:%" PRI_STAT "\n", + __func__, stat); + } + } + + return stat; +} + +em_status_t emcli_init_local(void) +{ + if (!em_shm->opt.cli.enable) + return EM_OK; + + int ret = cli_shm_lookup(); + + if (ret) + return EM_ERR_LIB_FAILED; + + return EM_OK; +} + +em_status_t emcli_term(void) +{ + em_status_t stat = EM_OK; + + if (em_shm->opt.cli.enable) { + stat = stop_em_cli(); + + if (stat != EM_OK) { + EM_LOG(EM_LOG_ERR, "%s(): stop_em_cli() failed:%" PRI_STAT "\n", + __func__, stat); + } + } + + return stat; +} + +em_status_t emcli_term_local(void) +{ + return EM_OK; +} + +#else /* EM_CLI */ +/* Dummy functions for building without odph_cli and libcli support */ +em_status_t emcli_init(void) +{ + return EM_OK; +} + +em_status_t emcli_init_local(void) +{ + return EM_OK; +} + +em_status_t emcli_term(void) +{ + return EM_OK; +} + +em_status_t emcli_term_local(void) +{ + return EM_OK; +} + +#endif /* EM_CLI */ diff --git a/src/em_core.c b/src/em_core.c index 2e5328be..3ea43995 100644 --- a/src/em_core.c +++ b/src/em_core.c @@ -1,161 +1,159 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "em_include.h" - -em_status_t -core_map_init(core_map_t *const core_map, int core_count, - const em_core_mask_t *phys_mask) -{ - int phys_id = 0; - int logic_id = 0; - int mask_count; - int last_phys_id; - - if (core_count > EM_MAX_CORES || core_count > odp_thread_count_max()) - return EM_ERR_TOO_LARGE; - - mask_count = em_core_mask_count(phys_mask); - if (mask_count != core_count) - return EM_ERR_BAD_STATE; - - last_phys_id = em_core_mask_idx(core_count, phys_mask); - if (last_phys_id >= EM_MAX_CORES) - return EM_ERR_BAD_ID; - - memset(core_map, 0, sizeof(core_map_t)); - - env_spinlock_init(&core_map->lock); - - /* Store the EM core count, returned by em_core_count() */ - core_map->count = core_count; - - em_core_mask_copy(&core_map->phys_mask, phys_mask); - em_core_mask_set_count(core_count, &core_map->logic_mask); - - while (logic_id < core_count && phys_id < EM_MAX_CORES) { - if (em_core_mask_isset(phys_id, &core_map->phys_mask)) { - core_map->phys_vs_logic.logic[phys_id] = logic_id; - core_map->phys_vs_logic.phys[logic_id] = phys_id; - logic_id++; - } - phys_id++; - } - - return EM_OK; -} - -em_status_t -core_map_init_local(core_map_t *const core_map) -{ - em_locm_t *const locm = &em_locm; - const int phys_core = odp_cpu_id(); - const int odp_thr = odp_thread_id(); - - if (unlikely(phys_core >= EM_MAX_CORES)) - return EM_ERR_BAD_ID; - if (odp_thr >= ODP_THREAD_COUNT_MAX || - odp_thr >= odp_thread_count_max()) - return EM_ERR_LIB_FAILED; - - /* Store the EM core id of this core, returned by em_core_id() */ - locm->core_id = core_map->phys_vs_logic.logic[phys_core]; - - if (unlikely(locm->core_id < 0)) - return EM_ERR_TOO_SMALL; - if (unlikely(locm->core_id >= EM_MAX_CORES)) - return EM_ERR_TOO_LARGE; - - env_spinlock_lock(&core_map->lock); - core_map->thr_vs_logic.logic[odp_thr] = locm->core_id; - core_map->thr_vs_logic.odp_thr[locm->core_id] = odp_thr; - env_spinlock_unlock(&core_map->lock); - - return EM_OK; -} - -int logic_to_thr_core_id(const int logic_core) -{ - if (unlikely(logic_core >= EM_MAX_CORES)) - return -1; - - return em_shm->core_map.thr_vs_logic.odp_thr[logic_core]; -} - -int thr_to_logic_core_id(const int thr_id) -{ - if (unlikely(thr_id >= ODP_THREAD_COUNT_MAX)) - return -1; - - return em_shm->core_map.thr_vs_logic.logic[thr_id]; -} - -void mask_em2odp(const em_core_mask_t *const em_core_mask, - odp_thrmask_t *const odp_thrmask /*out*/) -{ - int core_count = em_core_count(); - int odp_thread_id; - int i; - - odp_thrmask_zero(odp_thrmask); - - if (unlikely(!em_shm->init.em_init_done)) { - INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_INIT, - "Cannot convert EM-mask to ODP-thrmask,\n" - "not all ODP threads are initialized yet."); - return; - } - - /* EM cores are consequtive 0 -> em_core_count()-1 */ - for (i = 0; i < core_count; i++) { - if (em_core_mask_isset(i, em_core_mask)) { - odp_thread_id = logic_to_thr_core_id(i); - odp_thrmask_set(odp_thrmask, odp_thread_id); - } - } -} - -void mask_em2phys(const em_core_mask_t *const em_core_mask, - odp_cpumask_t *const odp_cpumask /*out*/) -{ - int core_count = em_core_count(); - int cpu_id; - int i; - - odp_cpumask_zero(odp_cpumask); - - /* EM cores are consequtive 0 -> em_core_count()-1 */ - for (i = 0; i < core_count; i++) { - if (em_core_mask_isset(i, em_core_mask)) { - cpu_id = logic_to_phys_core_id(i); - odp_cpumask_set(odp_cpumask, cpu_id); - } - } -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +em_status_t +core_map_init(core_map_t *const core_map, int core_count, + const em_core_mask_t *phys_mask) +{ + int phys_id = 0; + int logic_id = 0; + int mask_count; + int last_phys_id; + + if (core_count > EM_MAX_CORES || core_count > odp_thread_count_max()) + return EM_ERR_TOO_LARGE; + + mask_count = em_core_mask_count(phys_mask); + if (mask_count != core_count) + return EM_ERR_BAD_STATE; + + last_phys_id = em_core_mask_idx(core_count, phys_mask); + if (last_phys_id >= EM_MAX_CORES) + return EM_ERR_BAD_ID; + + memset(core_map, 0, sizeof(core_map_t)); + + env_spinlock_init(&core_map->lock); + + /* Store the EM core count, returned by em_core_count() */ + core_map->count = core_count; + + em_core_mask_copy(&core_map->phys_mask, phys_mask); + em_core_mask_set_count(core_count, &core_map->logic_mask); + + while (logic_id < core_count && phys_id < EM_MAX_CORES) { + if (em_core_mask_isset(phys_id, &core_map->phys_mask)) { + core_map->phys_vs_logic.logic[phys_id] = logic_id; + core_map->phys_vs_logic.phys[logic_id] = phys_id; + logic_id++; + } + phys_id++; + } + + return EM_OK; +} + +em_status_t +core_map_init_local(core_map_t *const core_map) +{ + em_locm_t *const locm = &em_locm; + const int phys_core = odp_cpu_id(); + const int odp_thr = odp_thread_id(); + + if (unlikely(phys_core >= EM_MAX_CORES)) + return EM_ERR_BAD_ID; + if (odp_thr >= ODP_THREAD_COUNT_MAX || + odp_thr >= odp_thread_count_max()) + return EM_ERR_LIB_FAILED; + + /* Store the EM core id of this core, returned by em_core_id() */ + locm->core_id = core_map->phys_vs_logic.logic[phys_core]; + + if (unlikely(locm->core_id < 0)) + return EM_ERR_TOO_SMALL; + if (unlikely(locm->core_id >= EM_MAX_CORES)) + return EM_ERR_TOO_LARGE; + + env_spinlock_lock(&core_map->lock); + core_map->thr_vs_logic.logic[odp_thr] = locm->core_id; + core_map->thr_vs_logic.odp_thr[locm->core_id] = odp_thr; + env_spinlock_unlock(&core_map->lock); + + return EM_OK; +} + +int logic_to_thr_core_id(const int logic_core) +{ + if 
(unlikely(logic_core >= EM_MAX_CORES))
+ return -1;
+
+ return em_shm->core_map.thr_vs_logic.odp_thr[logic_core];
+}
+
+int thr_to_logic_core_id(const int thr_id)
+{
+ if (unlikely(thr_id >= ODP_THREAD_COUNT_MAX))
+ return -1;
+
+ return em_shm->core_map.thr_vs_logic.logic[thr_id];
+}
+
+void mask_em2odp(const em_core_mask_t *const em_core_mask,
+ odp_thrmask_t *const odp_thrmask /*out*/)
+{
+ int core_count = em_core_count();
+ int odp_thread_id;
+
+ odp_thrmask_zero(odp_thrmask);
+
+ if (unlikely(!em_shm->init.em_init_done)) {
+ INTERNAL_ERROR(EM_ERR_NOT_INITIALIZED, EM_ESCOPE_INIT,
+ "Cannot convert EM-mask to ODP-thrmask,\n"
+ "not all ODP threads are initialized yet.");
+ return;
+ }
+
+ /* EM cores are consecutive 0 -> em_core_count()-1 */
+ for (int i = 0; i < core_count; i++) {
+ if (em_core_mask_isset(i, em_core_mask)) {
+ odp_thread_id = logic_to_thr_core_id(i);
+ odp_thrmask_set(odp_thrmask, odp_thread_id);
+ }
+ }
+}
+
+void mask_em2phys(const em_core_mask_t *const em_core_mask,
+ odp_cpumask_t *const odp_cpumask /*out*/)
+{
+ int core_count = em_core_count();
+ int cpu_id;
+
+ odp_cpumask_zero(odp_cpumask);
+
+ /* EM cores are consecutive 0 -> em_core_count()-1 */
+ for (int i = 0; i < core_count; i++) {
+ if (em_core_mask_isset(i, em_core_mask)) {
+ cpu_id = logic_to_phys_core_id(i);
+ odp_cpumask_set(odp_cpumask, cpu_id);
+ }
+ }
+}
diff --git a/src/em_daemon_eo.c b/src/em_daemon_eo.c
index da217b78..5dd6a151 100644
--- a/src/em_daemon_eo.c
+++ b/src/em_daemon_eo.c
@@ -1,215 +1,220 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "em_include.h"
-
-#define DAEMON_ERROR(error, ...)
\ - INTERNAL_ERROR((error), EM_ESCOPE_DAEMON, ## __VA_ARGS__) - -static em_status_t -daemon_eo_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf); -static em_status_t -daemon_eo_stop(void *eo_ctx, em_eo_t eo); - -void daemon_eo_create(void) -{ - em_eo_t eo; - em_status_t stat; - em_status_t stat_eo_start = EM_ERROR; - - eo = em_eo_create("daemon-eo", daemon_eo_start, NULL /*start_local*/, - daemon_eo_stop, NULL /*stop_local*/, - daemon_eo_receive, NULL); - if (eo == EM_EO_UNDEF) - DAEMON_ERROR(EM_FATAL(EM_ERR_BAD_ID), "daemon-eo create fail"); - - /* Store the EO in shared memory */ - em_shm->daemon.eo = eo; - - stat = em_eo_start(eo, &stat_eo_start, NULL, 0, NULL); - if (stat != EM_OK || stat_eo_start != EM_OK) - DAEMON_ERROR(EM_FATAL(EM_ERR_LIB_FAILED), - "daemon-eo start failed!"); -} - -void daemon_eo_shutdown(void) -{ - const int core = em_core_id(); - const em_eo_t eo = em_shm->daemon.eo; - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t stat; - - EM_PRINT("%s() on EM-core %d\n", __func__, core); - - if (unlikely(eo_elem == NULL)) { - DAEMON_ERROR(EM_FATAL(EM_ERR_BAD_ID), - "daemon-eo handle:%" PRI_EO " invalid!", eo); - return; - } - - /* - * Stop the daemon-EO, i.e. call the daemon-EO global stop func. - * Note: cannot call normal API func em_eo_stop() since that would use - * internal ctrl events that might not be dispatched during shutdown. - */ - /* Change state here to allow em_eo_delete() from EO stop func */ - eo_elem->state = EM_EO_STATE_CREATED; /* == EO_STATE_STOPPED */ - stat = eo_elem->stop_func(eo_elem->eo_ctx, eo); - if (stat != EM_OK) - DAEMON_ERROR(EM_FATAL(EM_ERR_LIB_FAILED), - "daemon-eo stop/delete failed!"); -} - -em_status_t daemon_eo_queues_create(void) -{ - const int num_cores = em_core_count(); - char q_name[EM_QUEUE_NAME_LEN]; - em_queue_t shared_unsched_queue; - em_queue_t queue; - em_queue_conf_t unsch_conf; - - const char *err_str = ""; - int i; - - EM_DBG("%s()\n", __func__); - - /* - * Create shared internal unsched queue used for internal EM messaging. - * Cannot use em_queue_create_static() here since the requested handle - * 'SHARED_INTERNAL_UNSCHED_QUEUE' lies outside of the normal static - * range. - */ - shared_unsched_queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE); - queue = queue_create("EMctrl-unschedQ-shared", EM_QUEUE_TYPE_UNSCHEDULED, - EM_QUEUE_PRIO_UNDEF, EM_QUEUE_GROUP_UNDEF, - shared_unsched_queue, EM_ATOMIC_GROUP_UNDEF, - NULL /* use default queue config */, &err_str); - if (queue == EM_QUEUE_UNDEF || queue != shared_unsched_queue) - return EM_FATAL(EM_ERR_NOT_FREE); - - /* - * Create static internal per-core UNSCHEDULED queues used for - * internal EM messaging. Cannot use em_queue_create_static() - * here since the requested handles lies outside of the normal - * static range. 
- */ - memset(&unsch_conf, 0, sizeof(unsch_conf)); - unsch_conf.flags |= EM_QUEUE_FLAG_DEQ_NOT_MTSAFE; - - for (i = 0; i < num_cores; i++) { - em_queue_t queue_req; - - queue_req = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + i); - snprintf(q_name, sizeof(q_name), "EMctrl-unschedQ-core%d", i); - q_name[EM_QUEUE_NAME_LEN - 1] = '\0'; - - queue = queue_create(q_name, EM_QUEUE_TYPE_UNSCHEDULED, - EM_QUEUE_PRIO_UNDEF, EM_QUEUE_GROUP_UNDEF, - queue_req, EM_ATOMIC_GROUP_UNDEF, - &unsch_conf, /* request deq-not-mtsafe */ - &err_str); - if (unlikely(queue == EM_QUEUE_UNDEF || queue != queue_req)) - return EM_FATAL(EM_ERR_NOT_FREE); - } - - return EM_OK; -} - -static em_status_t -daemon_eo_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf) -{ - (void)eo_ctx; - (void)eo; - (void)conf; - - EM_PRINT("daemon-eo:%" PRI_EO " starting!\n", eo); - return EM_OK; -} - -static em_status_t -daemon_eo_stop(void *eo_ctx, em_eo_t eo) -{ - em_status_t stat = EM_OK; - eo_elem_t *const eo_elem = eo_elem_get(eo); - - (void)eo_ctx; - - EM_PRINT("%s() on EM-core %d\n", __func__, em_core_id()); - - if (unlikely(eo_elem == NULL)) { - stat = EM_FATAL(EM_ERR_BAD_ID); - DAEMON_ERROR(stat, "daemon-eo handle:%" PRI_EO " invalid!", eo); - return stat; - } - - /* Cannot use API funcs - internal ctrl events might not work */ - stat = queue_disable_all(eo_elem); - stat |= eo_delete_queue_all(eo_elem); - /* Finally delete the daemon-eo, API func is ok here */ - stat |= em_eo_delete(eo); - - const int num_cores = em_core_count(); - em_queue_t unsched_queue; - em_event_t unsched_event; - - unsched_queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE); - for (;/* flush unsched queue */;) { - unsched_event = em_queue_dequeue(unsched_queue); - if (unsched_event == EM_EVENT_UNDEF) - break; - em_free(unsched_event); - } - stat = em_queue_delete(unsched_queue); - if (unlikely(stat != EM_OK)) - return DAEMON_ERROR(stat, "shared unschedQ delete"); - - for (int i = 0; i < num_cores; i++) { - unsched_queue = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + i); - - for (;/* flush unsched queue */;) { - unsched_event = em_queue_dequeue(unsched_queue); - if (unsched_event == EM_EVENT_UNDEF) - break; - em_free(unsched_event); - } - - stat = em_queue_delete(unsched_queue); - if (unlikely(stat != EM_OK)) - return DAEMON_ERROR(stat, "core unschedQ:%d delete", i); - } - - return stat; -} - -void daemon_eo_receive(void *eo_ctx, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx) -{ - internal_event_receive(eo_ctx, event, type, queue, q_ctx); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +#define DAEMON_ERROR(error, ...) \ + INTERNAL_ERROR((error), EM_ESCOPE_DAEMON, ## __VA_ARGS__) + +static em_status_t +daemon_eo_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf); +static em_status_t +daemon_eo_stop(void *eo_ctx, em_eo_t eo); + +void daemon_eo_create(void) +{ + em_eo_t eo; + em_status_t stat; + em_status_t stat_eo_start = EM_ERROR; + + eo = em_eo_create("daemon-eo", daemon_eo_start, NULL /*start_local*/, + daemon_eo_stop, NULL /*stop_local*/, + daemon_eo_receive, NULL); + if (eo == EM_EO_UNDEF) + DAEMON_ERROR(EM_FATAL(EM_ERR_BAD_ID), "daemon-eo create fail"); + + /* Store the EO in shared memory */ + em_shm->daemon.eo = eo; + + stat = em_eo_start(eo, &stat_eo_start, NULL, 0, NULL); + if (stat != EM_OK || stat_eo_start != EM_OK) + DAEMON_ERROR(EM_FATAL(EM_ERR_LIB_FAILED), + "daemon-eo start failed!"); +} + +void daemon_eo_shutdown(void) +{ + const int core = em_core_id(); + const em_eo_t eo = em_shm->daemon.eo; + eo_elem_t *const eo_elem = eo_elem_get(eo); + em_status_t stat; + + EM_PRINT("%s() on EM-core %d\n", __func__, core); + + if (unlikely(eo_elem == NULL)) { + DAEMON_ERROR(EM_FATAL(EM_ERR_BAD_ID), + "daemon-eo handle:%" PRI_EO " invalid!", eo); + return; + } + + /* + * Stop the daemon-EO, i.e. call the daemon-EO global stop func. + * Note: cannot call normal API func em_eo_stop() since that would use + * internal ctrl events that might not be dispatched during shutdown. + */ + /* Change state here to allow em_eo_delete() from EO stop func */ + eo_elem->state = EM_EO_STATE_CREATED; /* == EO_STATE_STOPPED */ + stat = eo_elem->stop_func(eo_elem->eo_ctx, eo); + if (stat != EM_OK) + DAEMON_ERROR(EM_FATAL(EM_ERR_LIB_FAILED), + "daemon-eo stop/delete failed!"); +} + +em_status_t daemon_eo_queues_create(void) +{ + const int num_cores = em_core_count(); + char q_name[EM_QUEUE_NAME_LEN]; + em_queue_t shared_unsched_queue; + em_queue_t queue; + em_queue_conf_t unsch_conf; + + const char *err_str = ""; + + EM_DBG("%s()\n", __func__); + + /* + * Create shared internal unsched queue used for internal EM messaging. + * Cannot use em_queue_create_static() here since the requested handle + * 'SHARED_INTERNAL_UNSCHED_QUEUE' lies outside of the normal static + * range. + */ + shared_unsched_queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE); + queue = queue_create("EMctrl-unschedQ-shared", EM_QUEUE_TYPE_UNSCHEDULED, + EM_QUEUE_PRIO_UNDEF, EM_QUEUE_GROUP_UNDEF, + shared_unsched_queue, EM_ATOMIC_GROUP_UNDEF, + NULL /* use default queue config */, &err_str); + if (queue == EM_QUEUE_UNDEF || queue != shared_unsched_queue) + return EM_FATAL(EM_ERR_NOT_FREE); + + /* + * Create static internal per-core UNSCHEDULED queues used for + * internal EM messaging. 
Cannot use em_queue_create_static()
+ * here since the requested handles lie outside of the normal
+ * static range.
+ */
+ memset(&unsch_conf, 0, sizeof(unsch_conf));
+ unsch_conf.flags |= EM_QUEUE_FLAG_DEQ_NOT_MTSAFE;
+
+ for (int i = 0; i < num_cores; i++) {
+ em_queue_t queue_req;
+
+ queue_req = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + i);
+ snprintf(q_name, sizeof(q_name), "EMctrl-unschedQ-core%d", i);
+ q_name[EM_QUEUE_NAME_LEN - 1] = '\0';
+
+ queue = queue_create(q_name, EM_QUEUE_TYPE_UNSCHEDULED,
+ EM_QUEUE_PRIO_UNDEF, EM_QUEUE_GROUP_UNDEF,
+ queue_req, EM_ATOMIC_GROUP_UNDEF,
+ &unsch_conf, /* request deq-not-mtsafe */
+ &err_str);
+ if (unlikely(queue == EM_QUEUE_UNDEF || queue != queue_req))
+ return EM_FATAL(EM_ERR_NOT_FREE);
+ }
+
+ return EM_OK;
+}
+
+static em_status_t
+daemon_eo_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf)
+{
+ (void)eo_ctx;
+ (void)eo;
+ (void)conf;
+
+ EM_PRINT("daemon-eo:%" PRI_EO " starting!\n", eo);
+ return EM_OK;
+}
+
+static em_status_t
+daemon_eo_stop(void *eo_ctx, em_eo_t eo)
+{
+ em_status_t stat = EM_OK;
+ eo_elem_t *const eo_elem = eo_elem_get(eo);
+
+ (void)eo_ctx;
+
+ EM_PRINT("%s() on EM-core %d\n", __func__, em_core_id());
+
+ if (unlikely(eo_elem == NULL)) {
+ stat = EM_FATAL(EM_ERR_BAD_ID);
+ DAEMON_ERROR(stat, "daemon-eo handle:%" PRI_EO " invalid!", eo);
+ return stat;
+ }
+
+ /* Cannot use API funcs - internal ctrl events might not work */
+ stat = queue_disable_all(eo_elem);
+ if (unlikely(stat != EM_OK))
+ return DAEMON_ERROR(stat, "daemon-eo disable queues");
+ stat = eo_delete_queue_all(eo_elem);
+ if (unlikely(stat != EM_OK))
+ return DAEMON_ERROR(stat, "daemon-eo delete queues");
+ /* Finally delete the daemon-eo, API func is ok here */
+ stat = em_eo_delete(eo);
+ if (unlikely(stat != EM_OK))
+ return DAEMON_ERROR(stat, "daemon-eo delete");
+
+ const int num_cores = em_core_count();
+ em_queue_t unsched_queue;
+ em_event_t unsched_event;
+
+ unsched_queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE);
+ for (;/* flush unsched queue */;) {
+ unsched_event = em_queue_dequeue(unsched_queue);
+ if (unsched_event == EM_EVENT_UNDEF)
+ break;
+ em_free(unsched_event);
+ }
+ stat = em_queue_delete(unsched_queue);
+ if (unlikely(stat != EM_OK))
+ return DAEMON_ERROR(stat, "shared unschedQ delete");
+
+ for (int i = 0; i < num_cores; i++) {
+ unsched_queue = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + i);
+
+ for (;/* flush unsched queue */;) {
+ unsched_event = em_queue_dequeue(unsched_queue);
+ if (unsched_event == EM_EVENT_UNDEF)
+ break;
+ em_free(unsched_event);
+ }
+
+ stat = em_queue_delete(unsched_queue);
+ if (unlikely(stat != EM_OK))
+ return DAEMON_ERROR(stat, "core unschedQ:%d delete", i);
+ }
+
+ return stat;
+}
+
+void daemon_eo_receive(void *eo_ctx, em_event_t event, em_event_type_t type,
+ em_queue_t queue, void *q_ctx)
+{
+ internal_event_receive(eo_ctx, event, type, queue, q_ctx);
+}
diff --git a/src/em_dispatcher.c b/src/em_dispatcher.c
index f6bd1e30..ab87b62c 100644
--- a/src/em_dispatcher.c
+++ b/src/em_dispatcher.c
@@ -1,164 +1,166 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -static int read_config_file(void) -{ - const char *conf_str; - int val = 0; - int64_t val64 = 0; - int ret; - - /* - * Option: dispatch.poll_ctrl_interval - */ - conf_str = "dispatch.poll_ctrl_interval"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - - if (val < 0) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", - conf_str, val); - return -1; - } - /* store & print the value */ - em_shm->opt.dispatch.poll_ctrl_interval = val; - EM_PRINT(" %s: %d\n", conf_str, val); - - /* - * Option: dispatch.poll_ctrl_interval_ns - */ - conf_str = "dispatch.poll_ctrl_interval_ns"; - ret = em_libconfig_lookup_int64(&em_shm->libconfig, conf_str, &val64); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - - if (val64 < 0) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %" PRId64 "'\n", - conf_str, val64); - return -1; - } - /* store & print the value */ - em_shm->opt.dispatch.poll_ctrl_interval_ns = val64; - long double sec = (long double)val64 / 1000000000.0; - - EM_PRINT(" %s: %" PRId64 "ns (%Lfs)\n", conf_str, val64, sec); - - /* Store ns value as odp_time_t */ - em_shm->opt.dispatch.poll_ctrl_interval_time = odp_time_global_from_ns(val64); - - /* - * Option: dispatch.poll_drain_interval - */ - conf_str = "dispatch.poll_drain_interval"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - - if (val < 0) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", - conf_str, val); - return -1; - } - /* store & print the value */ - em_shm->opt.dispatch.poll_drain_interval = val; - EM_PRINT(" %s: %d\n", conf_str, val); - - /* - * Option: dispatch.poll_drain_interval_ns - */ - conf_str = "dispatch.poll_drain_interval_ns"; - ret = em_libconfig_lookup_int64(&em_shm->libconfig, conf_str, &val64); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - - if (val64 < 0) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %" PRId64 "'\n", - conf_str, val64); - return -1; - } - /* store & print the value */ - 
em_shm->opt.dispatch.poll_drain_interval_ns = val64; - sec = (long double)val64 / 1000000000.0; - - EM_PRINT(" %s: %" PRId64 "ns (%Lfs)\n", conf_str, val64, sec); - - /* Store ns value as odp_time_t */ - em_shm->opt.dispatch.poll_drain_interval_time = odp_time_global_from_ns(val64); - - return 0; -} - -em_status_t dispatch_init(void) -{ - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - return EM_OK; -} - -em_status_t dispatch_init_local(void) -{ - em_locm_t *const locm = &em_locm; - odp_time_t poll_period = em_shm->opt.dispatch.poll_ctrl_interval_time; - odp_time_t now = odp_time_global(); - - /* - * Initialize values so that the _first_ call to em_dispatch() on each - * core will trigger a poll of the unscheduled ctrl queues. - */ - locm->dispatch_cnt = 1; - locm->dispatch_last_run = odp_time_diff(now, poll_period); /* wrap OK */ - - /* - * Sanity check: - * Perform the same calculation as in dispatch_poll_ctrl_queue() - * to verify that ctrl queue polling is triggered by the first dispatch. - */ - odp_time_t period = odp_time_diff(now, locm->dispatch_last_run); - - if (odp_time_cmp(period, poll_period) != 0) /* 0: periods equal */ - return EM_ERR_TOONEAR; - - locm->poll_drain_dispatch_cnt = em_shm->opt.dispatch.poll_drain_interval; - locm->poll_drain_dispatch_last_run = now; - - return EM_OK; -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "em_include.h" + +static int read_config_file(void) +{ + const char *conf_str; + int val = 0; + int64_t val64 = 0; + int ret; + + /* + * Option: dispatch.poll_ctrl_interval + */ + conf_str = "dispatch.poll_ctrl_interval"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + + if (val < 0) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", + conf_str, val); + return -1; + } + /* store & print the value */ + em_shm->opt.dispatch.poll_ctrl_interval = val; + EM_PRINT(" %s: %d\n", conf_str, val); + + /* + * Option: dispatch.poll_ctrl_interval_ns + */ + conf_str = "dispatch.poll_ctrl_interval_ns"; + ret = em_libconfig_lookup_int64(&em_shm->libconfig, conf_str, &val64); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + + if (val64 < 0) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %" PRId64 "'\n", + conf_str, val64); + return -1; + } + /* store & print the value */ + em_shm->opt.dispatch.poll_ctrl_interval_ns = val64; + long double sec = (long double)val64 / 1000000000.0; + + EM_PRINT(" %s: %" PRId64 "ns (%Lfs)\n", conf_str, val64, sec); + + /* Store ns value as odp_time_t */ + em_shm->opt.dispatch.poll_ctrl_interval_time = odp_time_global_from_ns(val64); + + /* + * Option: dispatch.poll_drain_interval + */ + conf_str = "dispatch.poll_drain_interval"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + + if (val < 0) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", + conf_str, val); + return -1; + } + /* store & print the value */ + em_shm->opt.dispatch.poll_drain_interval = val; + EM_PRINT(" %s: %d\n", conf_str, val); + + /* + * Option: dispatch.poll_drain_interval_ns + */ + conf_str = "dispatch.poll_drain_interval_ns"; + ret = em_libconfig_lookup_int64(&em_shm->libconfig, conf_str, &val64); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + + if (val64 < 0) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %" PRId64 "'\n", + conf_str, val64); + return -1; + } + /* store & print the value */ + em_shm->opt.dispatch.poll_drain_interval_ns = val64; + sec = (long double)val64 / 1000000000.0; + + EM_PRINT(" %s: %" PRId64 "ns (%Lfs)\n", conf_str, val64, sec); + + /* Store ns value as odp_time_t */ + em_shm->opt.dispatch.poll_drain_interval_time = odp_time_global_from_ns(val64); + + return 0; +} + +em_status_t dispatch_init(void) +{ + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + return EM_OK; +} + +em_status_t dispatch_init_local(void) +{ + em_locm_t *const locm = &em_locm; + odp_time_t poll_period = em_shm->opt.dispatch.poll_ctrl_interval_time; + odp_time_t now = odp_time_global(); + + /* + * Initialize values so that the _first_ call to em_dispatch() on each + * core will trigger a poll of the unscheduled ctrl queues. + */ + locm->dispatch_cnt = 1; + locm->dispatch_last_run = odp_time_diff(now, poll_period); /* wrap OK */ + + /* + * Sanity check: + * Perform the same calculation as in dispatch_poll_ctrl_queue() + * to verify that ctrl queue polling is triggered by the first dispatch. 
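+ *
+ * dispatch_last_run was set above to (now - poll_period), so the period
+ * computed below equals poll_period exactly (assuming
+ * dispatch_poll_ctrl_queue() polls once the elapsed period >= poll_period,
+ * the first dispatch round then triggers the poll).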
+ */ + odp_time_t period = odp_time_diff(now, locm->dispatch_last_run); + + if (odp_time_cmp(period, poll_period) != 0) /* 0: periods equal */ + return EM_ERR_TOONEAR; + + locm->poll_drain_dispatch_cnt = em_shm->opt.dispatch.poll_drain_interval; + locm->poll_drain_dispatch_last_run = now; + + locm->idle_state = IDLE_STATE_ACTIVE; + + return EM_OK; +} diff --git a/src/em_dispatcher.h b/src/em_dispatcher.h index 2c3a921d..7493d80f 100644 --- a/src/em_dispatcher.h +++ b/src/em_dispatcher.h @@ -1,582 +1,650 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * EM internal dispatcher functions - */ - -#ifndef EM_DISPATCHER_H_ -#define EM_DISPATCHER_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -em_status_t dispatch_init(void); -em_status_t dispatch_init_local(void); - -static inline void -dispatch_multi_receive(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], - const int num_events, queue_elem_t *const q_elem, - const bool check_local_qs); -static inline void -dispatch_single_receive(event_hdr_t *ev_hdr_tbl[], const int num_events, - queue_elem_t *const q_elem, const bool check_local_qs); - -/** - * Helper: Remove undef-event entries from ev_tbl[] - */ -static inline int pack_ev_tbl(em_event_t ev_tbl[/*in,out*/], const int num) -{ - if (num == 1) { - if (ev_tbl[0] != EM_EVENT_UNDEF) - return 1; - else - return 0; - } - - int pack = 0; - - for (int i = 0; i < num; i++) { - if (ev_tbl[i] != EM_EVENT_UNDEF) { - if (pack < i) - ev_tbl[pack] = ev_tbl[i]; - pack++; - } - } - - return pack; -} - -/** - * Run all dispatch enter-callback functions. - * - * @note Neither EO-receive nor any further enter-callbacks will be called if - * all events have been dropped by the callbacks already run, i.e. - * no callback or EO-receive will be called with 'num=0'. 
- * - * @param eo EO handle - * @param eo_ctx EO context data - * @param[in,out] ev_tbl Event table - * @param num_events Number of events in the event table - * @param queue Queue from which this event came from - * @param q_ctx Queue context data - * - * @return The number of events in ev_tbl[] after all dispatch enter callbacks - */ -static inline int -dispatch_enter_cb(em_eo_t eo, void **eo_ctx, - em_event_t ev_tbl[/*in,out*/], const int num_events, - em_queue_t *queue, void **q_ctx) -{ - const hook_tbl_t *cb_tbl = em_shm->dispatch_enter_cb_tbl; - em_dispatch_enter_func_t dispatch_enter_fn; - int num = num_events; - - for (int i = 0; i < EM_CALLBACKS_MAX && num > 0; i++) { - dispatch_enter_fn = cb_tbl->tbl[i].disp_enter; - if (dispatch_enter_fn == NULL) - break; - dispatch_enter_fn(eo, eo_ctx, ev_tbl, num, queue, q_ctx); - num = pack_ev_tbl(ev_tbl, num); - } - - return num; -} - -/** - * Run all dispatch exit-callback functions. - * - * @param eo EO handle - */ -static inline void -dispatch_exit_cb(em_eo_t eo) -{ - const hook_tbl_t *dispatch_exit_cb_tbl = em_shm->dispatch_exit_cb_tbl; - em_dispatch_exit_func_t dispatch_exit_fn; - int i; - - for (i = 0; i < EM_CALLBACKS_MAX; i++) { - dispatch_exit_fn = dispatch_exit_cb_tbl->tbl[i].disp_exit; - if (dispatch_exit_fn == NULL) - return; - dispatch_exit_fn(eo); - } -} - -static inline void -call_eo_receive_fn(const em_eo_t eo, const em_receive_func_t eo_receive_func, - event_hdr_t *ev_hdr, queue_elem_t *const q_elem) -{ - em_locm_t *const locm = &em_locm; - em_queue_t queue = q_elem->queue; - void *queue_ctx = q_elem->context; - void *eo_ctx = q_elem->eo_ctx; - em_event_t event = event_hdr_to_event(ev_hdr); - int num = 1; - - locm->current.q_elem = q_elem; - locm->current.rcv_multi_cnt = 1; - /* Check and set core local event group (before dispatch callback(s)) */ - event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1); - - if (esv_enabled()) - event = evstate_em2usr(event, ev_hdr, EVSTATE__DISPATCH); - - if (EM_DISPATCH_CALLBACKS_ENABLE) { - em_event_t ev_tbl[1] = {event}; - - num = dispatch_enter_cb(eo, &eo_ctx, ev_tbl/*in,out*/, 1, - &queue, &queue_ctx); - if (num && ev_tbl[0] != event) { - /* user-callback changed event: update event & hdr */ - event = ev_tbl[0]; - ev_hdr = event_to_hdr(event); - } - } - - if (likely(num == 1)) { - em_event_type_t event_type = ev_hdr->event_type; - /* - * Call the EO receive function - * (only if the dispatch callback(s) did not free the event) - */ - eo_receive_func(eo_ctx, event, event_type, - queue, queue_ctx); - } - - if (EM_DISPATCH_CALLBACKS_ENABLE) - dispatch_exit_cb(eo); - - /* - * Event belongs to an event_group, update the count and - * if requested send notifications - */ - if (locm->current.egrp != EM_EVENT_GROUP_UNDEF) { - /* - * Atomically decrease the event group count. - * If the new count is zero, send notification events. 
- */ - event_group_count_decrement(1); - } - locm->current.egrp = EM_EVENT_GROUP_UNDEF; -} - -/** - * @note All events belong to the same event group - * @note Event type dropped from multi-event receive - use em_event_get_type() - */ -static inline void -call_eo_receive_multi_fn(const em_eo_t eo, - const em_receive_multi_func_t eo_receive_multi_func, - em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], - const int num_events, queue_elem_t *const q_elem) -{ - em_locm_t *const locm = &em_locm; - em_queue_t queue = q_elem->queue; - void *queue_ctx = q_elem->context; - void *eo_ctx = q_elem->eo_ctx; - int num = num_events; - - locm->current.q_elem = q_elem; - locm->current.rcv_multi_cnt = num_events; - /* Check and set core local event group (before dispatch callback(s)) */ - event_group_set_local(ev_hdr_tbl[0]->egrp, ev_hdr_tbl[0]->egrp_gen, - num_events); - - if (esv_enabled()) - evstate_em2usr_multi(ev_tbl, ev_hdr_tbl, num_events, - EVSTATE__DISPATCH_MULTI); - - if (EM_DISPATCH_CALLBACKS_ENABLE) - num = dispatch_enter_cb(eo, &eo_ctx, - ev_tbl/*in,out*/, num_events, - &queue, &queue_ctx); - if (likely(num > 0)) { - /* - * Call the EO multi-event receive function - * (only if the dispatch callback(s) did not free all events) - */ - eo_receive_multi_func(eo_ctx, ev_tbl, num, queue, queue_ctx); - } - - if (EM_DISPATCH_CALLBACKS_ENABLE) - dispatch_exit_cb(eo); - - /* - * Event belongs to an event_group, update the count and - * if requested send notifications - */ - if (locm->current.egrp != EM_EVENT_GROUP_UNDEF) { - /* - * Atomically decrease the event group count. - * If the new count is zero, send notification events. - */ - event_group_count_decrement(num_events); - } - locm->current.egrp = EM_EVENT_GROUP_UNDEF; -} - -static inline void -dispatch_local_queues(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], - const int num) -{ - const bool esv_ena = esv_enabled(); - queue_elem_t *q_elem; - int idx = 0; /* index into ev_tbl[] & ev_hdr_tbl[] */ - int ev_cnt; /* number of events to the same local-queue */ - int i; - - /* Loop through 'num' events and dispatch in batches to local queues */ - do { - /* dst local queue */ - q_elem = ev_hdr_tbl[idx]->q_elem; - /* count events sent to the same local queue */ - for (i = idx + 1; i < num && q_elem == ev_hdr_tbl[i]->q_elem; i++) - ; - ev_cnt = i - idx; /* '1 to num' events */ - - if (unlikely(q_elem == NULL || q_elem->state != EM_QUEUE_STATE_READY)) { - if (esv_ena) - evstate_em2usr_multi(&ev_tbl[idx], - &ev_hdr_tbl[idx], ev_cnt, - EVSTATE__DISPATCH_LOCAL__FAIL); - em_free_multi(&ev_tbl[idx], ev_cnt); - /* Consider removing the logging */ - EM_LOG(EM_LOG_PRINT, - "EM info: %s(): localQ:%" PRI_QUEUE ":\n" - "Not ready - state:%d drop:%d events\n", - __func__, q_elem->queue, - q_elem->state, ev_cnt); - idx += ev_cnt; - continue; - } - - if (q_elem->use_multi_rcv) - dispatch_multi_receive(&ev_tbl[idx], - &ev_hdr_tbl[idx], - ev_cnt, q_elem, false); - else - dispatch_single_receive(&ev_hdr_tbl[idx], - ev_cnt, q_elem, false); - idx += ev_cnt; - } while (idx < num); -} - -static inline void -check_local_queues(void) -{ - em_event_t ev_tbl[EM_SCHED_MULTI_MAX_BURST]; - event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; - int num; - - if (em_locm.local_queues.empty) - return; - - /* - * Check if the previous EO receive function sent events to a - * local queue ('EM_QUEUE_TYPE_LOCAL') - and if so, dispatch - * those events immediately. 
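check_local_queues()/dispatch_local_queues() are what give EM_QUEUE_TYPE_LOCAL queues their semantics: events sent to a local queue bypass the scheduler and are dispatched on the sending core right after the current EO-receive returns. A minimal setup sketch, assuming an initialized EM application and an existing EO handle (error handling elided):

#include <event_machine.h>

static em_queue_t add_local_queue(em_eo_t eo)
{
	em_queue_t local_q = em_queue_create("my-local-q",
					     EM_QUEUE_TYPE_LOCAL,
					     EM_QUEUE_PRIO_NORMAL,
					     EM_QUEUE_GROUP_DEFAULT,
					     NULL /* default conf */);

	/* Events em_send():ed to 'local_q' from this EO's receive function
	 * are dispatched on the same core once the receive function returns.
	 */
	em_eo_add_queue_sync(eo, local_q);
	return local_q;
}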
- */ - for (;;) { - num = next_local_queue_events(ev_tbl, EM_SCHED_MULTI_MAX_BURST); - if (num <= 0) - return; - - event_to_hdr_multi(ev_tbl, ev_hdr_tbl, num); - - dispatch_local_queues(ev_tbl, ev_hdr_tbl, num); - } -} - -/** - * Count events (hdrs) sent/tagged with the same event group - */ -static inline int -count_same_evgroup(event_hdr_t *ev_hdr_tbl[], const unsigned int num) -{ - if (unlikely(num < 2)) - return num; - - const em_event_group_t egrp = ev_hdr_tbl[0]->egrp; - unsigned int i = 1; /* 2nd hdr */ - - if (EM_EVENT_GROUP_SAFE_MODE) { - const int32_t egrp_gen = ev_hdr_tbl[0]->egrp_gen; - - for (; i < num && - egrp == ev_hdr_tbl[i]->egrp && - egrp_gen == ev_hdr_tbl[i]->egrp_gen; i++) - ; - } else { - for (; i < num && - egrp == ev_hdr_tbl[i]->egrp; i++) - ; - } - - return i; -} - -static inline void -dispatch_multi_receive(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], - const int num_events, queue_elem_t *const q_elem, - const bool check_local_qs) -{ - const em_eo_t eo = q_elem->eo; - const em_receive_multi_func_t eo_rcv_multi_fn = - q_elem->receive_multi_func; - int idx = 0; /* index into ev_hdr_tbl[] */ - int i; - int j; - - do { - /* count same event groups: 1 to num_events */ - const int egrp_cnt = count_same_evgroup(&ev_hdr_tbl[idx], - num_events - idx); - const int max = q_elem->max_events; - const int num = MIN(egrp_cnt, max); - const int rounds = egrp_cnt / num; - const int left_over = egrp_cnt % num; - - if (check_local_qs) { - em_locm_t *const locm = &em_locm; - - j = idx; - for (i = 0; i < rounds; i++) { - locm->event_burst_cnt -= num; - call_eo_receive_multi_fn(eo, eo_rcv_multi_fn, - &ev_tbl[j], - &ev_hdr_tbl[j], - num, q_elem); - check_local_queues(); - j += num; - } - if (left_over) { - locm->event_burst_cnt = 0; - call_eo_receive_multi_fn(eo, eo_rcv_multi_fn, - &ev_tbl[j], - &ev_hdr_tbl[j], - left_over, q_elem); - check_local_queues(); - } - } else { - j = idx; - for (i = 0; i < rounds; i++) { - call_eo_receive_multi_fn(eo, eo_rcv_multi_fn, - &ev_tbl[j], - &ev_hdr_tbl[j], - num, q_elem); - j += num; - } - if (left_over) { - call_eo_receive_multi_fn(eo, eo_rcv_multi_fn, - &ev_tbl[j], - &ev_hdr_tbl[j], - left_over, q_elem); - } - } - - idx += egrp_cnt; - } while (idx < num_events); -} - -static inline void -dispatch_single_receive(event_hdr_t *ev_hdr_tbl[], const int num_events, - queue_elem_t *const q_elem, const bool check_local_qs) -{ - const em_eo_t eo = q_elem->eo; - const em_receive_func_t eo_rcv_fn = q_elem->receive_func; - int i; - - if (check_local_qs) { - for (i = 0; i < num_events; i++) { - em_locm.event_burst_cnt--; - call_eo_receive_fn(eo, eo_rcv_fn, - ev_hdr_tbl[i], q_elem); - check_local_queues(); - } - } else { - for (i = 0; i < num_events; i++) - call_eo_receive_fn(eo, eo_rcv_fn, - ev_hdr_tbl[i], q_elem); - } -} - -/** - * Dispatch events - call the EO-receive functions and pass the - * events for processing - */ -static inline void -dispatch_events(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], - const int num_events, queue_elem_t *const q_elem) -{ - em_locm_t *const locm = &em_locm; - const em_queue_type_t q_type = q_elem->type; - em_sched_context_type_t sched_ctx_type = EM_SCHED_CONTEXT_TYPE_NONE; - - if (q_type == EM_QUEUE_TYPE_ATOMIC) - sched_ctx_type = EM_SCHED_CONTEXT_TYPE_ATOMIC; - else if (q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED) - sched_ctx_type = EM_SCHED_CONTEXT_TYPE_ORDERED; - - locm->current.sched_context_type = sched_ctx_type; - locm->current.sched_q_elem = q_elem; - /* here: locm->current.egrp == EM_EVENT_GROUP_UNDEF */ - - /* - * 
Call the Execution Object (EO) receive function. - * Scheduling context may be released during this. - */ - if (q_elem->use_multi_rcv) - dispatch_multi_receive(ev_tbl, ev_hdr_tbl, num_events, - q_elem, true); - else - dispatch_single_receive(ev_hdr_tbl, num_events, q_elem, true); - - /* - * Check for buffered events sent to output queues during the previous - * dispatch rounds - */ - if (!EM_OUTPUT_QUEUE_IMMEDIATE && - locm->output_queue_track.idx_cnt > 0) - output_queue_buffering_drain(); - - locm->current.q_elem = NULL; - locm->current.sched_q_elem = NULL; - locm->current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE; -} - -static inline void -dispatch_poll_ctrl_queue(void) -{ - const unsigned int poll_interval = em_shm->opt.dispatch.poll_ctrl_interval; - - /* - * Rate limit how often this core checks the unsched ctrl queue. - */ - - if (poll_interval > 1) { - em_locm_t *const locm = &em_locm; - - locm->dispatch_cnt--; - if (locm->dispatch_cnt > 0) - return; - locm->dispatch_cnt = poll_interval; - - odp_time_t now = odp_time_global(); - odp_time_t period = odp_time_diff(now, locm->dispatch_last_run); - odp_time_t poll_period = em_shm->opt.dispatch.poll_ctrl_interval_time; - - if (odp_time_cmp(period, poll_period) < 0) - return; - locm->dispatch_last_run = now; - } - - /* Poll internal unscheduled ctrl queues */ - poll_unsched_ctrl_queue(); -} - -/* - * Run a dispatch round - query the scheduler for events and dispatch - */ -static inline int -dispatch_round(void) -{ - odp_queue_t odp_queue; - odp_event_t odp_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; - event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; - em_event_t ev_tbl[EM_SCHED_MULTI_MAX_BURST]; - queue_elem_t *queue_elem; - - dispatch_poll_ctrl_queue(); - - /* - * Schedule events to the core from queues - */ - const int num_events = - odp_schedule_multi_no_wait(&odp_queue, odp_ev_tbl, - EM_SCHED_MULTI_MAX_BURST); - if (unlikely(num_events <= 0)) { - /* - * No scheduled events available, check if the local queues - * contain anything on this core - e.g. pktio or something - * outside the dispatch-context might have sent to a local queue - */ - check_local_queues(); - return 0; - } - - queue_elem = odp_queue_context(odp_queue); - - /* Events might originate from outside of EM and need init */ - if (num_events == 1) - ev_tbl[0] = event_init_odp(odp_ev_tbl[0], true, ev_hdr_tbl/*out:1*/); - else - event_init_odp_multi(odp_ev_tbl, ev_tbl/*out*/, ev_hdr_tbl/*out*/, - num_events, true/*is_extev*/); - - if (unlikely(queue_elem == NULL || - queue_elem->state != EM_QUEUE_STATE_READY)) { - if (esv_enabled()) - evstate_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num_events, - EVSTATE__DISPATCH_SCHED__FAIL); - /* Drop all events dequeued from this queue */ - em_free_multi(ev_tbl, num_events); - - if (queue_elem == NULL) - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_DISPATCH, - "Event(s) from non-EM Q, drop %d events", - num_events); - else - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_DISPATCH, - "Q:%" PRI_QUEUE " not ready, state=%d\n" - " drop:%d event(s)\n", - queue_elem->queue, queue_elem->state, - num_events); - return 0; - } - - if (queue_elem->atomic_group == EM_ATOMIC_GROUP_UNDEF) { - em_locm.event_burst_cnt = num_events; - dispatch_events(ev_tbl, ev_hdr_tbl, num_events, queue_elem); - } else { - atomic_group_dispatch(ev_tbl, ev_hdr_tbl, - num_events, queue_elem); - } - - return num_events; -} - -#ifdef __cplusplus -} -#endif - -#endif /* EM_DISPATCHER_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * EM internal dispatcher functions + */ + +#ifndef EM_DISPATCHER_H_ +#define EM_DISPATCHER_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +em_status_t dispatch_init(void); +em_status_t dispatch_init_local(void); + +static inline void +dispatch_multi_receive(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], + const int num_events, queue_elem_t *const q_elem, + const bool check_local_qs); +static inline void +dispatch_single_receive(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], + const int num_events, queue_elem_t *const q_elem, + const bool check_local_qs); + +/** + * Helper: Remove undef-event entries from ev_tbl[] + */ +static inline int pack_ev_tbl(em_event_t ev_tbl[/*in,out*/], const int num) +{ + if (num == 1) { + if (ev_tbl[0] != EM_EVENT_UNDEF) + return 1; + else + return 0; + } + + int pack = 0; + + for (int i = 0; i < num; i++) { + if (ev_tbl[i] != EM_EVENT_UNDEF) { + if (pack < i) + ev_tbl[pack] = ev_tbl[i]; + pack++; + } + } + + return pack; +} + +static inline uint64_t debug_timestamp(void) +{ + /* compile time selection */ + return EM_DEBUG_TIMESTAMP_ENABLE == 1 ? odp_time_global_ns() : odp_time_global_strict_ns(); +} + +/** + * Run all dispatch enter-callback functions. + * + * @note Neither EO-receive nor any further enter-callbacks will be called if + * all events have been dropped by the callbacks already run, i.e. + * no callback or EO-receive will be called with 'num=0'. 
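pack_ev_tbl() above is plain in-place compaction. The standalone sketch below (stand-in types instead of EM handles) demonstrates the exact behavior the enter-callback chain depends on:

#include <stdio.h>

typedef void *ev_t;		/* stand-in for em_event_t */
#define EV_UNDEF ((ev_t)0)	/* stand-in for EM_EVENT_UNDEF */

static int pack_tbl(ev_t tbl[], int num)
{
	int pack = 0;

	for (int i = 0; i < num; i++) {
		if (tbl[i] != EV_UNDEF) {
			if (pack < i)
				tbl[pack] = tbl[i];
			pack++;
		}
	}
	return pack;
}

int main(void)
{
	int a, b;
	ev_t tbl[4] = {&a, EV_UNDEF, &b, EV_UNDEF};

	/* Compacts to {&a, &b, ...} and returns 2 */
	printf("%d events left\n", pack_tbl(tbl, 4));
	return 0;
}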
+ * + * @param eo EO handle + * @param eo_ctx EO context data + * @param[in,out] ev_tbl Event table + * @param num_events Number of events in the event table + * @param queue Queue from which this event came from + * @param q_ctx Queue context data + * + * @return The number of events in ev_tbl[] after all dispatch enter callbacks + */ +static inline int +dispatch_enter_cb(em_eo_t eo, void **eo_ctx, + em_event_t ev_tbl[/*in,out*/], const int num_events, + em_queue_t *queue, void **q_ctx) +{ + const hook_tbl_t *cb_tbl = em_shm->dispatch_enter_cb_tbl; + em_dispatch_enter_func_t dispatch_enter_fn; + int num = num_events; + + for (int i = 0; i < EM_CALLBACKS_MAX && num > 0; i++) { + dispatch_enter_fn = cb_tbl->tbl[i].disp_enter; + if (dispatch_enter_fn == NULL) + break; + dispatch_enter_fn(eo, eo_ctx, ev_tbl, num, queue, q_ctx); + num = pack_ev_tbl(ev_tbl, num); + } + + return num; +} + +/** + * Run all dispatch exit-callback functions. + * + * @param eo EO handle + */ +static inline void +dispatch_exit_cb(em_eo_t eo) +{ + const hook_tbl_t *dispatch_exit_cb_tbl = em_shm->dispatch_exit_cb_tbl; + em_dispatch_exit_func_t dispatch_exit_fn; + + for (int i = 0; i < EM_CALLBACKS_MAX; i++) { + dispatch_exit_fn = dispatch_exit_cb_tbl->tbl[i].disp_exit; + if (dispatch_exit_fn == NULL) + return; + dispatch_exit_fn(eo); + } +} + +static inline void +call_eo_receive_fn(const em_eo_t eo, const em_receive_func_t eo_receive_func, + em_event_t event, event_hdr_t *ev_hdr, queue_elem_t *const q_elem) +{ + em_locm_t *const locm = &em_locm; + em_queue_t queue = q_elem->queue; + void *queue_ctx = q_elem->context; + void *eo_ctx = q_elem->eo_ctx; + int num = 1; + + locm->current.q_elem = q_elem; + locm->current.rcv_multi_cnt = 1; + /* Check and set core local event group (before dispatch callback(s)) */ + event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1); + + if (esv_enabled()) + event = evstate_em2usr(event, ev_hdr, EVSTATE__DISPATCH); + + if (EM_DISPATCH_CALLBACKS_ENABLE) { + em_event_t ev_tbl[1] = {event}; + + num = dispatch_enter_cb(eo, &eo_ctx, ev_tbl/*in,out*/, 1, + &queue, &queue_ctx); + if (num && ev_tbl[0] != event) { + /* user-callback changed event: update event & hdr */ + event = ev_tbl[0]; + ev_hdr = event_to_hdr(event); + } + } + + if (likely(num == 1)) { + em_event_type_t event_type = ev_hdr->event_type; + /* + * Call the EO receive function + * (only if the dispatch callback(s) did not free the event) + */ + eo_receive_func(eo_ctx, event, event_type, + queue, queue_ctx); + } + + if (EM_DISPATCH_CALLBACKS_ENABLE) + dispatch_exit_cb(eo); + + /* + * Event belongs to an event_group, update the count and + * if requested send notifications + */ + if (locm->current.egrp != EM_EVENT_GROUP_UNDEF) { + /* + * Atomically decrease the event group count. + * If the new count is zero, send notification events. 
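call_eo_receive_fn() above invokes the application's single-event receive function with exactly the arguments shown. A minimal em_receive_func_t implementation matching that contract; the per-queue context layout is hypothetical:

#include <event_machine.h>

typedef struct {
	em_queue_t dst_q;	/* hypothetical per-queue context */
} my_q_ctx_t;

static void my_receive(void *eo_ctx, em_event_t event, em_event_type_t type,
		       em_queue_t queue, void *q_ctx)
{
	const my_q_ctx_t *ctx = q_ctx;

	(void)eo_ctx; (void)type; (void)queue;

	/* Forward the event; on send-failure ownership stays with us */
	if (em_send(event, ctx->dst_q) != EM_OK)
		em_free(event);
}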
+ */ + event_group_count_decrement(1); + } + locm->current.egrp = EM_EVENT_GROUP_UNDEF; +} + +/** + * @note All events belong to the same event group + * @note Event type dropped from multi-event receive - use em_event_get_type() + */ +static inline void +call_eo_receive_multi_fn(const em_eo_t eo, + const em_receive_multi_func_t eo_receive_multi_func, + em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], + const int num_events, queue_elem_t *const q_elem) +{ + em_locm_t *const locm = &em_locm; + em_queue_t queue = q_elem->queue; + void *queue_ctx = q_elem->context; + void *eo_ctx = q_elem->eo_ctx; + int num = num_events; + + locm->current.q_elem = q_elem; + locm->current.rcv_multi_cnt = num_events; + /* Check and set core local event group (before dispatch callback(s)) */ + event_group_set_local(ev_hdr_tbl[0]->egrp, ev_hdr_tbl[0]->egrp_gen, + num_events); + + if (esv_enabled()) + evstate_em2usr_multi(ev_tbl, ev_hdr_tbl, num_events, + EVSTATE__DISPATCH_MULTI); + + if (EM_DISPATCH_CALLBACKS_ENABLE) + num = dispatch_enter_cb(eo, &eo_ctx, + ev_tbl/*in,out*/, num_events, + &queue, &queue_ctx); + if (likely(num > 0)) { + /* + * Call the EO multi-event receive function + * (only if the dispatch callback(s) did not free all events) + */ + eo_receive_multi_func(eo_ctx, ev_tbl, num, queue, queue_ctx); + } + + if (EM_DISPATCH_CALLBACKS_ENABLE) + dispatch_exit_cb(eo); + + /* + * Event belongs to an event_group, update the count and + * if requested send notifications + */ + if (locm->current.egrp != EM_EVENT_GROUP_UNDEF) { + /* + * Atomically decrease the event group count. + * If the new count is zero, send notification events. + */ + event_group_count_decrement(num_events); + } + locm->current.egrp = EM_EVENT_GROUP_UNDEF; +} + +static inline void +dispatch_local_queues(stash_entry_t entry_tbl[], const int num) +{ + const bool esv_ena = esv_enabled(); + int idx = 0; /* index into ev_tbl[] & ev_hdr_tbl[] */ + int ev_cnt; /* number of events to the same local-queue */ + + em_event_t ev_tbl[num]; + event_hdr_t *ev_hdr_tbl[num]; + + for (int i = 0; i < num; i++) + ev_tbl[i] = (em_event_t)(uintptr_t)entry_tbl[i].evptr; + + event_to_hdr_multi(ev_tbl, ev_hdr_tbl, num); + + /* Loop through 'num' events and dispatch in batches to local queues */ + do { + /* dst local queue */ + const int qidx = entry_tbl[idx].qidx; + const em_queue_t queue = queue_idx2hdl(qidx); + queue_elem_t *const q_elem = queue_elem_get(queue); + int i; + + /* count events sent to the same local queue */ + for (i = idx + 1; i < num && entry_tbl[i].qidx == qidx; i++) + ; + ev_cnt = i - idx; /* '1 to num' events */ + + if (unlikely(q_elem == NULL || q_elem->state != EM_QUEUE_STATE_READY)) { + if (esv_ena) + evstate_em2usr_multi(&ev_tbl[idx], + &ev_hdr_tbl[idx], ev_cnt, + EVSTATE__DISPATCH_LOCAL__FAIL); + em_free_multi(&ev_tbl[idx], ev_cnt); + /* Consider removing the logging */ + EM_LOG(EM_LOG_PRINT, + "EM info: %s(): localQ:%" PRI_QUEUE ":\n" + "Not ready - state:%d drop:%d events\n", + __func__, q_elem->queue, + q_elem->state, ev_cnt); + idx += ev_cnt; + continue; + } + + if (q_elem->use_multi_rcv) + dispatch_multi_receive(&ev_tbl[idx], + &ev_hdr_tbl[idx], + ev_cnt, q_elem, false); + else + dispatch_single_receive(&ev_tbl[idx], + &ev_hdr_tbl[idx], + ev_cnt, q_elem, false); + idx += ev_cnt; + } while (idx < num); +} + +static inline void +check_local_queues(void) +{ + if (em_locm.local_queues.empty) + return; + + /* + * Check if the previous EO receive function sent events to a + * local queue ('EM_QUEUE_TYPE_LOCAL') - and if so, dispatch + * 
those events immediately. + */ + stash_entry_t entry_tbl[EM_SCHED_MULTI_MAX_BURST]; + + for (;;) { + int num = next_local_queue_events(entry_tbl /*[out]*/, + EM_SCHED_MULTI_MAX_BURST); + if (num <= 0) + return; + + dispatch_local_queues(entry_tbl, num); + } +} + +/** + * Count events (hdrs) sent/tagged with the same event group + */ +static inline int +count_same_evgroup(event_hdr_t *ev_hdr_tbl[], const unsigned int num) +{ + if (unlikely(num < 2)) + return num; + + const em_event_group_t egrp = ev_hdr_tbl[0]->egrp; + unsigned int i = 1; /* 2nd hdr */ + + if (EM_EVENT_GROUP_SAFE_MODE) { + const int32_t egrp_gen = ev_hdr_tbl[0]->egrp_gen; + + for (; i < num && + egrp == ev_hdr_tbl[i]->egrp && + egrp_gen == ev_hdr_tbl[i]->egrp_gen; i++) + ; + } else { + for (; i < num && + egrp == ev_hdr_tbl[i]->egrp; i++) + ; + } + + return i; +} + +static inline void +dispatch_multi_receive(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], + const int num_events, queue_elem_t *const q_elem, + const bool check_local_qs) +{ + const em_eo_t eo = q_elem->eo; + const em_receive_multi_func_t eo_rcv_multi_fn = + q_elem->receive_multi_func; + int idx = 0; /* index into ev_hdr_tbl[] */ + int i; + int j; + + do { + /* count same event groups: 1 to num_events */ + const int egrp_cnt = count_same_evgroup(&ev_hdr_tbl[idx], + num_events - idx); + const int max = q_elem->max_events; + const int num = MIN(egrp_cnt, max); + const int rounds = egrp_cnt / num; + const int left_over = egrp_cnt % num; + + if (check_local_qs) { + em_locm_t *const locm = &em_locm; + + j = idx; + for (i = 0; i < rounds; i++) { + locm->event_burst_cnt -= num; + call_eo_receive_multi_fn(eo, eo_rcv_multi_fn, + &ev_tbl[j], + &ev_hdr_tbl[j], + num, q_elem); + check_local_queues(); + j += num; + } + if (left_over) { + locm->event_burst_cnt = 0; + call_eo_receive_multi_fn(eo, eo_rcv_multi_fn, + &ev_tbl[j], + &ev_hdr_tbl[j], + left_over, q_elem); + check_local_queues(); + } + } else { + j = idx; + for (i = 0; i < rounds; i++) { + call_eo_receive_multi_fn(eo, eo_rcv_multi_fn, + &ev_tbl[j], + &ev_hdr_tbl[j], + num, q_elem); + j += num; + } + if (left_over) { + call_eo_receive_multi_fn(eo, eo_rcv_multi_fn, + &ev_tbl[j], + &ev_hdr_tbl[j], + left_over, q_elem); + } + } + + idx += egrp_cnt; + } while (idx < num_events); +} + +static inline void +dispatch_single_receive(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], + const int num_events, queue_elem_t *const q_elem, + const bool check_local_qs) +{ + const em_eo_t eo = q_elem->eo; + const em_receive_func_t eo_rcv_fn = q_elem->receive_func; + int i; + + if (check_local_qs) { + for (i = 0; i < num_events; i++) { + em_locm.event_burst_cnt--; + call_eo_receive_fn(eo, eo_rcv_fn, + ev_tbl[i], ev_hdr_tbl[i], + q_elem); + check_local_queues(); + } + } else { + for (i = 0; i < num_events; i++) + call_eo_receive_fn(eo, eo_rcv_fn, + ev_tbl[i], ev_hdr_tbl[i], + q_elem); + } +} + +/** + * Dispatch events - call the EO-receive functions and pass the + * events for processing + */ +static inline void +dispatch_events(em_event_t ev_tbl[], event_hdr_t *ev_hdr_tbl[], + const int num_events, queue_elem_t *const q_elem) +{ + em_locm_t *const locm = &em_locm; + const em_queue_type_t q_type = q_elem->type; + em_sched_context_type_t sched_ctx_type = EM_SCHED_CONTEXT_TYPE_NONE; + + if (q_type == EM_QUEUE_TYPE_ATOMIC) + sched_ctx_type = EM_SCHED_CONTEXT_TYPE_ATOMIC; + else if (q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED) + sched_ctx_type = EM_SCHED_CONTEXT_TYPE_ORDERED; + + locm->current.sched_context_type = sched_ctx_type; + 
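dispatch_multi_receive() above batches events per event group and hands them to the EO's multi-event receive function; as noted earlier, that function gets no per-event type argument, so em_event_get_type() must be queried when a type is needed. A minimal em_receive_multi_func_t sketch matching the call made by call_eo_receive_multi_fn(); the context layout and forwarding rule are hypothetical:

#include <event_machine.h>

static void my_receive_multi(void *eo_ctx, em_event_t events[], int num,
			     em_queue_t queue, void *q_ctx)
{
	em_queue_t dst_q = *(em_queue_t *)q_ctx; /* hypothetical ctx layout */

	(void)eo_ctx; (void)queue;

	for (int i = 0; i < num; i++) {
		/* No type arg in multi-receive: query per event if needed */
		if (em_event_get_type(events[i]) != EM_EVENT_TYPE_SW) {
			em_free(events[i]);
			continue;
		}
		if (em_send(events[i], dst_q) != EM_OK)
			em_free(events[i]);
	}
}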
locm->current.sched_q_elem = q_elem; + /* here: locm->current.egrp == EM_EVENT_GROUP_UNDEF */ + + /* + * Call the Execution Object (EO) receive function. + * Scheduling context may be released during this. + */ + if (q_elem->use_multi_rcv) + dispatch_multi_receive(ev_tbl, ev_hdr_tbl, num_events, + q_elem, true); + else + dispatch_single_receive(ev_tbl, ev_hdr_tbl, num_events, q_elem, true); + + /* + * Check for buffered events sent to output queues during the previous + * dispatch rounds + */ + if (!EM_OUTPUT_QUEUE_IMMEDIATE && + locm->output_queue_track.idx_cnt > 0) + output_queue_buffering_drain(); + + locm->current.q_elem = NULL; + locm->current.sched_q_elem = NULL; + locm->current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE; +} + +static inline void +dispatch_poll_ctrl_queue(void) +{ + const unsigned int poll_interval = em_shm->opt.dispatch.poll_ctrl_interval; + + /* + * Rate limit how often this core checks the unsched ctrl queue. + */ + + if (poll_interval > 1) { + em_locm_t *const locm = &em_locm; + + locm->dispatch_cnt--; + if (locm->dispatch_cnt > 0) + return; + locm->dispatch_cnt = poll_interval; + + odp_time_t now = odp_time_global(); + odp_time_t period = odp_time_diff(now, locm->dispatch_last_run); + odp_time_t poll_period = em_shm->opt.dispatch.poll_ctrl_interval_time; + + if (odp_time_cmp(period, poll_period) < 0) + return; + locm->dispatch_last_run = now; + } + + /* Poll internal unscheduled ctrl queues */ + poll_unsched_ctrl_queue(); +} + +/* + * Change the core state to idle and call idle hooks. If the core state changes, + * call to_idle hooks. If the core state is already idle, call while_idle hooks. + */ +static inline void +to_idle(void) +{ + if (EM_IDLE_HOOKS_ENABLE) { + em_locm_t *const locm = &em_locm; + + if (locm->idle_state == IDLE_STATE_ACTIVE) { + call_idle_hooks_to_idle(0); + locm->idle_state = IDLE_STATE_IDLE; + } else if (locm->idle_state == IDLE_STATE_IDLE) { + call_idle_hooks_while_idle(); + } + } +} + +/* + * Change the core state to active and call idle hooks. If the core state + * changes call to_active hooks. If the core state is already active no idle + * hooks will be called. + */ +static inline void +to_active(void) +{ + if (EM_IDLE_HOOKS_ENABLE) { + em_locm_t *const locm = &em_locm; + + if (locm->idle_state == IDLE_STATE_IDLE) { + call_idle_hooks_to_active(); + locm->idle_state = IDLE_STATE_ACTIVE; + } + } +} + +/* + * Run a dispatch round - query the scheduler for events and dispatch + */ +static inline int +dispatch_round(void) +{ + odp_queue_t odp_queue; + odp_event_t odp_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; + event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; + em_event_t ev_tbl[EM_SCHED_MULTI_MAX_BURST]; + + dispatch_poll_ctrl_queue(); + + /* + * Schedule events to the core from queues + */ + if (EM_DEBUG_TIMESTAMP_ENABLE) + em_locm.debug_ts[EM_DEBUG_TSP_SCHED_ENTRY] = debug_timestamp(); + + int num = odp_schedule_multi_no_wait(&odp_queue, odp_ev_tbl, EM_SCHED_MULTI_MAX_BURST); + + if (EM_DEBUG_TIMESTAMP_ENABLE) + em_locm.debug_ts[EM_DEBUG_TSP_SCHED_RETURN] = debug_timestamp(); + + if (unlikely(num <= 0)) { + /* + * No scheduled events available, check if the local queues + * contain anything on this core - e.g. 
pktio or something + * outside the dispatch-context might have sent to a local queue + * Update the EM_IDLE_STATE and call idle hooks if they are + * enabled + */ + if (em_locm.local_queues.empty) { + to_idle(); + } else { + to_active(); + check_local_queues(); + } + return 0; + } + + /* If scheduled events are available, update the EM_IDLE_STATE and call + * idle hooks if they are enabled + */ + to_active(); + + queue_elem_t *const q_elem = odp_queue_context(odp_queue); + const bool not_emq = !q_elem || (EM_CHECK_LEVEL > 2 && + q_elem->valid_check != QUEUE_ELEM_VALID); + + /* Events might originate from outside of EM and need init */ + if (num == 1) + ev_tbl[0] = event_init_odp(odp_ev_tbl[0], true, ev_hdr_tbl/*out:1*/); + else + event_init_odp_multi(odp_ev_tbl, ev_tbl/*out*/, ev_hdr_tbl/*out*/, + num, true/*is_extev*/); + + if (unlikely(not_emq || q_elem->state != EM_QUEUE_STATE_READY)) { + if (esv_enabled()) + evstate_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, + EVSTATE__DISPATCH_SCHED__FAIL); + /* Drop all events dequeued from this queue */ + em_free_multi(ev_tbl, num); + + if (not_emq) + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_DISPATCH, + "Event(s) from non-EM Q, drop %d events", num); + else + INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_DISPATCH, + "Q:%" PRI_QUEUE " not ready, state=%d\n" + " drop:%d event(s)\n", + q_elem->queue, q_elem->state, num); + return 0; + } + + if (q_elem->atomic_group == EM_ATOMIC_GROUP_UNDEF) { + em_locm.event_burst_cnt = num; + dispatch_events(ev_tbl, ev_hdr_tbl, num, q_elem); + } else { + atomic_group_dispatch(ev_tbl, num, q_elem); + } + + return num; +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_DISPATCHER_H_ */ diff --git a/src/em_dispatcher_types.h b/src/em_dispatcher_types.h index 769b501a..fcb60bcf 100644 --- a/src/em_dispatcher_types.h +++ b/src/em_dispatcher_types.h @@ -1,48 +1,60 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
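dispatch_round() above is one iteration of the loop that the public em_dispatch() API drives on each core: poll the ctrl queues, ask the scheduler for a burst of events, init them and run the EO-receive path. A typical per-core loop, assuming an initialized EM application and an application-defined run flag:

#include <event_machine.h>
#include <stdatomic.h>

extern atomic_bool running;	/* hypothetical application run-flag */

static void core_dispatch_loop(void)
{
	while (atomic_load(&running)) {
		/* Run a burst of dispatch rounds, then re-check the flag;
		 * each round corresponds to dispatch_round() above.
		 */
		em_dispatch(16);
	}
}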
- */ - -/** - * @file - * EM internal dispatcher types & definitions - * - */ - -#ifndef EM_DISPATCHER_TYPES_H_ -#define EM_DISPATCHER_TYPES_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef __cplusplus -} -#endif - -#endif /* EM_DISPATCHER_TYPES_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * EM internal dispatcher types & definitions + * + */ + +#ifndef EM_DISPATCHER_TYPES_H_ +#define EM_DISPATCHER_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * The idle state of a core + */ +typedef enum { + /** Undefined */ + IDLE_STATE_UNDEF = 0, + /** Core is idle */ + IDLE_STATE_IDLE = 1, + /** Core is active, processing event */ + IDLE_STATE_ACTIVE = 2 +} idle_state_t; + +#ifdef __cplusplus +} +#endif + +#endif /* EM_DISPATCHER_TYPES_H_ */ diff --git a/src/em_eo.c b/src/em_eo.c index 10f14c2d..8b261a2b 100644 --- a/src/em_eo.c +++ b/src/em_eo.c @@ -1,1377 +1,1425 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
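The idle_state_t values above back the new idle hooks: to_idle()/to_active() in the dispatcher transition the per-core state and run the corresponding hook once per transition. A registration sketch; the hook typedefs and register-function names below are assumptions based on this release's idle-hook additions, and EM_IDLE_HOOKS_ENABLE must be enabled at compile time:

#include <event_machine.h>

static void on_to_idle(uint64_t to_idle_delay_ns)
{
	/* Called once per active -> idle transition (assumed signature) */
	(void)to_idle_delay_ns;
}

static void on_to_active(void)
{
	/* Called once per idle -> active transition (assumed signature) */
}

static void setup_idle_hooks(void)
{
	/* Register-function names assumed per the v3.0.0 idle-hook API */
	em_hooks_register_to_idle(on_to_idle);
	em_hooks_register_to_active(on_to_active);
}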
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -/** - * Params for eo_local_func_call_req(). - * Init params with eo_local_func_call_param_init() before usage. - */ -typedef struct { - eo_elem_t *eo_elem; - queue_elem_t *q_elem; - int delete_queues; - uint64_t ev_id; - void (*f_done_callback)(void *arg_ptr); - int num_notif; - const em_notif_t *notif_tbl; /* notif_tbl[num_notif] */ - int exclude_current_core; - bool sync_operation; -} eo_local_func_call_param_t; - -static void -eo_local_func_call_param_init(eo_local_func_call_param_t *param); -static em_status_t -eo_local_func_call_req(const eo_local_func_call_param_t *param); - -static em_status_t -check_eo_local_status(const loc_func_retval_t *loc_func_retvals); - -static void -eo_start_done_callback(void *args); -static void -eo_start_sync_done_callback(void *args); - -static void -eo_stop_done_callback(void *args); -static void -eo_stop_sync_done_callback(void *args); - -static em_status_t -eo_remove_queue_local(const eo_elem_t *eo_elem, const queue_elem_t *q_elem); -static void -eo_remove_queue_done_callback(void *args); - -static em_status_t -eo_remove_queue_sync_local(const eo_elem_t *eo_elem, - const queue_elem_t *q_elem); -static void -eo_remove_queue_sync_done_callback(void *args); - -static em_status_t -eo_remove_queue_all_local(const eo_elem_t *eo_elem, int delete_queues); -static void -eo_remove_queue_all_done_callback(void *args); - -static em_status_t -eo_remove_queue_all_sync_local(const eo_elem_t *eo_elem, int delete_queues); -static void -eo_remove_queue_all_sync_done_callback(void *args); - -static inline eo_elem_t * -eo_poolelem2eo(const objpool_elem_t *const eo_pool_elem) -{ - return (eo_elem_t *)((uintptr_t)eo_pool_elem - - offsetof(eo_elem_t, eo_pool_elem)); -} - -em_status_t -eo_init(eo_tbl_t eo_tbl[], eo_pool_t *eo_pool) -{ - int ret; - const int cores = em_core_count(); - - memset(eo_tbl, 0, sizeof(eo_tbl_t)); - memset(eo_pool, 0, sizeof(eo_pool_t)); - - for (int i = 0; i < EM_MAX_EOS; i++) { - eo_elem_t *const eo_elem = &eo_tbl->eo_elem[i]; - /* Store EO handle */ - eo_elem->eo = eo_idx2hdl(i); - /* Initialize empty EO-queue list */ - env_spinlock_init(&eo_elem->lock); - list_init(&eo_elem->queue_list); - list_init(&eo_elem->startfn_evlist); - } - - ret = objpool_init(&eo_pool->objpool, cores); - if (ret != 0) - return EM_ERR_LIB_FAILED; - - for (int i = 0; i < EM_MAX_EOS; i++) - objpool_add(&eo_pool->objpool, i % cores, - &eo_tbl->eo_elem[i].eo_pool_elem); - - env_atomic32_init(&em_shm->eo_count); - - return EM_OK; -} - -em_eo_t -eo_alloc(void) -{ - const eo_elem_t *eo_elem; - const objpool_elem_t *eo_pool_elem; - - eo_pool_elem = objpool_rem(&em_shm->eo_pool.objpool, em_core_id()); - if (unlikely(eo_pool_elem == NULL)) - return EM_EO_UNDEF; - - eo_elem = eo_poolelem2eo(eo_pool_elem); - env_atomic32_inc(&em_shm->eo_count); - - return eo_elem->eo; -} - -em_status_t -eo_free(em_eo_t eo) -{ - eo_elem_t *eo_elem = eo_elem_get(eo); - - if (unlikely(eo_elem == NULL)) - return EM_ERR_BAD_ID; - - 
eo_elem->state = EM_EO_STATE_UNDEF; - - objpool_add(&em_shm->eo_pool.objpool, - eo_elem->eo_pool_elem.subpool_idx, &eo_elem->eo_pool_elem); - env_atomic32_dec(&em_shm->eo_count); - - return EM_OK; -} - -/** - * Add a queue to an EO - */ -em_status_t -eo_add_queue(eo_elem_t *const eo_elem, queue_elem_t *const q_elem) -{ - queue_state_t old_state = q_elem->state; - queue_state_t new_state = EM_QUEUE_STATE_BIND; - em_status_t err; - - err = queue_state_change__check(old_state, new_state, 1/*is_setup*/); - if (unlikely(err != EM_OK)) - return err; - - q_elem->use_multi_rcv = eo_elem->use_multi_rcv; - q_elem->max_events = eo_elem->max_events; - q_elem->receive_func = eo_elem->receive_func; - q_elem->receive_multi_func = eo_elem->receive_multi_func; - - q_elem->eo = eo_elem->eo; - q_elem->eo_ctx = eo_elem->eo_ctx; - q_elem->eo_elem = eo_elem; - q_elem->state = new_state; - - /* Link the new queue into the EO's queue-list */ - env_spinlock_lock(&eo_elem->lock); - list_add(&eo_elem->queue_list, &q_elem->queue_node); - env_atomic32_inc(&eo_elem->num_queues); - env_spinlock_unlock(&eo_elem->lock); - - return EM_OK; -} - -static inline em_status_t -eo_rem_queue_locked(eo_elem_t *const eo_elem, queue_elem_t *const q_elem) -{ - queue_state_t old_state = q_elem->state; - queue_state_t new_state = EM_QUEUE_STATE_INIT; - em_status_t err; - - err = queue_state_change__check(old_state, new_state, 0/*!is_setup*/); - if (unlikely(err != EM_OK)) - return err; - - list_rem(&eo_elem->queue_list, &q_elem->queue_node); - env_atomic32_dec(&eo_elem->num_queues); - - q_elem->state = new_state; - q_elem->eo = EM_EO_UNDEF; - q_elem->eo_elem = NULL; - - return EM_OK; -} - -/** - * Remove a queue from an EO - */ -em_status_t -eo_rem_queue(eo_elem_t *const eo_elem, queue_elem_t *const q_elem) -{ - em_status_t err; - - env_spinlock_lock(&eo_elem->lock); - err = eo_rem_queue_locked(eo_elem, q_elem); - env_spinlock_unlock(&eo_elem->lock); - - if (unlikely(err != EM_OK)) - return err; - - return EM_OK; -} - -/* - * Remove all queues associated with the EO. - * Note: does not delete the queues. - */ -em_status_t -eo_rem_queue_all(eo_elem_t *const eo_elem) -{ - em_status_t err = EM_OK; - queue_elem_t *q_elem; - - list_node_t *pos; - const list_node_t *list_node; - - env_spinlock_lock(&eo_elem->lock); - - /* Loop through all queues associated with the EO */ - list_for_each(&eo_elem->queue_list, pos, list_node) { - q_elem = list_node_to_queue_elem(list_node); - /* remove the queue from the EO */ - err = eo_rem_queue_locked(eo_elem, q_elem); - if (unlikely(err != EM_OK)) - break; - } /* end loop */ - - env_spinlock_unlock(&eo_elem->lock); - - return err; -} - -/* - * Delete all queues associated with the EO. - * The queue needs to be removed from the EO before the actual delete. 
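eo_add_queue()/eo_rem_queue() above are the internal halves of the public EO-queue lifecycle: a queue is bound to an EO in state BIND and becomes ready only when the EO is started. The usual application-side order, as a sketch with error handling elided ('my_start', 'my_stop' and 'my_receive' are assumed to be defined elsewhere):

#include <event_machine.h>

em_status_t my_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf);
em_status_t my_stop(void *eo_ctx, em_eo_t eo);
void my_receive(void *eo_ctx, em_event_t event, em_event_type_t type,
		em_queue_t queue, void *q_ctx);

static em_eo_t setup_eo(void)
{
	em_status_t start_result;
	em_eo_t eo = em_eo_create("my-eo", my_start, NULL, my_stop, NULL,
				  my_receive, NULL /* eo_ctx */);
	em_queue_t q = em_queue_create("my-q", EM_QUEUE_TYPE_ATOMIC,
				       EM_QUEUE_PRIO_NORMAL,
				       EM_QUEUE_GROUP_DEFAULT, NULL);

	/* Add before start: the queue is enabled when the EO starts */
	em_eo_add_queue_sync(eo, q);
	em_eo_start_sync(eo, &start_result, NULL /* conf */);
	return eo;
}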
- */ -em_status_t -eo_delete_queue_all(eo_elem_t *const eo_elem) -{ - em_status_t err = EM_OK; - queue_elem_t *q_elem; - - list_node_t *pos; - const list_node_t *list_node; - - env_spinlock_lock(&eo_elem->lock); - - /* Loop through all queues associated with the EO */ - list_for_each(&eo_elem->queue_list, pos, list_node) { - q_elem = list_node_to_queue_elem(list_node); - /* remove the queue from the EO */ - err = eo_rem_queue_locked(eo_elem, q_elem); - if (unlikely(err != EM_OK)) - break; - /* delete the queue */ - err = queue_delete(q_elem); - if (unlikely(err != EM_OK)) - break; - } /* end loop */ - - env_spinlock_unlock(&eo_elem->lock); - - return err; -} - -em_status_t -eo_start_local_req(eo_elem_t *const eo_elem, - int num_notif, const em_notif_t notif_tbl[]) -{ - eo_local_func_call_param_t param; - - eo_local_func_call_param_init(¶m); - param.eo_elem = eo_elem; - param.q_elem = NULL; /* no q_elem */ - param.delete_queues = EM_FALSE; - param.ev_id = EO_START_LOCAL_REQ; - param.f_done_callback = eo_start_done_callback; - param.num_notif = num_notif; - param.notif_tbl = notif_tbl; - param.exclude_current_core = EM_FALSE; /* all cores */ - param.sync_operation = false; - - return eo_local_func_call_req(¶m); -} - -/** - * Callback function run when all start_local functions are finished, - * triggered by calling em_eo_start() when using local-start functions - */ -static void -eo_start_done_callback(void *args) -{ - const loc_func_retval_t *loc_func_retvals = args; - eo_elem_t *const eo_elem = loc_func_retvals->eo_elem; - em_status_t ret; - - if (unlikely(eo_elem == NULL)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_START_DONE_CB, - "eo_elem is NULL!"); - return; - } - - if (check_eo_local_status(loc_func_retvals) == EM_OK) { - ret = queue_enable_all(eo_elem); /* local starts OK */ - if (ret == EM_OK) - eo_elem->state = EM_EO_STATE_RUNNING; - } - - /* free the storage for local func return values */ - em_free(loc_func_retvals->event); - - /* Send events buffered during the EO-start/local-start functions */ - eo_start_send_buffered_events(eo_elem); -} - -em_status_t -eo_start_sync_local_req(eo_elem_t *const eo_elem) -{ - eo_local_func_call_param_t param; - - eo_local_func_call_param_init(¶m); - param.eo_elem = eo_elem; - param.q_elem = NULL; /* no q_elem */ - param.delete_queues = EM_FALSE; - param.ev_id = EO_START_SYNC_LOCAL_REQ; - param.f_done_callback = eo_start_sync_done_callback; - param.num_notif = 0; - param.notif_tbl = NULL; - param.exclude_current_core = EM_TRUE; /* exclude this core */ - param.sync_operation = true; - - return eo_local_func_call_req(¶m); -} - -/** - * Callback function run when all start_local functions are finished, - * triggered by calling em_eo_start_sync() when using local-start functions - */ -static void -eo_start_sync_done_callback(void *args) -{ - em_locm_t *const locm = &em_locm; - const loc_func_retval_t *loc_func_retvals = args; - eo_elem_t *const eo_elem = loc_func_retvals->eo_elem; - em_status_t ret; - - if (unlikely(eo_elem == NULL)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_START_SYNC_DONE_CB, - "eo_elem is NULL!"); - return; - } - - if (check_eo_local_status(loc_func_retvals) == EM_OK) { - ret = queue_enable_all(eo_elem); /* local starts OK */ - if (ret == EM_OK) - eo_elem->state = EM_EO_STATE_RUNNING; - } - - /* free the storage for local func return values */ - em_free(loc_func_retvals->event); - - /* Enable the caller of the sync API func to proceed (on this core) */ - locm->sync_api.in_progress = 
false; - - /* - * Events buffered during the EO-start/local-start functions are sent - * from em_eo_start_sync() after this. - */ -} - -/** - * Called by em_send() & variants during an EO start-function. - * - * Events sent from within the EO-start functions are buffered and sent - * after the start-operation has completed. Otherwise it would not be - * possible to reliably send events from the start-functions to the - * EO's own queues. - */ -int -eo_start_buffer_events(const em_event_t events[], int num, em_queue_t queue, - em_event_group_t event_group) -{ - event_hdr_t *ev_hdrs[num]; - eo_elem_t *const eo_elem = em_locm.start_eo_elem; - int i; - - if (unlikely(eo_elem == NULL)) - return 0; - - event_to_hdr_multi(events, ev_hdrs, num); - - env_spinlock_lock(&eo_elem->lock); - - for (i = 0; i < num; i++) { - ev_hdrs[i]->egrp = event_group; - ev_hdrs[i]->queue = queue; - list_add(&eo_elem->startfn_evlist, &ev_hdrs[i]->start_node); - } - - env_spinlock_unlock(&eo_elem->lock); - - return num; -} - -/** - * Send the buffered events at the end of the EO-start operation. - * - * Events sent from within the EO-start functions are buffered and sent - * after the start-operation has completed. Otherwise it would not be - * possible to reliably send events from the start-functions to the - * EO's own queues. - */ -void -eo_start_send_buffered_events(eo_elem_t *const eo_elem) -{ - list_node_t *pos; - list_node_t *start_node; - const event_hdr_t *ev_hdr; - const event_hdr_t *tmp_hdr; - em_event_t event; - em_event_t tmp_event; - em_queue_t queue; - em_queue_t tmp_queue; - em_event_group_t event_group; - em_event_group_t tmp_evgrp; - unsigned int ev_cnt; - unsigned int num_sent; - unsigned int i; - /* max events to send in a burst */ - const unsigned int max_ev = 32; - /* event burst storage, taken from stack, keep size reasonable */ - em_event_t events[max_ev]; - - env_spinlock_lock(&eo_elem->lock); - - /* - * Send the buffered events in bursts into the destination queue. - * - * This is startup: we can use some extra cycles to create the - * event-arrays to send in bursts. - */ - while (!list_is_empty(&eo_elem->startfn_evlist)) { - /* - * The first event of the burst determines the destination queue - * and the event group to use in em_send_group_multi() later on. 
- */ - start_node = list_rem_first(&eo_elem->startfn_evlist); - ev_hdr = start_node_to_event_hdr(start_node); - event_group = ev_hdr->egrp; - queue = ev_hdr->queue; - event = event_hdr_to_event(ev_hdr); - ev_cnt = 1; - - /* count events sent to the same queue with same event group */ - list_for_each(&eo_elem->startfn_evlist, pos, start_node) { - tmp_hdr = start_node_to_event_hdr(start_node); - tmp_evgrp = tmp_hdr->egrp; - tmp_queue = tmp_hdr->queue; - if (tmp_evgrp != event_group || - tmp_queue != queue) - break; - /* increment the event burst count and break on max */ - ev_cnt++; - if (ev_cnt == max_ev) - break; - } - - /* - * fill the array of events to be sent to the same queue - * note: ev_cnt <= max_ev - */ - events[0] = event; - for (i = 1; i < ev_cnt; i++) { - start_node = list_rem_first(&eo_elem->startfn_evlist); - tmp_hdr = start_node_to_event_hdr(start_node); - tmp_event = event_hdr_to_event(tmp_hdr); - events[i] = tmp_event; - } - /* send events with same destination queue and event group */ - if (event_group == EM_EVENT_GROUP_UNDEF) - num_sent = em_send_multi(events, ev_cnt, queue); - else - num_sent = em_send_group_multi(events, ev_cnt, queue, - event_group); - if (unlikely(num_sent != ev_cnt)) { - /* User's eo-start saw successful em_send, free here */ - for (i = num_sent; i < ev_cnt; i++) - em_free(events[i]); - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_EO_START, - "Q:%" PRI_QUEUE " req:%u sent:%u", - queue, ev_cnt, num_sent); - } - } - - list_init(&eo_elem->startfn_evlist); /* reset list for this eo_elem */ - env_spinlock_unlock(&eo_elem->lock); -} - -em_status_t -eo_stop_local_req(eo_elem_t *const eo_elem, - int num_notif, const em_notif_t notif_tbl[]) -{ - eo_local_func_call_param_t param; - - eo_local_func_call_param_init(¶m); - param.eo_elem = eo_elem; - param.q_elem = NULL; /* no q_elem */ - param.delete_queues = EM_FALSE; - param.ev_id = EO_STOP_LOCAL_REQ; - param.f_done_callback = eo_stop_done_callback; - param.num_notif = num_notif; - param.notif_tbl = notif_tbl; - param.exclude_current_core = EM_FALSE; /* all cores */ - param.sync_operation = false; - - return eo_local_func_call_req(¶m); -} - -/** - * Callback function run when all stop_local functions are finished, - * triggered by calling eo_eo_stop(). - */ -static void -eo_stop_done_callback(void *args) -{ - em_locm_t *const locm = &em_locm; - const loc_func_retval_t *loc_func_retvals = args; - eo_elem_t *const eo_elem = loc_func_retvals->eo_elem; - void *const eo_ctx = eo_elem->eo_ctx; - queue_elem_t *const save_q_elem = locm->current.q_elem; - queue_elem_t tmp_q_elem; - em_eo_t eo; - em_status_t ret; - - if (unlikely(eo_elem == NULL)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_STOP_DONE_CB, - "eo_elem is NULL!"); - return; - } - - eo = eo_elem->eo; - (void)check_eo_local_status(loc_func_retvals); - - /* Change state here to allow em_eo_delete() from EO global stop */ - eo_elem->state = EM_EO_STATE_CREATED; /* == EO_STATE_STOPPED */ - - /* - * Use a tmp q_elem as the 'current q_elem' to enable calling - * em_eo_current() from the EO stop functions. - * Before returning, restore the original 'current q_elem' from - * 'save_q_elem'. - */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = eo; - - locm->current.q_elem = &tmp_q_elem; - /* - * Call the Global EO stop function now that all - * EO local stop functions are done. 
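The buffering above is the reason an EO may em_send() to its own queues already from its start function, even though those queues are enabled only once the whole start operation has completed. A start-function sketch relying on that guarantee; the eo_ctx layout is hypothetical:

#include <event_machine.h>

static em_status_t my_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf)
{
	em_queue_t self_q = *(em_queue_t *)eo_ctx; /* hypothetical ctx layout */
	em_event_t init_ev = em_alloc(64, EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);

	(void)eo; (void)conf;

	if (init_ev == EM_EVENT_UNDEF)
		return EM_ERR_ALLOC_FAILED;

	/* Buffered by EM and delivered only after the EO-start operation,
	 * including all local starts, has completed successfully.
	 */
	return em_send(init_ev, self_q);
}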
- */ - ret = eo_elem->stop_func(eo_ctx, eo); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - - /* - * Note: the EO might not be available after this if the EO global stop - * called em_eo_delete()! - */ - - if (unlikely(ret != EM_OK)) - INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_DONE_CB, - "EO:%" PRI_EO " stop-func failed", eo); - - /* free the storage for local func return values */ - em_free(loc_func_retvals->event); -} - -em_status_t -eo_stop_sync_local_req(eo_elem_t *const eo_elem) -{ - eo_local_func_call_param_t param; - - eo_local_func_call_param_init(¶m); - param.eo_elem = eo_elem; - param.q_elem = NULL; /* no q_elem */ - param.delete_queues = EM_FALSE; - param.ev_id = EO_STOP_SYNC_LOCAL_REQ; - param.f_done_callback = eo_stop_sync_done_callback; - param.num_notif = 0; - param.notif_tbl = NULL; - param.exclude_current_core = EM_TRUE; /* exclude this core */ - param.sync_operation = true; - - return eo_local_func_call_req(¶m); -} - -/** - * Callback function run when all stop_local functions are finished, - * triggered by calling eo_eo_stop_sync(). - */ -static void -eo_stop_sync_done_callback(void *args) -{ - em_locm_t *const locm = &em_locm; - const loc_func_retval_t *loc_func_retvals = args; - const eo_elem_t *eo_elem = loc_func_retvals->eo_elem; - - if (unlikely(eo_elem == NULL)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_STOP_SYNC_DONE_CB, - "eo_elem is NULL!"); - /* Enable the caller of the sync API func to proceed */ - locm->sync_api.in_progress = false; - return; - } - - (void)check_eo_local_status(loc_func_retvals); - - /* free the storage for local func return values */ - em_free(loc_func_retvals->event); - - /* Enable the caller of the sync API func to proceed (on this core) */ - locm->sync_api.in_progress = false; -} - -em_status_t -eo_remove_queue_local_req(eo_elem_t *const eo_elem, queue_elem_t *const q_elem, - int num_notif, const em_notif_t notif_tbl[]) -{ - eo_local_func_call_param_t param; - - eo_local_func_call_param_init(¶m); - param.eo_elem = eo_elem; - param.q_elem = q_elem; - param.delete_queues = EM_FALSE; - param.ev_id = EO_REM_QUEUE_LOCAL_REQ; - param.f_done_callback = eo_remove_queue_done_callback; - param.num_notif = num_notif; - param.notif_tbl = notif_tbl; - param.exclude_current_core = EM_FALSE; /* all cores */ - param.sync_operation = false; - - return eo_local_func_call_req(¶m); -} - -static em_status_t -eo_remove_queue_local(const eo_elem_t *eo_elem, const queue_elem_t *q_elem) -{ - (void)eo_elem; - (void)q_elem; - - return EM_OK; -} - -static void -eo_remove_queue_done_callback(void *args) -{ - const loc_func_retval_t *loc_func_retvals = args; - eo_elem_t *const eo_elem = loc_func_retvals->eo_elem; - queue_elem_t *const q_elem = loc_func_retvals->q_elem; - em_status_t ret; - - if (unlikely(eo_elem == NULL || q_elem == NULL)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_REMOVE_QUEUE_DONE_CB, - "eo_elem/q_elem is NULL!"); - return; - } - - (void)check_eo_local_status(loc_func_retvals); - - /* Remove the queue from the EO */ - ret = eo_rem_queue(eo_elem, q_elem); - - if (unlikely(ret != EM_OK)) - INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_DONE_CB, - "EO:%" PRI_EO " remove Q:%" PRI_QUEUE " failed", - eo_elem->eo, q_elem->queue); - - /* free the storage for local func return values */ - em_free(loc_func_retvals->event); -} - -em_status_t -eo_remove_queue_sync_local_req(eo_elem_t *const eo_elem, - queue_elem_t *const q_elem) -{ - eo_local_func_call_param_t param; - - 
eo_local_func_call_param_init(¶m); - param.eo_elem = eo_elem; - param.q_elem = q_elem; - param.delete_queues = EM_FALSE; - param.ev_id = EO_REM_QUEUE_SYNC_LOCAL_REQ; - param.f_done_callback = eo_remove_queue_sync_done_callback; - param.num_notif = 0; - param.notif_tbl = NULL; - param.exclude_current_core = EM_TRUE; /* exclude this core */ - param.sync_operation = true; - - return eo_local_func_call_req(¶m); -} - -static em_status_t -eo_remove_queue_sync_local(const eo_elem_t *eo_elem, const queue_elem_t *q_elem) -{ - (void)eo_elem; - (void)q_elem; - - return EM_OK; -} - -static void -eo_remove_queue_sync_done_callback(void *args) -{ - em_locm_t *const locm = &em_locm; - const loc_func_retval_t *loc_func_retvals = args; - eo_elem_t *const eo_elem = loc_func_retvals->eo_elem; - queue_elem_t *const q_elem = loc_func_retvals->q_elem; - em_status_t ret; - - if (unlikely(eo_elem == NULL || q_elem == NULL)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_REMOVE_QUEUE_SYNC_DONE_CB, - "eo_elem/q_elem is NULL!"); - /* Enable the caller of the sync API func to proceed */ - locm->sync_api.in_progress = false; - return; - } - - (void)check_eo_local_status(loc_func_retvals); - - /* Remove the queue from the EO */ - ret = eo_rem_queue(eo_elem, q_elem); - - if (unlikely(ret != EM_OK)) - INTERNAL_ERROR(ret, - EM_ESCOPE_EO_REMOVE_QUEUE_SYNC_DONE_CB, - "EO:%" PRI_EO " remove Q:%" PRI_QUEUE " failed", - eo_elem->eo, q_elem->queue); - - /* free the storage for local func return values */ - em_free(loc_func_retvals->event); - - /* Enable the caller of the sync API func to proceed (on this core) */ - locm->sync_api.in_progress = false; -} - -em_status_t -eo_remove_queue_all_local_req(eo_elem_t *const eo_elem, int delete_queues, - int num_notif, const em_notif_t notif_tbl[]) -{ - eo_local_func_call_param_t param; - - eo_local_func_call_param_init(¶m); - param.eo_elem = eo_elem; - param.q_elem = NULL; /* no q_elem */ - param.delete_queues = delete_queues; - param.ev_id = EO_REM_QUEUE_ALL_LOCAL_REQ; - param.f_done_callback = eo_remove_queue_all_done_callback; - param.num_notif = num_notif; - param.notif_tbl = notif_tbl; - param.exclude_current_core = EM_FALSE; /* all cores */ - param.sync_operation = false; - - return eo_local_func_call_req(¶m); -} - -static em_status_t -eo_remove_queue_all_local(const eo_elem_t *eo_elem, int delete_queues) -{ - (void)eo_elem; - (void)delete_queues; - - return EM_OK; -} - -static void -eo_remove_queue_all_done_callback(void *args) -{ - const loc_func_retval_t *loc_func_retvals = args; - eo_elem_t *const eo_elem = loc_func_retvals->eo_elem; - int delete_queues = loc_func_retvals->delete_queues; - em_status_t ret; - - if (unlikely(eo_elem == NULL)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_REMOVE_QUEUE_ALL_DONE_CB, - "eo_elem is NULL!"); - return; - } - - (void)check_eo_local_status(loc_func_retvals); - - /* Remove or delete all the EO's queues */ - if (delete_queues) - ret = eo_delete_queue_all(eo_elem); - else - ret = eo_rem_queue_all(eo_elem); - - if (unlikely(ret != EM_OK)) - INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL_DONE_CB, - "EO:%" PRI_EO " removing all queues failed", - eo_elem->eo); - - /* free the storage for local func return values */ - em_free(loc_func_retvals->event); -} - -em_status_t -eo_remove_queue_all_sync_local_req(eo_elem_t *const eo_elem, int delete_queues) -{ - eo_local_func_call_param_t param; - - eo_local_func_call_param_init(¶m); - param.eo_elem = eo_elem; - param.q_elem = NULL; /* no q_elem */ - 
param.delete_queues = delete_queues; - param.ev_id = EO_REM_QUEUE_ALL_SYNC_LOCAL_REQ; - param.f_done_callback = eo_remove_queue_all_sync_done_callback; - param.num_notif = 0; - param.notif_tbl = NULL; - param.exclude_current_core = EM_TRUE; /* exclude this core */ - param.sync_operation = true; - - return eo_local_func_call_req(¶m); -} - -static em_status_t -eo_remove_queue_all_sync_local(const eo_elem_t *eo_elem, int delete_queues) -{ - (void)eo_elem; - (void)delete_queues; - - return EM_OK; -} - -static void -eo_remove_queue_all_sync_done_callback(void *args) -{ - em_locm_t *const locm = &em_locm; - const loc_func_retval_t *loc_func_retvals = args; - eo_elem_t *const eo_elem = loc_func_retvals->eo_elem; - int delete_queues = loc_func_retvals->delete_queues; - em_status_t ret; - - if (unlikely(eo_elem == NULL)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC_DONE_CB, - "eo_elem is NULL!"); - /* Enable the caller of the sync API func to proceed */ - locm->sync_api.in_progress = false; - return; - } - - (void)check_eo_local_status(loc_func_retvals); - - /* Remove or delete all the EO's queues */ - if (delete_queues) - ret = eo_delete_queue_all(eo_elem); - else - ret = eo_rem_queue_all(eo_elem); - - if (unlikely(ret != EM_OK)) - INTERNAL_ERROR(ret, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC_DONE_CB, - "EO:%" PRI_EO " removing all queues failed", - eo_elem->eo); - - /* free the storage for local func return values */ - em_free(loc_func_retvals->event); - - /* Enable the caller of the sync API func to proceed (on this core) */ - locm->sync_api.in_progress = false; -} - -static em_status_t -check_eo_local_status(const loc_func_retval_t *loc_func_retvals) -{ - const int cores = em_core_count(); - static const char core_err[] = "coreXX:0x12345678 "; - char errmsg[cores * sizeof(core_err)]; - int n = 0; - int c = 0; - int local_fail = 0; - em_status_t err; - - for (int i = 0; i < cores; i++) { - err = loc_func_retvals->core[i]; - if (err != EM_OK) { - local_fail = 1; - break; - } - } - - if (!local_fail) - return EM_OK; - - for (int i = 0; i < cores; i++) { - err = loc_func_retvals->core[i]; - if (err != EM_OK) { - n = snprintf(&errmsg[c], sizeof(core_err), - "core%02d:0x%08X ", i, err); - if ((unsigned int)n >= sizeof(core_err)) - break; - c += n; - } - } - errmsg[cores * sizeof(core_err) - 1] = '\0'; - - INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EVENT_INTERNAL_LFUNC_CALL, - "\nLocal start function failed on cores:\n" - "%s", errmsg); - return EM_ERR; -} - -static void -eo_local_func_call_param_init(eo_local_func_call_param_t *param) -{ - memset(param, 0, sizeof(*param)); -} - -/** - * Request a function to be run on each core and call 'f_done_callback(arg_ptr)' - * when all those functions have completed. 
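This request/done machinery is what implements the num_notif/notif_tbl parameters of the public EO APIs: the internal event is fanned out to all cores and the notifications are sent from the done-callback. The user-side view, assuming 'eo' and 'notif_q' exist (sketch, error handling elided):

#include <event_machine.h>

static void start_eo_with_notif(em_eo_t eo, em_queue_t notif_q)
{
	em_status_t start_result;
	em_notif_t notif = {
		.event = em_alloc(32, EM_EVENT_TYPE_SW, EM_POOL_DEFAULT),
		.queue = notif_q,
		.egroup = EM_EVENT_GROUP_UNDEF
	};

	/* Asynchronous start: 'notif.event' arrives in 'notif_q' once the
	 * global start and all per-core local starts have completed.
	 */
	em_eo_start(eo, &start_result, NULL /* conf */, 1, &notif);
}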
- */ -static em_status_t -eo_local_func_call_req(const eo_local_func_call_param_t *param) -{ - int err; - em_event_t event; - em_event_t tmp; - internal_event_t *i_event; - int core_count; - int free_count; - em_core_mask_t core_mask; - loc_func_retval_t *loc_func_retvals; - void *f_done_arg_ptr; - - core_count = em_core_count(); - em_core_mask_zero(&core_mask); - em_core_mask_set_count(core_count, &core_mask); - free_count = core_count + 1; /* all cores + 'done' event */ - if (param->exclude_current_core) { - /* EM _sync API func: exclude the calling core */ - em_core_mask_clr(em_core_id(), &core_mask); - free_count -= 1; - } - - event = em_alloc(sizeof(internal_event_t), - EM_EVENT_TYPE_SW, EM_POOL_DEFAULT); - RETURN_ERROR_IF(event == EM_EVENT_UNDEF, - EM_ERR_ALLOC_FAILED, EM_ESCOPE_EO_LOCAL_FUNC_CALL_REQ, - "Internal event (%u) allocation failed", param->ev_id); - i_event = em_event_pointer(event); - i_event->id = param->ev_id; - i_event->loc_func.eo_elem = param->eo_elem; - i_event->loc_func.q_elem = param->q_elem; - i_event->loc_func.delete_queues = param->delete_queues; - - tmp = em_alloc(sizeof(loc_func_retval_t), - EM_EVENT_TYPE_SW, EM_POOL_DEFAULT); - RETURN_ERROR_IF(tmp == EM_EVENT_UNDEF, - EM_ERR_ALLOC_FAILED, EM_ESCOPE_EO_LOCAL_FUNC_CALL_REQ, - "Internal loc_func_retval_t allocation failed"); - loc_func_retvals = em_event_pointer(tmp); - loc_func_retvals->eo_elem = param->eo_elem; - loc_func_retvals->q_elem = param->q_elem; - loc_func_retvals->delete_queues = param->delete_queues; - loc_func_retvals->event = tmp; /* store event handle for em_free() */ - env_atomic32_init(&loc_func_retvals->free_at_zero); - env_atomic32_set(&loc_func_retvals->free_at_zero, free_count); - for (int i = 0; i < core_count; i++) - loc_func_retvals->core[i] = EM_OK; - - /* ptr to retval storage so loc func calls can record retval there */ - i_event->loc_func.retvals = loc_func_retvals; - - /* Give ptr to retval storage also to 'done' function */ - f_done_arg_ptr = loc_func_retvals; - - if (em_core_mask_iszero(&core_mask)) { - /* - * Special handling when calling sync APIs with one core in use. - * Need to call both local- and done-funcs here and return. - */ - env_atomic32_inc(&loc_func_retvals->free_at_zero); - i_event__eo_local_func_call_req(i_event); - em_free(event); - param->f_done_callback(f_done_arg_ptr); - - return EM_OK; - } - - err = send_core_ctrl_events(&core_mask, event, - param->f_done_callback, f_done_arg_ptr, - param->num_notif, param->notif_tbl, - param->sync_operation); - if (unlikely(err)) { - char core_mask_str[EM_CORE_MASK_STRLEN]; - uint32_t unsent_cnt = err; - uint32_t cnt; - - em_free(event); - cnt = env_atomic32_sub_return(&loc_func_retvals->free_at_zero, - unsent_cnt + 1); - if (cnt == 0) - em_free(tmp); - - em_core_mask_tostr(core_mask_str, EM_CORE_MASK_STRLEN, - &core_mask); - return INTERNAL_ERROR(EM_ERR_LIB_FAILED, - EM_ESCOPE_EO_LOCAL_FUNC_CALL_REQ, - "send_core_ctrl_events(mask=%s) failed", - core_mask_str); - } - - return EM_OK; -} - -/** - * EM internal event handler (see em_internal_event.c&h) - * Handle the internal event requesting a local function call. 
- */ -void -i_event__eo_local_func_call_req(const internal_event_t *i_ev) -{ - em_locm_t *const locm = &em_locm; - const uint64_t f_type = i_ev->loc_func.id; - eo_elem_t *eo_elem = i_ev->loc_func.eo_elem; - const queue_elem_t *q_elem = i_ev->loc_func.q_elem; - int delete_queues = i_ev->loc_func.delete_queues; - loc_func_retval_t *const loc_func_retvals = i_ev->loc_func.retvals; - em_status_t status = EM_ERR; - queue_elem_t *const save_q_elem = locm->current.q_elem; - queue_elem_t tmp_q_elem; - - switch (f_type) { - case EO_START_SYNC_LOCAL_REQ: - if (em_core_count() == 1) { - /* - * Special handling when calling sync API with only one - * core in use: start-local() func already called by - * em_eo_start_sync() and this func called directly from - * within eo_local_func_call_req(). - */ - status = EM_OK; - break; - } - /* fallthrough */ - case EO_START_LOCAL_REQ: - /* - * Use a tmp q_elem as the 'current q_elem' to enable calling - * em_eo_current() from the EO start functions. - * Before returning, restore the original 'current q_elem' from - * 'save_q_elem'. - */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = eo_elem->eo; - locm->current.q_elem = &tmp_q_elem; - - locm->start_eo_elem = eo_elem; - status = eo_elem->start_local_func(eo_elem->eo_ctx, - eo_elem->eo); - locm->start_eo_elem = NULL; - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - break; - - case EO_STOP_SYNC_LOCAL_REQ: - if (em_core_count() == 1) { - /* - * Special handling when calling sync API with only one - * core in use: stop-local() func already called by - * em_eo_stop_sync() and this func called directly from - * within eo_local_func_call_req(). - */ - status = EM_OK; - break; - } - /* fallthrough */ - case EO_STOP_LOCAL_REQ: - if (eo_elem->stop_local_func != NULL) { - /* - * Use a tmp q_elem as the 'current q_elem' to enable - * calling em_eo_current() from the EO start functions. - * Before returning, restore the original 'current - * q_elem' from 'save_q_elem'. - */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = eo_elem->eo; - locm->current.q_elem = &tmp_q_elem; - - status = eo_elem->stop_local_func(eo_elem->eo_ctx, - eo_elem->eo); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - } else { - status = EM_OK; /* No local stop func given */ - } - break; - - case EO_REM_QUEUE_LOCAL_REQ: - status = eo_remove_queue_local(eo_elem, q_elem); - break; - case EO_REM_QUEUE_SYNC_LOCAL_REQ: - status = eo_remove_queue_sync_local(eo_elem, q_elem); - break; - case EO_REM_QUEUE_ALL_LOCAL_REQ: - status = eo_remove_queue_all_local(eo_elem, delete_queues); - break; - case EO_REM_QUEUE_ALL_SYNC_LOCAL_REQ: - status = eo_remove_queue_all_sync_local(eo_elem, delete_queues); - break; - default: - status = EM_FATAL(EM_ERR_BAD_ID); - break; - } - - if (status != EM_OK) { - /* store failing status, egrp 'done' can check if all ok */ - loc_func_retvals->core[em_core_id()] = status; - - INTERNAL_ERROR(status, EM_ESCOPE_EVENT_INTERNAL_LFUNC_CALL, - "EO:%" PRI_EO "-%s:Local func(%" PRIx64 ")fail", - eo_elem->eo, eo_elem->name, f_type); - } - - /* - * In case of setup error, determine if 'loc_func_retvals' should be - * freed here, in the setup code in eo_local_func_call_req() or - * normally in a successful case in the - * eo_start/stop_local__done_callback() function when the event group - * completion notif is handled. 
- */ - const uint32_t cnt = - env_atomic32_sub_return(&loc_func_retvals->free_at_zero, 1); - if (unlikely(cnt == 0)) { - (void)check_eo_local_status(loc_func_retvals); - em_free(loc_func_retvals->event); - } -} - -unsigned int -eo_count(void) -{ - return env_atomic32_get(&em_shm->eo_count); -} - -size_t eo_get_name(const eo_elem_t *const eo_elem, - char name[/*out*/], const size_t maxlen) -{ - size_t len; - - len = strnlen(eo_elem->name, sizeof(eo_elem->name) - 1); - if (maxlen - 1 < len) - len = maxlen - 1; - - memcpy(name, eo_elem->name, len); - name[len] = '\0'; - - return len; -} - -static const char *state_to_str(em_eo_state_t state) -{ - const char *state_str; - - switch (state) { - case EM_EO_STATE_UNDEF: - state_str = "UNDEF"; - break; - case EM_EO_STATE_CREATED: - state_str = "CREATED"; - break; - case EM_EO_STATE_STARTING: - state_str = "STARTING"; - break; - case EM_EO_STATE_RUNNING: - state_str = "RUNNING"; - break; - case EM_EO_STATE_STOPPING: - state_str = "STOPPING"; - break; - case EM_EO_STATE_ERROR: - state_str = "ERROR"; - break; - default: - state_str = "UNKNOWN"; - break; - } - - return state_str; -} - -#define EO_INFO_HDR_FMT \ -"Number of EOs: %d\n\n" \ -"ID Name State Start-local Stop-local" \ -" Multi-rcv Max-events Err-hdl Q-num EO-ctx\n" \ -"---------------------------------------------------------------------------" \ -"-----------------------------------------------\n%s\n" - -#define EO_INFO_LEN 123 -#define EO_INFO_FMT "%-10" PRI_EO "%-32s%-10s%-13c%-12c%-11c%-12d%-9c%-7d%-6c\n" - -void eo_info_print_all(void) -{ - unsigned int num_eo; - eo_elem_t *eo_elem; - int len = 0; - int n_print = 0; - em_eo_t eo = em_eo_get_first(&num_eo); - - /* - * num_eo may not match the amount of EOs actually returned by iterating - * using em_eo_get_next() if EOs are added or removed in parallel by - * another core. Thus space for 10 extra EOs is reserved. If more than 10 - * EOs are added by other cores in parallel, we only print information of - * the (num_eo + 10) EOs. - * - * The extra 1 byte is reserved for the terminating null byte. - */ - const int eo_info_str_len = (num_eo + 10) * EO_INFO_LEN + 1; - char eo_info_str[eo_info_str_len]; - - while (eo != EM_EO_UNDEF) { - eo_elem = eo_elem_get(eo); - if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) { - eo = em_eo_get_next(); - continue; - } - - n_print = snprintf(eo_info_str + len, - eo_info_str_len - len, - EO_INFO_FMT, eo, eo_elem->name, - state_to_str(eo_elem->state), - eo_elem->start_local_func ? 'Y' : 'N', - eo_elem->stop_local_func ? 'Y' : 'N', - eo_elem->use_multi_rcv ? 'Y' : 'N', - eo_elem->max_events, - eo_elem->error_handler_func ? 'Y' : 'N', - env_atomic32_get(&eo_elem->num_queues), - eo_elem->eo_ctx ? 'Y' : 'N'); - - /* Not enough space to hold more eo info */ - if (n_print >= eo_info_str_len - len) - break; - - len += n_print; - eo = em_eo_get_next(); - } - - /* No EO */ - if (!len) { - EM_PRINT("No EO has been created!\n"); - return; - } - - /* - * To prevent printing incomplete information of the last eo when there - * is not enough space to hold all eo info. 
- */ - eo_info_str[len] = '\0'; - EM_PRINT(EO_INFO_HDR_FMT, num_eo, eo_info_str); -} - -#define EO_Q_INFO_HDR_FMT \ -"EO %" PRI_EO "(%s) has %d queue(s):\n\n" \ -"Handle Name Priority Type State Qgrp" \ -" Ctx\n" \ -"---------------------------------------------------------------------------" \ -"---------\n" \ -"%s\n" - -#define EO_Q_INFO_LEN 85 -#define EO_Q_INFO_FMT \ -"%-10" PRI_QUEUE "%-32s%-10d%-10s%-9s%-10" PRI_QGRP "%-3c\n" /*85 characters*/ - -void eo_queue_info_print(em_eo_t eo) -{ - unsigned int q_num; - em_queue_t q; - const queue_elem_t *q_elem; - char q_name[EM_QUEUE_NAME_LEN]; - int len = 0; - int n_print = 0; - const eo_elem_t *eo_elem = eo_elem_get(eo); - - if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) { - EM_PRINT("EO %" PRI_EO " is not created!\n", eo); - return; - } - - q = em_eo_queue_get_first(&q_num, eo); - - /* - * q_num may not match the amount of queues actually returned by iterating - * using em_eo_queue_get_next() if queues are added or removed in parallel - * by another core. Thus space for 10 extra queues is reserved. If more - * than 10 queues are added to this EO by other cores, we only print info - * of the (q_num + 10) queues. - * - * The extra 1 byte is reserved for the terminating null byte. - */ - const int eo_q_info_str_len = (q_num + 10) * EO_Q_INFO_LEN + 1; - char eo_q_info_str[eo_q_info_str_len]; - - while (q != EM_QUEUE_UNDEF) { - q_elem = queue_elem_get(q); - if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { - q = em_eo_queue_get_next(); - continue; - } - - queue_get_name(q_elem, q_name, EM_QUEUE_NAME_LEN - 1); - - n_print = snprintf(eo_q_info_str + len, - eo_q_info_str_len - len, - EO_Q_INFO_FMT, - q, q_name, q_elem->priority, - queue_get_type_str(q_elem->type), - queue_get_state_str(q_elem->state), - q_elem->queue_group, - q_elem->context ? 'Y' : 'N'); - - /* Not enough space to hold more queue info */ - if (n_print >= eo_q_info_str_len - len) - break; - - len += n_print; - q = em_eo_queue_get_next(); - } - - /* EO has no queue */ - if (!len) { - EM_PRINT("EO %" PRI_EO "(%s) has no queue!\n", eo, eo_elem->name); - return; - } - - /* - * To prevent printing incomplete information of the last queue when - * there is not enough space to hold all queue info. - */ - eo_q_info_str[len] = '\0'; - EM_PRINT(EO_Q_INFO_HDR_FMT, eo, eo_elem->name, q_num, eo_q_info_str); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +/** + * Params for eo_local_func_call_req(). + * Init params with eo_local_func_call_param_init() before usage. + */ +typedef struct { + eo_elem_t *eo_elem; + queue_elem_t *q_elem; + int delete_queues; + uint64_t ev_id; + void (*f_done_callback)(void *arg_ptr); + int num_notif; + const em_notif_t *notif_tbl; /* notif_tbl[num_notif] */ + int exclude_current_core; + bool sync_operation; +} eo_local_func_call_param_t; + +static void +eo_local_func_call_param_init(eo_local_func_call_param_t *param); +static em_status_t +eo_local_func_call_req(const eo_local_func_call_param_t *param); + +static em_status_t +check_eo_local_status(const loc_func_retval_t *loc_func_retvals); + +static void +eo_start_done_callback(void *args); +static void +eo_start_sync_done_callback(void *args); + +static void +eo_stop_done_callback(void *args); +static void +eo_stop_sync_done_callback(void *args); + +static em_status_t +eo_remove_queue_local(const eo_elem_t *eo_elem, const queue_elem_t *q_elem); +static void +eo_remove_queue_done_callback(void *args); + +static em_status_t +eo_remove_queue_sync_local(const eo_elem_t *eo_elem, + const queue_elem_t *q_elem); +static void +eo_remove_queue_sync_done_callback(void *args); + +static em_status_t +eo_remove_queue_all_local(const eo_elem_t *eo_elem, int delete_queues); +static void +eo_remove_queue_all_done_callback(void *args); + +static em_status_t +eo_remove_queue_all_sync_local(const eo_elem_t *eo_elem, int delete_queues); +static void +eo_remove_queue_all_sync_done_callback(void *args); + +static inline eo_elem_t * +eo_poolelem2eo(const objpool_elem_t *const eo_pool_elem) +{ + return (eo_elem_t *)((uintptr_t)eo_pool_elem - + offsetof(eo_elem_t, eo_pool_elem)); +} + +em_status_t +eo_init(eo_tbl_t eo_tbl[], eo_pool_t *eo_pool) +{ + int ret; + const int cores = em_core_count(); + + memset(eo_tbl, 0, sizeof(eo_tbl_t)); + memset(eo_pool, 0, sizeof(eo_pool_t)); + + for (int i = 0; i < EM_MAX_EOS; i++) { + eo_elem_t *const eo_elem = &eo_tbl->eo_elem[i]; + /* Store EO handle */ + eo_elem->eo = eo_idx2hdl(i); + /* Initialize empty EO-queue list */ + env_spinlock_init(&eo_elem->lock); + list_init(&eo_elem->queue_list); + eo_elem->stash = ODP_STASH_INVALID; + } + + ret = objpool_init(&eo_pool->objpool, cores); + if (ret != 0) + return EM_ERR_LIB_FAILED; + + for (int i = 0; i < EM_MAX_EOS; i++) + objpool_add(&eo_pool->objpool, i % cores, + &eo_tbl->eo_elem[i].eo_pool_elem); + + env_atomic32_init(&em_shm->eo_count); + + return EM_OK; +} + +em_eo_t +eo_alloc(void) +{ + const eo_elem_t *eo_elem; + const objpool_elem_t *eo_pool_elem; + + eo_pool_elem = objpool_rem(&em_shm->eo_pool.objpool, em_core_id()); + if (unlikely(eo_pool_elem == NULL)) + return EM_EO_UNDEF; + + eo_elem = eo_poolelem2eo(eo_pool_elem); + env_atomic32_inc(&em_shm->eo_count); + + return eo_elem->eo; +} + +em_status_t +eo_free(em_eo_t eo) +{ + eo_elem_t *eo_elem = eo_elem_get(eo); + + if (unlikely(eo_elem == NULL)) + return EM_ERR_BAD_ID; + + 
eo_elem->state = EM_EO_STATE_UNDEF; + + objpool_add(&em_shm->eo_pool.objpool, + eo_elem->eo_pool_elem.subpool_idx, &eo_elem->eo_pool_elem); + env_atomic32_dec(&em_shm->eo_count); + + return EM_OK; +} + +/** + * Add a queue to an EO + */ +em_status_t +eo_add_queue(eo_elem_t *const eo_elem, queue_elem_t *const q_elem) +{ + queue_state_t old_state = q_elem->state; + queue_state_t new_state = EM_QUEUE_STATE_BIND; + em_status_t err; + + err = queue_state_change__check(old_state, new_state, 1/*is_setup*/); + if (unlikely(err != EM_OK)) + return err; + + q_elem->use_multi_rcv = eo_elem->use_multi_rcv; + q_elem->max_events = eo_elem->max_events; + q_elem->receive_func = eo_elem->receive_func; + q_elem->receive_multi_func = eo_elem->receive_multi_func; + + q_elem->eo = eo_elem->eo; + q_elem->eo_ctx = eo_elem->eo_ctx; + q_elem->eo_elem = eo_elem; + q_elem->state = new_state; + + /* Link the new queue into the EO's queue-list */ + env_spinlock_lock(&eo_elem->lock); + list_add(&eo_elem->queue_list, &q_elem->queue_node); + env_atomic32_inc(&eo_elem->num_queues); + env_spinlock_unlock(&eo_elem->lock); + + return EM_OK; +} + +static inline em_status_t +eo_rem_queue_locked(eo_elem_t *const eo_elem, queue_elem_t *const q_elem) +{ + queue_state_t old_state = q_elem->state; + queue_state_t new_state = EM_QUEUE_STATE_INIT; + em_status_t err; + + err = queue_state_change__check(old_state, new_state, 0/*!is_setup*/); + if (unlikely(err != EM_OK)) + return err; + + list_rem(&eo_elem->queue_list, &q_elem->queue_node); + env_atomic32_dec(&eo_elem->num_queues); + + q_elem->state = new_state; + q_elem->eo = EM_EO_UNDEF; + q_elem->eo_elem = NULL; + + return EM_OK; +} + +/** + * Remove a queue from an EO + */ +em_status_t +eo_rem_queue(eo_elem_t *const eo_elem, queue_elem_t *const q_elem) +{ + em_status_t err; + + env_spinlock_lock(&eo_elem->lock); + err = eo_rem_queue_locked(eo_elem, q_elem); + env_spinlock_unlock(&eo_elem->lock); + + if (unlikely(err != EM_OK)) + return err; + + return EM_OK; +} + +/* + * Remove all queues associated with the EO. + * Note: does not delete the queues. + */ +em_status_t +eo_rem_queue_all(eo_elem_t *const eo_elem) +{ + em_status_t err = EM_OK; + queue_elem_t *q_elem; + + list_node_t *pos; + const list_node_t *list_node; + + env_spinlock_lock(&eo_elem->lock); + + /* Loop through all queues associated with the EO */ + list_for_each(&eo_elem->queue_list, pos, list_node) { + q_elem = list_node_to_queue_elem(list_node); + /* remove the queue from the EO */ + err = eo_rem_queue_locked(eo_elem, q_elem); + if (unlikely(err != EM_OK)) + break; + } /* end loop */ + + env_spinlock_unlock(&eo_elem->lock); + + return err; +} + +/* + * Delete all queues associated with the EO. + * The queue needs to be removed from the EO before the actual delete. 
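+ * (In the loop below, each queue is first unlinked from the EO via
+ * eo_rem_queue_locked() and only then deleted with queue_delete().)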
+ */
+em_status_t
+eo_delete_queue_all(eo_elem_t *const eo_elem)
+{
+	em_status_t err = EM_OK;
+	queue_elem_t *q_elem;
+
+	list_node_t *pos;
+	const list_node_t *list_node;
+
+	env_spinlock_lock(&eo_elem->lock);
+
+	/* Loop through all queues associated with the EO */
+	list_for_each(&eo_elem->queue_list, pos, list_node) {
+		q_elem = list_node_to_queue_elem(list_node);
+		/* remove the queue from the EO */
+		err = eo_rem_queue_locked(eo_elem, q_elem);
+		if (unlikely(err != EM_OK))
+			break;
+		/* delete the queue */
+		err = queue_delete(q_elem);
+		if (unlikely(err != EM_OK))
+			break;
+	} /* end loop */
+
+	env_spinlock_unlock(&eo_elem->lock);
+
+	return err;
+}
+
+em_status_t
+eo_start_local_req(eo_elem_t *const eo_elem,
+		   int num_notif, const em_notif_t notif_tbl[])
+{
+	eo_local_func_call_param_t param;
+
+	eo_local_func_call_param_init(&param);
+	param.eo_elem = eo_elem;
+	param.q_elem = NULL; /* no q_elem */
+	param.delete_queues = EM_FALSE;
+	param.ev_id = EO_START_LOCAL_REQ;
+	param.f_done_callback = eo_start_done_callback;
+	param.num_notif = num_notif;
+	param.notif_tbl = notif_tbl;
+	param.exclude_current_core = EM_FALSE; /* all cores */
+	param.sync_operation = false;
+
+	return eo_local_func_call_req(&param);
+}
+
+/**
+ * Callback function run when all start_local functions are finished,
+ * triggered by calling em_eo_start() when using local-start functions
+ */
+static void
+eo_start_done_callback(void *args)
+{
+	const loc_func_retval_t *loc_func_retvals = args;
+	eo_elem_t *const eo_elem = loc_func_retvals->eo_elem;
+	em_status_t ret;
+
+	if (unlikely(eo_elem == NULL)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER),
+			       EM_ESCOPE_EO_START_DONE_CB,
+			       "eo_elem is NULL!");
+		return;
+	}
+
+	if (check_eo_local_status(loc_func_retvals) == EM_OK) {
+		ret = queue_enable_all(eo_elem); /* local starts OK */
+		if (ret == EM_OK)
+			eo_elem->state = EM_EO_STATE_RUNNING;
+	}
+
+	/* free the storage for local func return values */
+	em_free(loc_func_retvals->event);
+
+	/* Send events buffered during the EO-start/local-start functions */
+	eo_start_send_buffered_events(eo_elem);
+}
+
+em_status_t
+eo_start_sync_local_req(eo_elem_t *const eo_elem)
+{
+	eo_local_func_call_param_t param;
+
+	eo_local_func_call_param_init(&param);
+	param.eo_elem = eo_elem;
+	param.q_elem = NULL; /* no q_elem */
+	param.delete_queues = EM_FALSE;
+	param.ev_id = EO_START_SYNC_LOCAL_REQ;
+	param.f_done_callback = eo_start_sync_done_callback;
+	param.num_notif = 0;
+	param.notif_tbl = NULL;
+	param.exclude_current_core = EM_TRUE; /* exclude this core */
+	param.sync_operation = true;
+
+	return eo_local_func_call_req(&param);
+}
+
+/**
+ * Callback function run when all start_local functions are finished,
+ * triggered by calling em_eo_start_sync() when using local-start functions
+ */
+static void
+eo_start_sync_done_callback(void *args)
+{
+	em_locm_t *const locm = &em_locm;
+	const loc_func_retval_t *loc_func_retvals = args;
+	eo_elem_t *const eo_elem = loc_func_retvals->eo_elem;
+	em_status_t ret;
+
+	if (unlikely(eo_elem == NULL)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER),
+			       EM_ESCOPE_EO_START_SYNC_DONE_CB,
+			       "eo_elem is NULL!");
+		return;
+	}
+
+	if (check_eo_local_status(loc_func_retvals) == EM_OK) {
+		ret = queue_enable_all(eo_elem); /* local starts OK */
+		if (ret == EM_OK)
+			eo_elem->state = EM_EO_STATE_RUNNING;
+	}
+
+	/* free the storage for local func return values */
+	em_free(loc_func_retvals->event);
+
+	/* Enable the caller of the sync API func to proceed (on this core) */
+	locm->sync_api.in_progress =
false; + + /* + * Events buffered during the EO-start/local-start functions are sent + * from em_eo_start_sync() after this. + */ +} + +/** + * Called by em_send() & variants during an EO start-function. + * + * Events sent from within the EO-start functions are buffered and sent + * after the start-operation has completed. Otherwise it would not be + * possible to reliably send events from the start-functions to the + * EO's own queues. + */ +int eo_start_buffer_events(const em_event_t events[], int num, em_queue_t queue) +{ + eo_elem_t *const eo_elem = em_locm.start_eo_elem; + const uint16_t qidx = queue_hdl2idx(queue); + const evhdl_t *const evhdl_tbl = (const evhdl_t *const)events; + stash_entry_t entry_tbl[num]; + + if (unlikely(eo_elem == NULL)) + return 0; + + env_spinlock_lock(&eo_elem->lock); + + for (int i = 0; i < num; i++) { + entry_tbl[i].qidx = qidx; + entry_tbl[i].evptr = evhdl_tbl[i].evptr; /* ESV evgen dropped */ + } + + /* Enqueue events to internal queue */ + int ret = odp_stash_put_u64(eo_elem->stash, &entry_tbl[0].u64, num); + + if (unlikely(ret < 0)) + ret = 0; + + env_spinlock_unlock(&eo_elem->lock); + + return ret; +} + +/** + * Send the buffered events at the end of the EO-start operation. + * + * Events sent from within the EO-start functions are buffered and sent + * after the start-operation has completed. Otherwise it would not be + * possible to reliably send events from the start-functions to the + * EO's own queues. + */ +void eo_start_send_buffered_events(eo_elem_t *const eo_elem) +{ + /* max events to send in a burst */ + const unsigned int max_ev = 32; + stash_entry_t entry_tbl[max_ev]; + em_event_t ev_tbl[max_ev]; + event_hdr_t *ev_hdr_tbl[max_ev]; + + env_spinlock_lock(&eo_elem->lock); + + /* + * Send the buffered events in bursts into the destination queue. + * + * This is startup: we can use some extra cycles to create the + * event-arrays to send in bursts. + */ + int err = 0; + int num = 0; + + do { + num = odp_stash_get_u64(eo_elem->stash, &entry_tbl[0].u64 /*[out]*/, max_ev); + if (num <= 0) { + if (unlikely(num < 0)) + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_EO_START, + "odp_stash_get_u64() fails: %d", num); + goto buffered_send_exit; + } + + for (int i = 0; i < num; i++) + ev_tbl[i] = (em_event_t)(uintptr_t)entry_tbl[i].evptr; + + event_to_hdr_multi(ev_tbl, ev_hdr_tbl, num); + + if (esv_enabled()) + evstate_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, + EVSTATE__EO_START_SEND_BUFFERED); + + int tbl_idx = 0; /* index into ..._tbl[] */ + + /* + * Dispatch in batches of 'batch_cnt' events. + * Each batch contains events from the same queue & evgrp. 
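+	 *
+	 * For example, if the dequeued entries refer to (queue, event group)
+	 * pairs [Q1:G0, Q1:G0, Q2:G0, Q1:G1], three batches are sent below:
+	 * two events to Q1 with group G0, then one to Q2 with G0, then one
+	 * to Q1 with G1.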
+	 */
+	do {
+		const int qidx = entry_tbl[tbl_idx].qidx;
+		const em_queue_t queue = queue_idx2hdl(qidx);
+		const em_event_group_t event_group = ev_hdr_tbl[tbl_idx]->egrp;
+		int num_sent = 0;
+		int batch_cnt = 1;
+
+		for (int i = tbl_idx + 1; i < num &&
+		     entry_tbl[i].qidx == qidx &&
+		     ev_hdr_tbl[i]->egrp == event_group; i++) {
+			batch_cnt++;
+		}
+
+		/* send events with same destination queue and event group */
+		if (event_group == EM_EVENT_GROUP_UNDEF)
+			num_sent = em_send_multi(&ev_tbl[tbl_idx], batch_cnt, queue);
+		else
+			num_sent = em_send_group_multi(&ev_tbl[tbl_idx], batch_cnt, queue,
+						       event_group);
+		if (unlikely(num_sent != batch_cnt)) {
+			/* User's eo-start saw successful em_send, free here */
+			for (int i = num_sent; i < batch_cnt; i++)
+				em_free(ev_tbl[i]);
+			INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_EO_START,
+				       "Q:%" PRI_QUEUE " req:%u sent:%u",
+				       queue, batch_cnt, num_sent);
+		}
+
+		tbl_idx += batch_cnt;
+	} while (tbl_idx < num);
+	} while (num > 0);
+
+buffered_send_exit:
+	err = odp_stash_destroy(eo_elem->stash);
+
+	eo_elem->stash = ODP_STASH_INVALID;
+	env_spinlock_unlock(&eo_elem->lock);
+
+	if (unlikely(err)) {
+		INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_EO_START,
+			       "odp_stash_destroy() fails: %d", err);
+	}
+}
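+
+/*
+ * Illustration only (not part of the EM API): how the buffering above looks
+ * from an application's EO-start function. The names 'my_eo_start',
+ * 'my_ctx_t' and 'ctx->my_queue' are hypothetical:
+ *
+ * @code
+ *	em_status_t my_eo_start(void *eo_context, em_eo_t eo,
+ *				const em_eo_conf_t *conf)
+ *	{
+ *		my_ctx_t *ctx = eo_context;
+ *		em_event_t ev = em_alloc(64, EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
+ *
+ *		if (ev == EM_EVENT_UNDEF)
+ *			return EM_ERR_ALLOC_FAILED;
+ *		// The EO is not yet running: em_send() ends up in
+ *		// eo_start_buffer_events() and the event is delivered by
+ *		// eo_start_send_buffered_events() once EO-start completes.
+ *		return em_send(ev, ctx->my_queue);
+ *	}
+ * @endcode
+ */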
+
+em_status_t
+eo_stop_local_req(eo_elem_t *const eo_elem,
+		  int num_notif, const em_notif_t notif_tbl[])
+{
+	eo_local_func_call_param_t param;
+
+	eo_local_func_call_param_init(&param);
+	param.eo_elem = eo_elem;
+	param.q_elem = NULL; /* no q_elem */
+	param.delete_queues = EM_FALSE;
+	param.ev_id = EO_STOP_LOCAL_REQ;
+	param.f_done_callback = eo_stop_done_callback;
+	param.num_notif = num_notif;
+	param.notif_tbl = notif_tbl;
+	param.exclude_current_core = EM_FALSE; /* all cores */
+	param.sync_operation = false;
+
+	return eo_local_func_call_req(&param);
+}
+
+/**
+ * Callback function run when all stop_local functions are finished,
+ * triggered by calling em_eo_stop().
+ */
+static void
+eo_stop_done_callback(void *args)
+{
+	em_locm_t *const locm = &em_locm;
+	const loc_func_retval_t *loc_func_retvals = args;
+	eo_elem_t *const eo_elem = loc_func_retvals->eo_elem;
+	void *const eo_ctx = eo_elem->eo_ctx;
+	queue_elem_t *const save_q_elem = locm->current.q_elem;
+	queue_elem_t tmp_q_elem;
+	em_eo_t eo;
+	em_status_t ret;
+
+	if (unlikely(eo_elem == NULL)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER),
+			       EM_ESCOPE_EO_STOP_DONE_CB,
+			       "eo_elem is NULL!");
+		return;
+	}
+
+	eo = eo_elem->eo;
+	(void)check_eo_local_status(loc_func_retvals);
+
+	/* Change state here to allow em_eo_delete() from EO global stop */
+	eo_elem->state = EM_EO_STATE_CREATED; /* == EO_STATE_STOPPED */
+
+	/*
+	 * Use a tmp q_elem as the 'current q_elem' to enable calling
+	 * em_eo_current() from the EO stop functions.
+	 * Before returning, restore the original 'current q_elem' from
+	 * 'save_q_elem'.
+	 */
+	memset(&tmp_q_elem, 0, sizeof(tmp_q_elem));
+	tmp_q_elem.eo = eo;
+
+	locm->current.q_elem = &tmp_q_elem;
+	/*
+	 * Call the Global EO stop function now that all
+	 * EO local stop functions are done.
+	 */
+	ret = eo_elem->stop_func(eo_ctx, eo);
+	/* Restore the original 'current q_elem' */
+	locm->current.q_elem = save_q_elem;
+
+	/*
+	 * Note: the EO might not be available after this if the EO global stop
+	 * called em_eo_delete()!
+	 */
+
+	if (unlikely(ret != EM_OK))
+		INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_DONE_CB,
+			       "EO:%" PRI_EO " stop-func failed", eo);
+
+	/* free the storage for local func return values */
+	em_free(loc_func_retvals->event);
+}
+
+em_status_t
+eo_stop_sync_local_req(eo_elem_t *const eo_elem)
+{
+	eo_local_func_call_param_t param;
+
+	eo_local_func_call_param_init(&param);
+	param.eo_elem = eo_elem;
+	param.q_elem = NULL; /* no q_elem */
+	param.delete_queues = EM_FALSE;
+	param.ev_id = EO_STOP_SYNC_LOCAL_REQ;
+	param.f_done_callback = eo_stop_sync_done_callback;
+	param.num_notif = 0;
+	param.notif_tbl = NULL;
+	param.exclude_current_core = EM_TRUE; /* exclude this core */
+	param.sync_operation = true;
+
+	return eo_local_func_call_req(&param);
+}
+
+/**
+ * Callback function run when all stop_local functions are finished,
+ * triggered by calling em_eo_stop_sync().
+ */
+static void
+eo_stop_sync_done_callback(void *args)
+{
+	em_locm_t *const locm = &em_locm;
+	const loc_func_retval_t *loc_func_retvals = args;
+	const eo_elem_t *eo_elem = loc_func_retvals->eo_elem;
+
+	if (unlikely(eo_elem == NULL)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER),
+			       EM_ESCOPE_EO_STOP_SYNC_DONE_CB,
+			       "eo_elem is NULL!");
+		/* Enable the caller of the sync API func to proceed */
+		locm->sync_api.in_progress = false;
+		return;
+	}
+
+	(void)check_eo_local_status(loc_func_retvals);
+
+	/* free the storage for local func return values */
+	em_free(loc_func_retvals->event);
+
+	/* Enable the caller of the sync API func to proceed (on this core) */
+	locm->sync_api.in_progress = false;
+}
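+
+/*
+ * Note on the _sync variants: the calling core flags
+ * 'em_locm.sync_api.in_progress' before requesting the local calls and then
+ * keeps servicing internal ctrl events until a done-callback above clears
+ * the flag again. Roughly (simplified sketch, not the actual dispatch code):
+ *
+ * @code
+ *	while (em_locm.sync_api.in_progress)
+ *		; // dispatch/poll internal ctrl events on this core
+ * @endcode
+ */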
+
+em_status_t
+eo_remove_queue_local_req(eo_elem_t *const eo_elem, queue_elem_t *const q_elem,
+			  int num_notif, const em_notif_t notif_tbl[])
+{
+	eo_local_func_call_param_t param;
+
+	eo_local_func_call_param_init(&param);
+	param.eo_elem = eo_elem;
+	param.q_elem = q_elem;
+	param.delete_queues = EM_FALSE;
+	param.ev_id = EO_REM_QUEUE_LOCAL_REQ;
+	param.f_done_callback = eo_remove_queue_done_callback;
+	param.num_notif = num_notif;
+	param.notif_tbl = notif_tbl;
+	param.exclude_current_core = EM_FALSE; /* all cores */
+	param.sync_operation = false;
+
+	return eo_local_func_call_req(&param);
+}
+
+static em_status_t
+eo_remove_queue_local(const eo_elem_t *eo_elem, const queue_elem_t *q_elem)
+{
+	(void)eo_elem;
+	(void)q_elem;
+
+	return EM_OK;
+}
+
+static void
+eo_remove_queue_done_callback(void *args)
+{
+	const loc_func_retval_t *loc_func_retvals = args;
+	eo_elem_t *const eo_elem = loc_func_retvals->eo_elem;
+	queue_elem_t *const q_elem = loc_func_retvals->q_elem;
+	em_status_t ret;
+
+	if (unlikely(eo_elem == NULL || q_elem == NULL)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER),
+			       EM_ESCOPE_EO_REMOVE_QUEUE_DONE_CB,
+			       "eo_elem/q_elem is NULL!");
+		return;
+	}
+
+	(void)check_eo_local_status(loc_func_retvals);
+
+	/* Remove the queue from the EO */
+	ret = eo_rem_queue(eo_elem, q_elem);
+
+	if (unlikely(ret != EM_OK))
+		INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_DONE_CB,
+			       "EO:%" PRI_EO " remove Q:%" PRI_QUEUE " failed",
+			       eo_elem->eo, q_elem->queue);
+
+	/* free the storage for local func return values */
+	em_free(loc_func_retvals->event);
+}
+
+em_status_t
+eo_remove_queue_sync_local_req(eo_elem_t *const eo_elem,
+			       queue_elem_t *const q_elem)
+{
+	eo_local_func_call_param_t param;
+
+	eo_local_func_call_param_init(&param);
+	param.eo_elem = eo_elem;
+	param.q_elem = q_elem;
+	param.delete_queues = EM_FALSE;
+	param.ev_id = EO_REM_QUEUE_SYNC_LOCAL_REQ;
+	param.f_done_callback = eo_remove_queue_sync_done_callback;
+	param.num_notif = 0;
+	param.notif_tbl = NULL;
+	param.exclude_current_core = EM_TRUE; /* exclude this core */
+	param.sync_operation = true;
+
+	return eo_local_func_call_req(&param);
+}
+
+static em_status_t
+eo_remove_queue_sync_local(const eo_elem_t *eo_elem, const queue_elem_t *q_elem)
+{
+	(void)eo_elem;
+	(void)q_elem;
+
+	return EM_OK;
+}
+
+static void
+eo_remove_queue_sync_done_callback(void *args)
+{
+	em_locm_t *const locm = &em_locm;
+	const loc_func_retval_t *loc_func_retvals = args;
+	eo_elem_t *const eo_elem = loc_func_retvals->eo_elem;
+	queue_elem_t *const q_elem = loc_func_retvals->q_elem;
+	em_status_t ret;
+
+	if (unlikely(eo_elem == NULL || q_elem == NULL)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER),
+			       EM_ESCOPE_EO_REMOVE_QUEUE_SYNC_DONE_CB,
+			       "eo_elem/q_elem is NULL!");
+		/* Enable the caller of the sync API func to proceed */
+		locm->sync_api.in_progress = false;
+		return;
+	}
+
+	(void)check_eo_local_status(loc_func_retvals);
+
+	/* Remove the queue from the EO */
+	ret = eo_rem_queue(eo_elem, q_elem);
+
+	if (unlikely(ret != EM_OK))
+		INTERNAL_ERROR(ret,
+			       EM_ESCOPE_EO_REMOVE_QUEUE_SYNC_DONE_CB,
+			       "EO:%" PRI_EO " remove Q:%" PRI_QUEUE " failed",
+			       eo_elem->eo, q_elem->queue);
+
+	/* free the storage for local func return values */
+	em_free(loc_func_retvals->event);
+
+	/* Enable the caller of the sync API func to proceed (on this core) */
+	locm->sync_api.in_progress = false;
+}
+
+em_status_t
+eo_remove_queue_all_local_req(eo_elem_t *const eo_elem, int delete_queues,
+			      int num_notif, const em_notif_t notif_tbl[])
+{
+	eo_local_func_call_param_t param;
+
+	eo_local_func_call_param_init(&param);
+	param.eo_elem = eo_elem;
+	param.q_elem = NULL; /* no q_elem */
+	param.delete_queues = delete_queues;
+	param.ev_id = EO_REM_QUEUE_ALL_LOCAL_REQ;
+	param.f_done_callback = eo_remove_queue_all_done_callback;
+	param.num_notif = num_notif;
+	param.notif_tbl = notif_tbl;
+	param.exclude_current_core = EM_FALSE; /* all cores */
+	param.sync_operation = false;
+
+	return eo_local_func_call_req(&param);
+}
+
+static em_status_t
+eo_remove_queue_all_local(const eo_elem_t *eo_elem, int delete_queues)
+{
+	(void)eo_elem;
+	(void)delete_queues;
+
+	return EM_OK;
+}
+
+static void
+eo_remove_queue_all_done_callback(void *args)
+{
+	const loc_func_retval_t *loc_func_retvals = args;
+	eo_elem_t *const eo_elem = loc_func_retvals->eo_elem;
+	int delete_queues = loc_func_retvals->delete_queues;
+	em_status_t ret;
+
+	if (unlikely(eo_elem == NULL)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER),
+			       EM_ESCOPE_EO_REMOVE_QUEUE_ALL_DONE_CB,
+			       "eo_elem is NULL!");
+		return;
+	}
+
+	(void)check_eo_local_status(loc_func_retvals);
+
+	/* Remove or delete all the EO's queues */
+	if (delete_queues)
+		ret = eo_delete_queue_all(eo_elem);
+	else
+		ret = eo_rem_queue_all(eo_elem);
+
+	if (unlikely(ret != EM_OK))
+		INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL_DONE_CB,
+			       "EO:%" PRI_EO " removing all queues failed",
+			       eo_elem->eo);
+
+	/* free the storage for local func return values */
+	em_free(loc_func_retvals->event);
+}
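+
+/*
+ * These *_req() helpers implement the core-local part of the public
+ * remove-queue APIs. At the application level the operation looks e.g.
+ * like this ('my_eo' and the notification setup are hypothetical):
+ *
+ * @code
+ *	em_notif_t notif = {.event = ev, .queue = q,
+ *			    .egroup = EM_EVENT_GROUP_UNDEF};
+ *
+ *	// remove and delete all queues of 'my_eo'; 'notif.event' is sent
+ *	// to 'notif.queue' once every core has run its local removal step
+ *	em_status_t stat = em_eo_remove_queue_all(my_eo, EM_TRUE, 1, &notif);
+ * @endcode
+ */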
+
+em_status_t
+eo_remove_queue_all_sync_local_req(eo_elem_t *const eo_elem, int delete_queues)
+{
+	eo_local_func_call_param_t param;
+
+	eo_local_func_call_param_init(&param);
+	param.eo_elem = eo_elem;
+	param.q_elem = NULL; /* no q_elem */
+	param.delete_queues = delete_queues;
+	param.ev_id = EO_REM_QUEUE_ALL_SYNC_LOCAL_REQ;
+	param.f_done_callback = eo_remove_queue_all_sync_done_callback;
+	param.num_notif = 0;
+	param.notif_tbl = NULL;
+	param.exclude_current_core = EM_TRUE; /* exclude this core */
+	param.sync_operation = true;
+
+	return eo_local_func_call_req(&param);
+}
+
+static em_status_t
+eo_remove_queue_all_sync_local(const eo_elem_t *eo_elem, int delete_queues)
+{
+	(void)eo_elem;
+	(void)delete_queues;
+
+	return EM_OK;
+}
+
+static void
+eo_remove_queue_all_sync_done_callback(void *args)
+{
+	em_locm_t *const locm = &em_locm;
+	const loc_func_retval_t *loc_func_retvals = args;
+	eo_elem_t *const eo_elem = loc_func_retvals->eo_elem;
+	int delete_queues = loc_func_retvals->delete_queues;
+	em_status_t ret;
+
+	if (unlikely(eo_elem == NULL)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER),
+			       EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC_DONE_CB,
+			       "eo_elem is NULL!");
+		/* Enable the caller of the sync API func to proceed */
+		locm->sync_api.in_progress = false;
+		return;
+	}
+
+	(void)check_eo_local_status(loc_func_retvals);
+
+	/* Remove or delete all the EO's queues */
+	if (delete_queues)
+		ret = eo_delete_queue_all(eo_elem);
+	else
+		ret = eo_rem_queue_all(eo_elem);
+
+	if (unlikely(ret != EM_OK))
+		INTERNAL_ERROR(ret,
+			       EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC_DONE_CB,
+			       "EO:%" PRI_EO " removing all queues failed",
+			       eo_elem->eo);
+
+	/* free the storage for local func return values */
+	em_free(loc_func_retvals->event);
+
+	/* Enable the caller of the sync API func to proceed (on this core) */
+	locm->sync_api.in_progress = false;
+}
+
+static em_status_t
+check_eo_local_status(const loc_func_retval_t *loc_func_retvals)
+{
+	const int cores = em_core_count();
+	static const char core_err[] = "coreXX:0x12345678 ";
+	char errmsg[cores * sizeof(core_err)];
+	int n = 0;
+	int c = 0;
+	int local_fail = 0;
+	em_status_t err;
+
+	for (int i = 0; i < cores; i++) {
+		err = loc_func_retvals->core[i];
+		if (err != EM_OK) {
+			local_fail = 1;
+			break;
+		}
+	}
+
+	if (!local_fail)
+		return EM_OK;
+
+	for (int i = 0; i < cores; i++) {
+		err = loc_func_retvals->core[i];
+		if (err != EM_OK) {
+			n = snprintf(&errmsg[c], sizeof(core_err),
+				     "core%02d:0x%08X ", i, err);
+			if ((unsigned int)n >= sizeof(core_err))
+				break;
+			c += n;
+		}
+	}
+	errmsg[cores * sizeof(core_err) - 1] = '\0';
+
+	INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EVENT_INTERNAL_LFUNC_CALL,
+		       "\nLocal function failed on cores:\n"
+		       "%s", errmsg);
+	return EM_ERR;
+}
+
+static void
+eo_local_func_call_param_init(eo_local_func_call_param_t *param)
+{
+	memset(param, 0, sizeof(*param));
+}
+
+/**
+ * Request a function to be run on each core and call 'f_done_callback(arg_ptr)'
+ * when all those functions have completed.
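+ *
+ * In short: an internal ctrl event carrying the request is broadcast to
+ * the cores in 'core_mask', each core runs the requested local function in
+ * i_event__eo_local_func_call_req() and stores its return status into
+ * 'loc_func_retvals', and when all cores have completed, the given
+ * 'f_done_callback(arg_ptr)' is run (see send_core_ctrl_events()).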
+ */
+static em_status_t
+eo_local_func_call_req(const eo_local_func_call_param_t *param)
+{
+	int err;
+	em_event_t event;
+	em_event_t tmp;
+	internal_event_t *i_event;
+	int core_count;
+	int free_count;
+	em_core_mask_t core_mask;
+	loc_func_retval_t *loc_func_retvals;
+	void *f_done_arg_ptr;
+
+	core_count = em_core_count();
+	em_core_mask_zero(&core_mask);
+	em_core_mask_set_count(core_count, &core_mask);
+	free_count = core_count + 1; /* all cores + 'done' event */
+	if (param->exclude_current_core) {
+		/* EM _sync API func: exclude the calling core */
+		em_core_mask_clr(em_core_id(), &core_mask);
+		free_count -= 1;
+	}
+
+	event = em_alloc(sizeof(internal_event_t),
+			 EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
+	RETURN_ERROR_IF(event == EM_EVENT_UNDEF,
+			EM_ERR_ALLOC_FAILED, EM_ESCOPE_EO_LOCAL_FUNC_CALL_REQ,
+			"Internal event (%" PRIu64 ") allocation failed",
+			param->ev_id);
+	i_event = em_event_pointer(event);
+	i_event->id = param->ev_id;
+	i_event->loc_func.eo_elem = param->eo_elem;
+	i_event->loc_func.q_elem = param->q_elem;
+	i_event->loc_func.delete_queues = param->delete_queues;
+
+	tmp = em_alloc(sizeof(loc_func_retval_t),
+		       EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
+	RETURN_ERROR_IF(tmp == EM_EVENT_UNDEF,
+			EM_ERR_ALLOC_FAILED, EM_ESCOPE_EO_LOCAL_FUNC_CALL_REQ,
+			"Internal loc_func_retval_t allocation failed");
+	loc_func_retvals = em_event_pointer(tmp);
+	loc_func_retvals->eo_elem = param->eo_elem;
+	loc_func_retvals->q_elem = param->q_elem;
+	loc_func_retvals->delete_queues = param->delete_queues;
+	loc_func_retvals->event = tmp; /* store event handle for em_free() */
+	env_atomic32_init(&loc_func_retvals->free_at_zero);
+	env_atomic32_set(&loc_func_retvals->free_at_zero, free_count);
+	for (int i = 0; i < core_count; i++)
+		loc_func_retvals->core[i] = EM_OK;
+
+	/* ptr to retval storage so loc func calls can record retval there */
+	i_event->loc_func.retvals = loc_func_retvals;
+
+	/* Give ptr to retval storage also to 'done' function */
+	f_done_arg_ptr = loc_func_retvals;
+
+	if (em_core_mask_iszero(&core_mask)) {
+		/*
+		 * Special handling when calling sync APIs with one core in use.
+		 * Need to call both local- and done-funcs here and return.
+		 */
+		env_atomic32_inc(&loc_func_retvals->free_at_zero);
+		i_event__eo_local_func_call_req(i_event);
+		em_free(event);
+		param->f_done_callback(f_done_arg_ptr);
+
+		return EM_OK;
+	}
+
+	err = send_core_ctrl_events(&core_mask, event,
+				    param->f_done_callback, f_done_arg_ptr,
+				    param->num_notif, param->notif_tbl,
+				    param->sync_operation);
+	if (unlikely(err)) {
+		char core_mask_str[EM_CORE_MASK_STRLEN];
+		uint32_t unsent_cnt = err;
+		uint32_t cnt;
+
+		em_free(event);
+		cnt = env_atomic32_sub_return(&loc_func_retvals->free_at_zero,
+					      unsent_cnt + 1);
+		if (cnt == 0)
+			em_free(tmp);
+
+		em_core_mask_tostr(core_mask_str, EM_CORE_MASK_STRLEN,
+				   &core_mask);
+		return INTERNAL_ERROR(EM_ERR_LIB_FAILED,
+				      EM_ESCOPE_EO_LOCAL_FUNC_CALL_REQ,
+				      "send_core_ctrl_events(mask=%s) failed",
+				      core_mask_str);
+	}
+
+	return EM_OK;
+}
+
+/**
+ * EM internal event handler (see em_internal_event.c&h)
+ * Handle the internal event requesting a local function call.
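+ *
+ * Runs on each core that received the ctrl event: executes the requested
+ * local function and records its status. The 'free_at_zero' decrement at
+ * the end covers freeing of the shared retval storage in setup-error cases.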
+ */
+void
+i_event__eo_local_func_call_req(const internal_event_t *i_ev)
+{
+	em_locm_t *const locm = &em_locm;
+	const uint64_t f_type = i_ev->loc_func.id;
+	eo_elem_t *eo_elem = i_ev->loc_func.eo_elem;
+	const queue_elem_t *q_elem = i_ev->loc_func.q_elem;
+	int delete_queues = i_ev->loc_func.delete_queues;
+	loc_func_retval_t *const loc_func_retvals = i_ev->loc_func.retvals;
+	em_status_t status = EM_ERR;
+	queue_elem_t *const save_q_elem = locm->current.q_elem;
+	queue_elem_t tmp_q_elem;
+
+	switch (f_type) {
+	case EO_START_SYNC_LOCAL_REQ:
+		if (em_core_count() == 1) {
+			/*
+			 * Special handling when calling sync API with only one
+			 * core in use: start-local() func already called by
+			 * em_eo_start_sync() and this func called directly from
+			 * within eo_local_func_call_req().
+			 */
+			status = EM_OK;
+			break;
+		}
+		/* fallthrough */
+	case EO_START_LOCAL_REQ:
+		/*
+		 * Use a tmp q_elem as the 'current q_elem' to enable calling
+		 * em_eo_current() from the EO start functions.
+		 * Before returning, restore the original 'current q_elem' from
+		 * 'save_q_elem'.
+		 */
+		memset(&tmp_q_elem, 0, sizeof(tmp_q_elem));
+		tmp_q_elem.eo = eo_elem->eo;
+		locm->current.q_elem = &tmp_q_elem;
+
+		locm->start_eo_elem = eo_elem;
+		status = eo_elem->start_local_func(eo_elem->eo_ctx,
+						   eo_elem->eo);
+		locm->start_eo_elem = NULL;
+		/* Restore the original 'current q_elem' */
+		locm->current.q_elem = save_q_elem;
+		break;
+
+	case EO_STOP_SYNC_LOCAL_REQ:
+		if (em_core_count() == 1) {
+			/*
+			 * Special handling when calling sync API with only one
+			 * core in use: stop-local() func already called by
+			 * em_eo_stop_sync() and this func called directly from
+			 * within eo_local_func_call_req().
+			 */
+			status = EM_OK;
+			break;
+		}
+		/* fallthrough */
+	case EO_STOP_LOCAL_REQ:
+		if (eo_elem->stop_local_func != NULL) {
+			/*
+			 * Use a tmp q_elem as the 'current q_elem' to enable
+			 * calling em_eo_current() from the EO stop functions.
+			 * Before returning, restore the original 'current
+			 * q_elem' from 'save_q_elem'.
+			 */
+			memset(&tmp_q_elem, 0, sizeof(tmp_q_elem));
+			tmp_q_elem.eo = eo_elem->eo;
+			locm->current.q_elem = &tmp_q_elem;
+
+			status = eo_elem->stop_local_func(eo_elem->eo_ctx,
+							  eo_elem->eo);
+			/* Restore the original 'current q_elem' */
+			locm->current.q_elem = save_q_elem;
+		} else {
+			status = EM_OK; /* No local stop func given */
+		}
+		break;
+
+	case EO_REM_QUEUE_LOCAL_REQ:
+		status = eo_remove_queue_local(eo_elem, q_elem);
+		break;
+	case EO_REM_QUEUE_SYNC_LOCAL_REQ:
+		status = eo_remove_queue_sync_local(eo_elem, q_elem);
+		break;
+	case EO_REM_QUEUE_ALL_LOCAL_REQ:
+		status = eo_remove_queue_all_local(eo_elem, delete_queues);
+		break;
+	case EO_REM_QUEUE_ALL_SYNC_LOCAL_REQ:
+		status = eo_remove_queue_all_sync_local(eo_elem, delete_queues);
+		break;
+	default:
+		status = EM_FATAL(EM_ERR_BAD_ID);
+		break;
+	}
+
+	if (status != EM_OK) {
+		/* store failing status, egrp 'done' can check if all ok */
+		loc_func_retvals->core[em_core_id()] = status;
+
+		INTERNAL_ERROR(status, EM_ESCOPE_EVENT_INTERNAL_LFUNC_CALL,
+			       "EO:%" PRI_EO "-%s:Local func(%" PRIx64 ")fail",
+			       eo_elem->eo, eo_elem->name, f_type);
+	}
+
+	/*
+	 * In case of setup error, determine if 'loc_func_retvals' should be
+	 * freed here, in the setup code in eo_local_func_call_req() or
+	 * normally in a successful case in the
+	 * eo_start/stop_local__done_callback() function when the event group
+	 * completion notif is handled.
+	 */
+	const uint32_t cnt =
+		env_atomic32_sub_return(&loc_func_retvals->free_at_zero, 1);
+	if (unlikely(cnt == 0)) {
+		(void)check_eo_local_status(loc_func_retvals);
+		em_free(loc_func_retvals->event);
+	}
+}
+
+unsigned int
+eo_count(void)
+{
+	return env_atomic32_get(&em_shm->eo_count);
+}
+
+size_t eo_get_name(const eo_elem_t *const eo_elem,
+		   char name[/*out*/], const size_t maxlen)
+{
+	size_t len;
+
+	len = strnlen(eo_elem->name, sizeof(eo_elem->name) - 1);
+	if (maxlen - 1 < len)
+		len = maxlen - 1;
+
+	memcpy(name, eo_elem->name, len);
+	name[len] = '\0';
+
+	return len;
+}
+
+static const char *state_to_str(em_eo_state_t state)
+{
+	const char *state_str;
+
+	switch (state) {
+	case EM_EO_STATE_UNDEF:
+		state_str = "UNDEF";
+		break;
+	case EM_EO_STATE_CREATED:
+		state_str = "CREATED";
+		break;
+	case EM_EO_STATE_STARTING:
+		state_str = "STARTING";
+		break;
+	case EM_EO_STATE_RUNNING:
+		state_str = "RUNNING";
+		break;
+	case EM_EO_STATE_STOPPING:
+		state_str = "STOPPING";
+		break;
+	case EM_EO_STATE_ERROR:
+		state_str = "ERROR";
+		break;
+	default:
+		state_str = "UNKNOWN";
+		break;
+	}
+
+	return state_str;
+}
+
+#define EO_INFO_HDR_FMT \
+"Number of EOs: %d\n\n" \
+"ID        Name                            State     Start-local  Stop-local" \
+"  Multi-rcv  Max-events  Err-hdl  Q-num  EO-ctx\n" \
+"---------------------------------------------------------------------------" \
+"-----------------------------------------------\n%s\n"
+
+#define EO_INFO_LEN 123
+#define EO_INFO_FMT "%-10" PRI_EO "%-32s%-10s%-13c%-12c%-11c%-12d%-9c%-7d%-6c\n"
+
+void eo_info_print_all(void)
+{
+	unsigned int num_eo;
+	eo_elem_t *eo_elem;
+	int len = 0;
+	int n_print = 0;
+	em_eo_t eo = em_eo_get_first(&num_eo);
+
+	/*
+	 * num_eo may not match the number of EOs actually returned by
+	 * iterating with em_eo_get_next() if EOs are added or removed in
+	 * parallel by another core. Thus space for 10 extra EOs is reserved.
+	 * If more than 10 EOs are added by other cores in parallel, we only
+	 * print information of the first (num_eo + 10) EOs.
+	 *
+	 * The extra 1 byte is reserved for the terminating null byte.
+	 */
+	const int eo_info_str_len = (num_eo + 10) * EO_INFO_LEN + 1;
+	char eo_info_str[eo_info_str_len];
+
+	while (eo != EM_EO_UNDEF) {
+		eo_elem = eo_elem_get(eo);
+		if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) {
+			eo = em_eo_get_next();
+			continue;
+		}
+
+		n_print = snprintf(eo_info_str + len,
+				   eo_info_str_len - len,
+				   EO_INFO_FMT, eo, eo_elem->name,
+				   state_to_str(eo_elem->state),
+				   eo_elem->start_local_func ? 'Y' : 'N',
+				   eo_elem->stop_local_func ? 'Y' : 'N',
+				   eo_elem->use_multi_rcv ? 'Y' : 'N',
+				   eo_elem->max_events,
+				   eo_elem->error_handler_func ? 'Y' : 'N',
+				   env_atomic32_get(&eo_elem->num_queues),
+				   eo_elem->eo_ctx ? 'Y' : 'N');
+
+		/* Not enough space to hold more eo info */
+		if (n_print >= eo_info_str_len - len)
+			break;
+
+		len += n_print;
+		eo = em_eo_get_next();
+	}
+
+	/* No EO */
+	if (!len) {
+		EM_PRINT("No EO has been created!\n");
+		return;
+	}
+
+	/*
+	 * Terminate the string here to avoid printing incomplete information
+	 * of the last eo when there is not enough space to hold all eo info.
+	 */
+	eo_info_str[len] = '\0';
+	EM_PRINT(EO_INFO_HDR_FMT, num_eo, eo_info_str);
+}
+
+#define EO_Q_INFO_HDR_FMT \
+"EO %" PRI_EO "(%s) has %d queue(s):\n\n" \
+"Handle    Name                            Priority  Type      State    Qgrp" \
+"       Ctx\n" \
+"---------------------------------------------------------------------------" \
+"---------\n" \
+"%s\n"
+
+#define EO_Q_INFO_LEN 85
+#define EO_Q_INFO_FMT \
+"%-10" PRI_QUEUE "%-32s%-10d%-10s%-9s%-10" PRI_QGRP "%-3c\n" /*85 characters*/
+
+void eo_queue_info_print(em_eo_t eo)
+{
+	unsigned int q_num;
+	em_queue_t q;
+	const queue_elem_t *q_elem;
+	char q_name[EM_QUEUE_NAME_LEN];
+	int len = 0;
+	int n_print = 0;
+	const eo_elem_t *eo_elem = eo_elem_get(eo);
+
+	if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) {
+		EM_PRINT("EO %" PRI_EO " is not created!\n", eo);
+		return;
+	}
+
+	q = em_eo_queue_get_first(&q_num, eo);
+
+	/*
+	 * q_num may not match the number of queues actually returned by
+	 * iterating with em_eo_queue_get_next() if queues are added or removed
+	 * in parallel by another core. Thus space for 10 extra queues is
+	 * reserved. If more than 10 queues are added to this EO by other
+	 * cores, we only print info of the first (q_num + 10) queues.
+	 *
+	 * The extra 1 byte is reserved for the terminating null byte.
+	 */
+	const int eo_q_info_str_len = (q_num + 10) * EO_Q_INFO_LEN + 1;
+	char eo_q_info_str[eo_q_info_str_len];
+
+	while (q != EM_QUEUE_UNDEF) {
+		q_elem = queue_elem_get(q);
+		if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) {
+			q = em_eo_queue_get_next();
+			continue;
+		}
+
+		queue_get_name(q_elem, q_name, EM_QUEUE_NAME_LEN - 1);
+
+		n_print = snprintf(eo_q_info_str + len,
+				   eo_q_info_str_len - len,
+				   EO_Q_INFO_FMT,
+				   q, q_name, q_elem->priority,
+				   queue_get_type_str(q_elem->type),
+				   queue_get_state_str(q_elem->state),
+				   q_elem->queue_group,
+				   q_elem->context ? 'Y' : 'N');
+
+		/* Not enough space to hold more queue info */
+		if (n_print >= eo_q_info_str_len - len)
+			break;
+
+		len += n_print;
+		q = em_eo_queue_get_next();
+	}
+
+	/* EO has no queue */
+	if (!len) {
+		EM_PRINT("EO %" PRI_EO "(%s) has no queue!\n", eo, eo_elem->name);
+		return;
+	}
+
+	/*
+	 * Terminate the string here to avoid printing incomplete information
+	 * of the last queue when there is not enough space to hold all queue
+	 * info.
+	 */
+	eo_q_info_str[len] = '\0';
+	EM_PRINT(EO_Q_INFO_HDR_FMT, eo, eo_elem->name, q_num, eo_q_info_str);
+}
+
+/**
+ * @brief Create a stash used to buffer events sent during EO-start
+ */
+odp_stash_t eo_start_stash_create(const char *name)
+{
+	unsigned int num_obj = 0;
+	odp_stash_capability_t stash_capa;
+	odp_stash_param_t stash_param;
+	odp_stash_t stash = ODP_STASH_INVALID;
+
+	int ret = odp_stash_capability(&stash_capa, ODP_STASH_TYPE_FIFO);
+
+	if (ret != 0)
+		return ODP_STASH_INVALID;
+
+	odp_stash_param_init(&stash_param);
+
+	stash_param.type = ODP_STASH_TYPE_FIFO;
+	stash_param.put_mode = ODP_STASH_OP_MT;
+	stash_param.get_mode = ODP_STASH_OP_MT;
+
+	/* Stash size: use EM default queue size value from config file: */
+	num_obj = em_shm->opt.queue.min_events_default;
+	if (num_obj != 0)
+		stash_param.num_obj = num_obj;
+	/* else: use odp default as set by odp_stash_param_init() */
+
+	if (stash_param.num_obj > stash_capa.max_num_obj) {
+		EM_LOG(EM_LOG_PRINT,
+		       "%s(): req stash.num_obj(%" PRIu64 ") > capa.max_num_obj(%" PRIu64 ").\n"
+		       "      ==> using max value:%" PRIu64 "\n", __func__,
+		       stash_param.num_obj, stash_capa.max_num_obj, stash_capa.max_num_obj);
+		stash_param.num_obj = stash_capa.max_num_obj;
+	}
+
+	stash_param.obj_size = sizeof(uint64_t);
+	stash_param.cache_size = 0; /* No core local caching */
+
+	stash = odp_stash_create(name, &stash_param);
+	if (unlikely(stash == ODP_STASH_INVALID))
+		return ODP_STASH_INVALID;
+
+	return stash;
+}
diff --git a/src/em_eo.h b/src/em_eo.h
index cd6155a5..c86cf78e 100644
--- a/src/em_eo.h
+++ b/src/em_eo.h
@@ -1,178 +1,178 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ - -#ifndef EM_EO_H_ -#define EM_EO_H_ - -/** - * @file - * EM internal EO functions - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#define invalid_eo(eo) ((unsigned int)eo_hdl2idx((eo)) >= EM_MAX_EOS) - -em_status_t -eo_init(eo_tbl_t eo_tbl[], eo_pool_t *eo_pool); - -em_eo_t -eo_alloc(void); - -em_status_t -eo_free(em_eo_t eo); - -em_status_t -eo_add_queue(eo_elem_t *const eo_elem, queue_elem_t *const q_elem); - -em_status_t -eo_rem_queue(eo_elem_t *const eo_elem, queue_elem_t *const q_elem); - -em_status_t -eo_rem_queue_all(eo_elem_t *const eo_elem); - -em_status_t -eo_delete_queue_all(eo_elem_t *const eo_elem); - -em_status_t -eo_start_local_req(eo_elem_t *const eo_elem, - int num_notif, const em_notif_t notif_tbl[]); -em_status_t -eo_start_sync_local_req(eo_elem_t *const eo_elem); - -int -eo_start_buffer_events(const em_event_t events[], int num, em_queue_t queue, - em_event_group_t event_group); -void -eo_start_send_buffered_events(eo_elem_t *const eo_elem); - -em_status_t -eo_stop_local_req(eo_elem_t *const eo_elem, - int num_notif, const em_notif_t notif_tbl[]); -em_status_t -eo_stop_sync_local_req(eo_elem_t *const eo_elem); - -em_status_t -eo_remove_queue_local_req(eo_elem_t *const eo_elem, queue_elem_t *const q_elem, - int num_notif, const em_notif_t notif_tbl[]); -em_status_t -eo_remove_queue_sync_local_req(eo_elem_t *const eo_elem, - queue_elem_t *const q_elem); -em_status_t -eo_remove_queue_all_local_req(eo_elem_t *const eo_elem, int delete_queues, - int num_notif, const em_notif_t notif_tbl[]); -em_status_t -eo_remove_queue_all_sync_local_req(eo_elem_t *const eo_elem, int delete_queues); - -unsigned int -eo_count(void); - -size_t eo_get_name(const eo_elem_t *const eo_elem, - char name[/*out*/], const size_t maxlen); - -static inline int -eo_allocated(const eo_elem_t *const eo_elem) -{ - return !objpool_in_pool(&eo_elem->eo_pool_elem); -} - -/** Convert eo handle to eo index */ -static inline int -eo_hdl2idx(em_eo_t eo) -{ - return (int)(uintptr_t)eo - 1; -} - -/** Convert eo index to eo handle */ -static inline em_eo_t -eo_idx2hdl(int eo_idx) -{ - return (em_eo_t)(uintptr_t)(eo_idx + 1); -} - -/** Returns EO element associated with EO handle */ -static inline eo_elem_t * -eo_elem_get(em_eo_t eo) -{ - const int eo_idx = eo_hdl2idx(eo); - eo_elem_t *eo_elem; - - if (unlikely((unsigned int)eo_idx > EM_MAX_EOS - 1)) - return NULL; - - eo_elem = &em_shm->eo_tbl.eo_elem[eo_idx]; - - return eo_elem; -} - -/** Returns the EO element of the currently active EO (if any)*/ -static inline eo_elem_t * -eo_elem_current(void) -{ - const queue_elem_t *const q_elem = em_locm.current.q_elem; - - if (unlikely(q_elem == NULL)) - return NULL; - - return q_elem->eo_elem; -} - -static inline em_eo_t -eo_current(void) -{ - const queue_elem_t *const q_elem = em_locm.current.q_elem; - - if (unlikely(q_elem == NULL)) - return EM_EO_UNDEF; - - return q_elem->eo; -} - -/** - * EM internal event handler (see em_internal_event.c&h) - * Handle the internal event requesting a local function call. - */ -void -i_event__eo_local_func_call_req(const internal_event_t *i_ev); - -/** Print information about all EOs */ -void eo_info_print_all(void); - -/** Print information about all queues of the given eo */ -void eo_queue_info_print(em_eo_t eo); - -#ifdef __cplusplus -} -#endif - -#endif /* EM_EO_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EM_EO_H_ +#define EM_EO_H_ + +/** + * @file + * EM internal EO functions + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define invalid_eo(eo) ((unsigned int)eo_hdl2idx((eo)) >= EM_MAX_EOS) + +em_status_t +eo_init(eo_tbl_t eo_tbl[], eo_pool_t *eo_pool); + +em_eo_t +eo_alloc(void); + +em_status_t +eo_free(em_eo_t eo); + +em_status_t +eo_add_queue(eo_elem_t *const eo_elem, queue_elem_t *const q_elem); + +em_status_t +eo_rem_queue(eo_elem_t *const eo_elem, queue_elem_t *const q_elem); + +em_status_t +eo_rem_queue_all(eo_elem_t *const eo_elem); + +em_status_t +eo_delete_queue_all(eo_elem_t *const eo_elem); + +em_status_t +eo_start_local_req(eo_elem_t *const eo_elem, + int num_notif, const em_notif_t notif_tbl[]); +em_status_t +eo_start_sync_local_req(eo_elem_t *const eo_elem); + +int eo_start_buffer_events(const em_event_t events[], int num, em_queue_t queue); + +void eo_start_send_buffered_events(eo_elem_t *const eo_elem); + +em_status_t +eo_stop_local_req(eo_elem_t *const eo_elem, + int num_notif, const em_notif_t notif_tbl[]); +em_status_t +eo_stop_sync_local_req(eo_elem_t *const eo_elem); + +em_status_t +eo_remove_queue_local_req(eo_elem_t *const eo_elem, queue_elem_t *const q_elem, + int num_notif, const em_notif_t notif_tbl[]); +em_status_t +eo_remove_queue_sync_local_req(eo_elem_t *const eo_elem, + queue_elem_t *const q_elem); +em_status_t +eo_remove_queue_all_local_req(eo_elem_t *const eo_elem, int delete_queues, + int num_notif, const em_notif_t notif_tbl[]); +em_status_t +eo_remove_queue_all_sync_local_req(eo_elem_t *const eo_elem, int delete_queues); + +unsigned int +eo_count(void); + +size_t eo_get_name(const eo_elem_t *const eo_elem, + char name[/*out*/], const size_t maxlen); + +odp_stash_t eo_start_stash_create(const char *name); + +static inline int +eo_allocated(const eo_elem_t *const eo_elem) +{ + return !objpool_in_pool(&eo_elem->eo_pool_elem); +} + +/** Convert eo handle to eo index */ +static inline int +eo_hdl2idx(em_eo_t eo) +{ + return (int)(uintptr_t)eo 
- 1; +} + +/** Convert eo index to eo handle */ +static inline em_eo_t +eo_idx2hdl(int eo_idx) +{ + return (em_eo_t)(uintptr_t)(eo_idx + 1); +} + +/** Returns EO element associated with EO handle */ +static inline eo_elem_t * +eo_elem_get(em_eo_t eo) +{ + const int eo_idx = eo_hdl2idx(eo); + eo_elem_t *eo_elem; + + if (unlikely((unsigned int)eo_idx > EM_MAX_EOS - 1)) + return NULL; + + eo_elem = &em_shm->eo_tbl.eo_elem[eo_idx]; + + return eo_elem; +} + +/** Returns the EO element of the currently active EO (if any)*/ +static inline eo_elem_t * +eo_elem_current(void) +{ + const queue_elem_t *const q_elem = em_locm.current.q_elem; + + if (unlikely(q_elem == NULL)) + return NULL; + + return q_elem->eo_elem; +} + +static inline em_eo_t +eo_current(void) +{ + const queue_elem_t *const q_elem = em_locm.current.q_elem; + + if (unlikely(q_elem == NULL)) + return EM_EO_UNDEF; + + return q_elem->eo; +} + +/** + * EM internal event handler (see em_internal_event.c&h) + * Handle the internal event requesting a local function call. + */ +void +i_event__eo_local_func_call_req(const internal_event_t *i_ev); + +/** Print information about all EOs */ +void eo_info_print_all(void); + +/** Print information about all queues of the given eo */ +void eo_queue_info_print(em_eo_t eo); + +#ifdef __cplusplus +} +#endif + +#endif /* EM_EO_H_ */ diff --git a/src/em_eo_types.h b/src/em_eo_types.h index aeecb376..624bfab8 100644 --- a/src/em_eo_types.h +++ b/src/em_eo_types.h @@ -1,102 +1,102 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
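
The eo_hdl2idx()/eo_idx2hdl() pair above implements a 1-based handle scheme: index 0 maps to handle 1, so the all-zeros handle stays reserved for EM_EO_UNDEF. A sketch, assuming only the definitions above ('eo_handle_in_range' is hypothetical), of why the single unsigned compare in invalid_eo() also rejects the undefined handle:

/* EM_EO_UNDEF (handle 0) -> index -1, which wraps to UINT_MAX in the
 * unsigned cast, so one compare rejects both 'undef' and out-of-range */
static inline int eo_handle_in_range(em_eo_t eo)
{
	return (unsigned int)eo_hdl2idx(eo) < EM_MAX_EOS;
}
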
- */ - -#ifndef EM_EO_TYPES_H_ -#define EM_EO_TYPES_H_ - -/** - * @file - * EM internal EO types & definitions - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * EM EO element - */ -typedef struct { - /** EO name */ - char name[EM_EO_NAME_LEN] ENV_CACHE_LINE_ALIGNED; - /** EO state */ - em_eo_state_t state; - /** EO start function */ - em_start_func_t start_func; - /** EO core-local start function */ - em_start_local_func_t start_local_func; - /** EO stop function */ - em_stop_func_t stop_func; - /** EO core-local stop function */ - em_stop_local_func_t stop_local_func; - - int use_multi_rcv; /* true:receive_multi_func(), false:receive_func() */ - int max_events; - /** EO event receive function */ - em_receive_func_t receive_func; - /** EO multi-event receive function */ - em_receive_multi_func_t receive_multi_func; - - /** EO specific error handler function */ - em_error_handler_t error_handler_func; - /** EO context data pointer */ - void *eo_ctx; - /** EO elem lock */ - env_spinlock_t lock; - /** EO queue list */ - list_node_t queue_list; - /** Number of queues */ - env_atomic32_t num_queues; - /** List of buffered events sent during the EO start-function */ - list_node_t startfn_evlist; - /** EO handle */ - em_eo_t eo; - /** EO pool elem for linking free EOs for EO-alloc */ - objpool_elem_t eo_pool_elem; -} eo_elem_t ENV_CACHE_LINE_ALIGNED; - -/** - * EO EO element table - */ -typedef struct { - /** EO element table */ - eo_elem_t eo_elem[EM_MAX_EOS]; -} eo_tbl_t; - -typedef struct { - objpool_t objpool; -} eo_pool_t; - -#ifdef __cplusplus -} -#endif - -#endif /* EM_EO_TYPES_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#ifndef EM_EO_TYPES_H_
+#define EM_EO_TYPES_H_
+
+/**
+ * @file
+ * EM internal EO types & definitions
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * EM EO element
+ */
+typedef struct {
+	/** EO name */
+	char name[EM_EO_NAME_LEN] ENV_CACHE_LINE_ALIGNED;
+	/** EO state */
+	em_eo_state_t state;
+	/** EO start function */
+	em_start_func_t start_func;
+	/** EO core-local start function */
+	em_start_local_func_t start_local_func;
+	/** EO stop function */
+	em_stop_func_t stop_func;
+	/** EO core-local stop function */
+	em_stop_local_func_t stop_local_func;
+
+	int use_multi_rcv; /* true:receive_multi_func(), false:receive_func() */
+	int max_events;
+	/** EO event receive function */
+	em_receive_func_t receive_func;
+	/** EO multi-event receive function */
+	em_receive_multi_func_t receive_multi_func;
+
+	/** EO specific error handler function */
+	em_error_handler_t error_handler_func;
+	/** EO context data pointer */
+	void *eo_ctx;
+	/** EO elem lock */
+	env_spinlock_t lock;
+	/** EO queue list */
+	list_node_t queue_list;
+	/** Number of queues */
+	env_atomic32_t num_queues;
+	/** Buffered events sent during the EO start-function */
+	odp_stash_t stash;
+	/** EO handle */
+	em_eo_t eo;
+	/** EO pool elem for linking free EOs for EO-alloc */
+	objpool_elem_t eo_pool_elem;
+} eo_elem_t ENV_CACHE_LINE_ALIGNED;
+
+/**
+ * EM EO element table
+ */
+typedef struct {
+	/** EO element table */
+	eo_elem_t eo_elem[EM_MAX_EOS];
+} eo_tbl_t;
+
+typedef struct {
+	objpool_t objpool;
+} eo_pool_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* EM_EO_TYPES_H_ */
diff --git a/src/em_error.h b/src/em_error.h
index 182357e4..4b8f49c2 100644
--- a/src/em_error.h
+++ b/src/em_error.h
@@ -1,110 +1,112 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ - -/** - * @file - * - * Event Machine Error Handler functions - */ - -#ifndef EM_ERROR_H -#define EM_ERROR_H - -/** - * Internal error reporting macro - */ -#define INTERNAL_ERROR(error, escope, fmt, ...) \ - internal_error((error), (escope), __FILE__, __func__, \ - __LINE__, fmt, ## __VA_ARGS__) - -/** - * Internal macro for return on error - */ -#define RETURN_ERROR_IF(cond, error, escope, fmt, ...) { \ - if (unlikely((cond))) { \ - return INTERNAL_ERROR((error), (escope), \ - fmt, ## __VA_ARGS__); \ - } \ -} - -#define EM_LOG(level, fmt, ...) \ - do { \ - em_locm_t *const locm = &em_locm; \ - if (locm && locm->log_fn) \ - locm->log_fn((level), fmt, ## __VA_ARGS__); \ - else \ - em_shm->log_fn((level), fmt, ## __VA_ARGS__); \ - } while (0) - -#define EM_VLOG(level, fmt, args) (em_shm->vlog_fn((level), fmt, (args))) - -#define EM_PRINT(fmt, ...) EM_LOG(EM_LOG_PRINT, fmt, ## __VA_ARGS__) - -/* - * Print debug message to log (only if EM_DEBUG_PRINT is set) - */ -#define EM_DBG(fmt, ...) { \ - if (EM_DEBUG_PRINT == 1) \ - EM_LOG(EM_LOG_DBG, fmt, ##__VA_ARGS__); \ -} - -/** - * EM internal error - * Don't call directly, should _always_ be used from within the error-macros - */ -em_status_t -internal_error(em_status_t error, em_escope_t escope, ...); - -em_status_t -early_log_init(em_log_func_t user_log_fn, em_vlog_func_t user_vlog_fn); - -void -log_init(void); - -void -error_init(void); - -em_status_t -default_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, - va_list args); - -em_status_t -select_error_handler(em_status_t error, em_escope_t escope, va_list args_list); - -uint64_t -load_global_err_cnt(void); - -ODP_PRINTF_FORMAT(2, 3) -int default_log(em_log_level_t level, const char *fmt, ...); - -int -vdefault_log(em_log_level_t level, const char *fmt, va_list args); - -#endif /* EM_ERROR_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
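
In the updated em_error.h below, RETURN_ERROR_IF() and EM_DBG() are wrapped in do { ... } while (0) rather than bare braces. A sketch of the dangling-else hazard that this form avoids; BAD_DBG is a hypothetical stand-in for the old brace-only style:

#define BAD_DBG(fmt, ...) \
	{ if (EM_DEBUG_PRINT == 1) EM_LOG(EM_LOG_DBG, fmt, ##__VA_ARGS__); }

if (cond)
	BAD_DBG("hello");	/* expands to '{ ... };' - the stray ';'  */
else				/* terminates the if, so this else has   */
	handle_other();		/* no matching if: compile error         */

/* A do { ... } while (0) body consumes exactly one ';' and remains a
 * single statement, so the same call site parses as intended. */
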
+ */ + +/** + * @file + * + * Event Machine Error Handler functions + */ + +#ifndef EM_ERROR_H +#define EM_ERROR_H + +/** + * Internal error reporting macro + */ +#define INTERNAL_ERROR(error, escope, fmt, ...) \ + internal_error((error), (escope), __FILE__, __func__, \ + __LINE__, fmt, ## __VA_ARGS__) + +/** + * Internal macro for return on error + */ +#define RETURN_ERROR_IF(cond, error, escope, fmt, ...) \ + do { \ + if (unlikely((cond))) { \ + return INTERNAL_ERROR((error), (escope), \ + fmt, ## __VA_ARGS__); \ + } \ + } while (0) + +#define EM_LOG(level, fmt, ...) \ + do { \ + em_locm_t *const locm = &em_locm; \ + if (locm && locm->log_fn) \ + locm->log_fn((level), fmt, ## __VA_ARGS__); \ + else \ + em_shm->log_fn((level), fmt, ## __VA_ARGS__); \ + } while (0) + +#define EM_VLOG(level, fmt, args) (em_shm->vlog_fn((level), fmt, (args))) + +#define EM_PRINT(fmt, ...) EM_LOG(EM_LOG_PRINT, fmt, ## __VA_ARGS__) + +/* + * Print debug message to log (only if EM_DEBUG_PRINT is set) + */ +#define EM_DBG(fmt, ...) \ + do { \ + if (EM_DEBUG_PRINT == 1) \ + EM_LOG(EM_LOG_DBG, fmt, ##__VA_ARGS__); \ + } while (0) + +/** + * EM internal error + * Don't call directly, should _always_ be used from within the error-macros + */ +em_status_t +internal_error(em_status_t error, em_escope_t escope, ...); + +em_status_t +early_log_init(em_log_func_t user_log_fn, em_vlog_func_t user_vlog_fn); + +void +log_init(void); + +void +error_init(void); + +em_status_t +default_error_handler(em_eo_t eo, em_status_t error, em_escope_t escope, + va_list args); + +em_status_t +select_error_handler(em_status_t error, em_escope_t escope, va_list args_list); + +uint64_t +load_global_err_cnt(void); + +ODP_PRINTF_FORMAT(2, 3) +int default_log(em_log_level_t level, const char *fmt, ...); + +int +vdefault_log(em_log_level_t level, const char *fmt, va_list args); + +#endif /* EM_ERROR_H_ */ diff --git a/src/em_event.c b/src/em_event.c index 671794bf..a39c50fa 100644 --- a/src/em_event.c +++ b/src/em_event.c @@ -1,225 +1,302 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -/* - * Sanity check that no extra padding is added to the event_hdr_t by - * alignment directives etc. - */ -typedef event_hdr_t _ev_hdr__size_check__arr_t[3]; -COMPILE_TIME_ASSERT(sizeof(_ev_hdr__size_check__arr_t) == - 3 * sizeof(event_hdr_t), EVENT_HDR_SIZE_ERROR2); - -/* - * Verify the value set for EM_CHECK_LEVEL - this define is set either from - * the include/event_machine/platform/event_machine_config.h file or by the - * configure.ac option --enable-check-level=N. - */ -COMPILE_TIME_ASSERT(EM_CHECK_LEVEL >= 0 && EM_CHECK_LEVEL <= 3, - EM_CHECK_LEVEL__BAD_VALUE); - -em_status_t event_init(void) -{ - return EM_OK; -} - -void print_event_info(void) -{ - /* for sizeof() only: */ - event_hdr_t evhdr = {0}; - - EM_PRINT("\n" - "EM Events\n" - "---------\n" - "event-hdr size: %zu B\n", - sizeof(event_hdr_t)); - - EM_DBG("\t\toffset\tsize\n" - "\t\t------\t----\n" - "esv.state_cnt:\t%3zu B\t%2zu B\n" - "esv.state:\t%3zu B\t%2zu B\n" - "start_node:\t%3zu B\t%2zu B\n" - "q_elem:\t\t%3zu B\t%2zu B\n" - "user_area info:\t%3zu B\t%2zu B\n" - "event:\t\t%3zu B\t%2zu B\n" - "queue:\t\t%3zu B\t%2zu B\n" - "egrp:\t\t%3zu B\t%2zu B\n" - "egrp_gen:\t%3zu B\t%2zu B\n" - "event_size:\t%3zu B\t%2zu B\n" - "align_offset:\t%3zu B\t%2zu B\n" - "event_type:\t%3zu B\t%2zu B\n" - "end_hdr_data:\t%3zu B\t%2zu B\n" - " \t---\t%2zu B\n" - "end:\t\t%3zu B\t%2zu B\n", - offsetof(event_hdr_t, state_cnt), sizeof(evhdr.state_cnt), - offsetof(event_hdr_t, state), sizeof(evhdr.state), - offsetof(event_hdr_t, start_node), sizeof(evhdr.start_node), - offsetof(event_hdr_t, q_elem), sizeof(evhdr.q_elem), - offsetof(event_hdr_t, user_area), sizeof(evhdr.user_area), - offsetof(event_hdr_t, event), sizeof(evhdr.event), - offsetof(event_hdr_t, queue), sizeof(evhdr.queue), - offsetof(event_hdr_t, egrp), sizeof(evhdr.egrp), - offsetof(event_hdr_t, egrp_gen), sizeof(evhdr.egrp_gen), - offsetof(event_hdr_t, event_size), sizeof(evhdr.event_size), - offsetof(event_hdr_t, align_offset), sizeof(evhdr.align_offset), - offsetof(event_hdr_t, event_type), sizeof(evhdr.event_type), - offsetof(event_hdr_t, end_hdr_data), sizeof(evhdr.end_hdr_data), - offsetof(event_hdr_t, end) - offsetof(event_hdr_t, end_hdr_data), - offsetof(event_hdr_t, end), sizeof(evhdr.end)); - - EM_PRINT("\n"); -} - -/** - * Helper for em_event_clone(). - * - * Clone an event originating from an external odp pkt-pool. - * Initialize the new cloned event as an EM event and return it. - */ -em_event_t pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool) -{ - /* - * Alloc and copy content via ODP. - * Also the ev_hdr in the odp-pkt user_area is copied. 
- */ - odp_packet_t clone_pkt = odp_packet_copy(pkt, pkt_pool); - - if (unlikely(clone_pkt == ODP_PACKET_INVALID)) - return EM_EVENT_UNDEF; - - odp_packet_user_ptr_set(clone_pkt, PKT_USERPTR_MAGIC_NBR); - - odp_event_t odp_clone_event = odp_packet_to_event(clone_pkt); - event_hdr_t *clone_hdr = odp_packet_user_area(clone_pkt); - em_event_t clone_event = event_odp2em(odp_clone_event); - - /* - * Init hdr of event, also ESV init if needed. - * The clone_hdr is a copy of parent's, update only relevant fields. - */ - if (esv_enabled()) - clone_event = evstate_init(clone_event, clone_hdr, false); - else - clone_hdr->event = clone_event; - - /* clone_hdr->event_type = use parent's type as is */ - clone_hdr->egrp = EM_EVENT_GROUP_UNDEF; - - return clone_event; -} - -void -output_queue_track(queue_elem_t *const output_q_elem) -{ - output_queue_track_t *const track = - &em_locm.output_queue_track; - const int qidx = queue_hdl2idx(output_q_elem->queue); - - if (track->used_queues[qidx] == NULL) { - track->used_queues[qidx] = output_q_elem; - track->idx[track->idx_cnt++] = qidx; - } -} - -void -output_queue_drain(const queue_elem_t *output_q_elem) -{ - const em_queue_t output_queue = output_q_elem->queue; - const em_output_func_t output_fn = - output_q_elem->output.output_conf.output_fn; - void *const output_fn_args = - output_q_elem->output.output_conf.output_fn_args; - - const int deq_max = 32; - - em_event_t output_ev_tbl[deq_max]; - /* use same event-tbl, dequeue odp events into the EM event-tbl */ - odp_event_t *const odp_deq_events = (odp_event_t *)output_ev_tbl; - - const odp_queue_t odp_queue = output_q_elem->odp_queue; - unsigned int output_num; - int deq; - int ret; - - const bool esv_ena = esv_enabled(); - - do { - deq = odp_queue_deq_multi(odp_queue, - odp_deq_events/*out=output_ev_tbl[]*/, - deq_max); - if (unlikely(deq <= 0)) - return; - - output_num = (unsigned int)deq; - /* odp_deq_events[] == output_ev_tbl[], .evgen still missing */ - - /* decrement pool statistics before passing events out-of-EM */ - if (esv_ena) { - event_hdr_t *ev_hdrs[output_num]; - - event_to_hdr_multi(output_ev_tbl, ev_hdrs, output_num); - evstate_em2usr_multi(output_ev_tbl/*in/out*/, - ev_hdrs, output_num, - EVSTATE__OUTPUT_MULTI); - } - - ret = output_fn(output_ev_tbl, output_num, - output_queue, output_fn_args); - - if (unlikely((unsigned int)ret != output_num)) - em_free_multi(&output_ev_tbl[ret], output_num - ret); - } while (deq > 0); -} - -void -output_queue_buffering_drain(void) -{ - output_queue_track_t *const track = &em_locm.output_queue_track; - - for (unsigned int i = 0; i < track->idx_cnt; i++) { - int qidx = track->idx[i]; - queue_elem_t *output_q_elem = track->used_queues[qidx]; - env_spinlock_t *lock = &output_q_elem->output.lock; - - /* - * drain if lock available, otherwise another core is already - * draining so no need to do anything. - */ - if (env_spinlock_trylock(lock)) { - output_queue_drain(output_q_elem); - env_spinlock_unlock(lock); - } - - track->idx[i] = 0; - track->used_queues[qidx] = NULL; - } - track->idx_cnt = 0; -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
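
The output-queue drain loop in em_event.c (shown in its old form above and its updated form below) dequeues bursts of up to 32 events and hands them to the queue's user-provided output function, freeing whatever the function did not consume. A hypothetical minimal output function matching that contract; a real one would transmit the events instead of freeing them:

static int my_output_fn(const em_event_t events[], const unsigned int num,
			const em_queue_t output_queue, void *output_fn_args)
{
	(void)output_queue;
	(void)output_fn_args;

	for (unsigned int i = 0; i < num; i++)
		em_free(events[i]); /* "consume" each event */

	return (int)num; /* anything in events[ret..num-1] is freed by EM */
}
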
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +/* + * Sanity check that no extra padding is added to the event_hdr_t by + * alignment directives etc. + */ +typedef event_hdr_t _ev_hdr__size_check__arr_t[3]; +COMPILE_TIME_ASSERT(sizeof(_ev_hdr__size_check__arr_t) == + 3 * sizeof(event_hdr_t), EVENT_HDR_SIZE_ERROR2); + +/* + * Verify the value set for EM_CHECK_LEVEL - this define is set either from + * the include/event_machine/platform/event_machine_config.h file or by the + * configure.ac option --enable-check-level=N. + */ +COMPILE_TIME_ASSERT(EM_CHECK_LEVEL >= 0 && EM_CHECK_LEVEL <= 3, + EM_CHECK_LEVEL__BAD_VALUE); + +em_status_t event_init(void) +{ + return EM_OK; +} + +void print_event_info(void) +{ + /* for sizeof() only: */ + event_hdr_t evhdr = {0}; + + EM_PRINT("\n" + "EM Events\n" + "---------\n" + "event-hdr size: %zu B\n", + sizeof(event_hdr_t)); + + EM_DBG("\t\toffset\tsize\n" + "\t\t------\t----\n" + "esv.state_cnt:\t%3zu B\t%2zu B\n" + "esv.state:\t%3zu B\t%2zu B\n" + "event:\t\t%3zu B\t%2zu B\n" + "event_size:\t%3zu B\t%2zu B\n" + "event_type:\t%3zu B\t%2zu B\n" + "flags:\t\t%3zu B\t%2zu B\n" + "align_offset:\t%3zu B\t%2zu B\n" + "egrp_gen:\t%3zu B\t%2zu B\n" + "egrp:\t\t%3zu B\t%2zu B\n" + "user_area info:\t%3zu B\t%2zu B\n" + "end_hdr_data:\t%3zu B\t%2zu B\n" + " \t---\t%2zu B\n" + "end:\t\t%3zu B\t%2zu B\n", + offsetof(event_hdr_t, state_cnt), sizeof(evhdr.state_cnt), + offsetof(event_hdr_t, state), sizeof(evhdr.state), + offsetof(event_hdr_t, event), sizeof(evhdr.event), + offsetof(event_hdr_t, event_size), sizeof(evhdr.event_size), + offsetof(event_hdr_t, event_type), sizeof(evhdr.event_type), + offsetof(event_hdr_t, flags), sizeof(evhdr.flags), + offsetof(event_hdr_t, align_offset), sizeof(evhdr.align_offset), + offsetof(event_hdr_t, egrp_gen), sizeof(evhdr.egrp_gen), + offsetof(event_hdr_t, egrp), sizeof(evhdr.egrp), + offsetof(event_hdr_t, user_area), sizeof(evhdr.user_area), + offsetof(event_hdr_t, end_hdr_data), sizeof(evhdr.end_hdr_data), + offsetof(event_hdr_t, end) - offsetof(event_hdr_t, end_hdr_data), + offsetof(event_hdr_t, end), sizeof(evhdr.end)); + + EM_PRINT("\n"); +} + +/** + * Helper for em_event_clone(). + * + * Clone an event originating from an external odp pkt-pool. + * Initialize the new cloned event as an EM event and return it. 
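
pkt_clone_odp() below backs em_event_clone() for packet-events. A caller's-eye sketch, assuming the standard EM API semantics that EM_POOL_UNDEF selects the source event's own pool:

em_event_t clone = em_event_clone(event, EM_POOL_UNDEF);

if (clone != EM_EVENT_UNDEF) {
	/* payload is copied from the parent; per the code below the
	 * event group is not inherited (egrp = EM_EVENT_GROUP_UNDEF) */
}
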
+ */ +em_event_t pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool) +{ + /* + * Alloc and copy content via ODP. + * Also the ev_hdr in the odp-pkt user_area is copied. + */ + odp_packet_t clone_pkt = odp_packet_copy(pkt, pkt_pool); + + if (unlikely(clone_pkt == ODP_PACKET_INVALID)) + return EM_EVENT_UNDEF; + + odp_packet_user_ptr_set(clone_pkt, PKT_USERPTR_MAGIC_NBR); + + odp_event_t odp_clone_event = odp_packet_to_event(clone_pkt); + event_hdr_t *clone_hdr = odp_packet_user_area(clone_pkt); + em_event_t clone_event = event_odp2em(odp_clone_event); + + /* + * Init hdr of event, also ESV init if needed. + * The clone_hdr is a copy of parent's, update only relevant fields. + */ + if (esv_enabled()) + clone_event = evstate_init(clone_event, clone_hdr, false); + else + clone_hdr->event = clone_event; + + /* clone_hdr->event_type = use parent's type as is */ + clone_hdr->egrp = EM_EVENT_GROUP_UNDEF; + clone_hdr->flags.all = 0; + + return clone_event; +} + +void +output_queue_track(queue_elem_t *const output_q_elem) +{ + output_queue_track_t *const track = + &em_locm.output_queue_track; + const int qidx = queue_hdl2idx(output_q_elem->queue); + + if (track->used_queues[qidx] == NULL) { + track->used_queues[qidx] = output_q_elem; + track->idx[track->idx_cnt++] = qidx; + } +} + +void +output_queue_drain(const queue_elem_t *output_q_elem) +{ + const em_queue_t output_queue = output_q_elem->queue; + const em_output_func_t output_fn = + output_q_elem->output.output_conf.output_fn; + void *const output_fn_args = + output_q_elem->output.output_conf.output_fn_args; + + const int deq_max = 32; + + em_event_t output_ev_tbl[deq_max]; + /* use same event-tbl, dequeue odp events into the EM event-tbl */ + odp_event_t *const odp_deq_events = (odp_event_t *)output_ev_tbl; + + const odp_queue_t odp_queue = output_q_elem->odp_queue; + unsigned int output_num; + int deq; + int ret; + + const bool esv_ena = esv_enabled(); + + do { + deq = odp_queue_deq_multi(odp_queue, + odp_deq_events/*out=output_ev_tbl[]*/, + deq_max); + if (unlikely(deq <= 0)) + return; + + output_num = (unsigned int)deq; + /* odp_deq_events[] == output_ev_tbl[] */ + if (esv_ena) { + event_hdr_t *ev_hdrs[output_num]; + + /* Restore hdls from ev_hdrs, odp-ev conv lost evgen */ + event_to_hdr_multi(output_ev_tbl, ev_hdrs, output_num); + for (unsigned int i = 0; i < output_num; i++) + output_ev_tbl[i] = ev_hdrs[i]->event; + } + + ret = output_fn(output_ev_tbl, output_num, + output_queue, output_fn_args); + + if (unlikely((unsigned int)ret != output_num)) + em_free_multi(&output_ev_tbl[ret], output_num - ret); + } while (deq > 0); +} + +void +output_queue_buffering_drain(void) +{ + output_queue_track_t *const track = &em_locm.output_queue_track; + + for (unsigned int i = 0; i < track->idx_cnt; i++) { + int qidx = track->idx[i]; + queue_elem_t *output_q_elem = track->used_queues[qidx]; + env_spinlock_t *lock = &output_q_elem->output.lock; + + /* + * drain if lock available, otherwise another core is already + * draining so no need to do anything. 
+ */ + if (env_spinlock_trylock(lock)) { + output_queue_drain(output_q_elem); + env_spinlock_unlock(lock); + } + + track->idx[i] = 0; + track->used_queues[qidx] = NULL; + } + track->idx_cnt = 0; +} + +uint32_t event_vector_tbl(em_event_t vector_event, + em_event_t **event_tbl /*out*/) +{ + odp_event_t odp_event = event_em2odp(vector_event); + odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event); + odp_packet_t *pkt_tbl = NULL; + const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl/*out*/); + + *event_tbl = (em_event_t *)pkt_tbl; /* Careful! Points to same table */ + + if (!pkts) + return 0; + + event_hdr_t *ev_hdr_tbl[pkts]; + + /* + * Init the event-table as needed, might contain EM events or + * ODP packets depending on source. + */ + if (esv_enabled()) { + odp_packet_t odp_pkttbl[pkts]; + + /* + * Drop ESV generation from event handles by converting to + * odp-packets, then init as needed as EM events. + */ + events_em2pkt(*event_tbl/*in*/, odp_pkttbl/*out*/, pkts); + + event_init_pkt_multi(odp_pkttbl /*in*/, *event_tbl /*in,out*/, + ev_hdr_tbl /*out*/, pkts, false); + } else { + event_init_pkt_multi(pkt_tbl /*in*/, *event_tbl /*in,out*/, + ev_hdr_tbl /*out*/, pkts, false); + } + + return pkts; +} + +em_status_t event_vector_max_size(em_event_t vector_event, uint32_t *max_size /*out*/, + em_escope_t escope) +{ + odp_event_t odp_event = event_em2odp(vector_event); + odp_packet_vector_t pktvec = odp_packet_vector_from_event(odp_event); + odp_pool_t odp_pool = odp_packet_vector_pool(pktvec); + em_pool_t pool = pool_odp2em(odp_pool); + + if (unlikely(pool == EM_POOL_UNDEF)) { + /* + * Don't report an error if 'pool == EM_POOL_UNDEF' since that + * might happen if the vector is input from pktio that is using + * external (to EM) odp vector pools. + */ + *max_size = 0; + return EM_OK; /* EM does not have the max_size info */ + } + + const mpool_elem_t *pool_elem = pool_elem_get(pool); + int i = 0; + + if (unlikely(!pool_elem || + (EM_CHECK_LEVEL > 2 && !pool_allocated(pool_elem)))) { + *max_size = 0; + return INTERNAL_ERROR(EM_ERR_BAD_STATE, escope, + "Invalid pool:%" PRI_POOL "", pool); + } + + /* find subpool index 'i' that corresponds to 'odp_pool' */ + for (i = 0; i < pool_elem->num_subpools && pool_elem->odp_pool[i] != odp_pool; i++) + ; /* find subpool-index 'i' */ + if (unlikely(i == pool_elem->num_subpools)) { + /* not found */ + *max_size = 0; + return INTERNAL_ERROR(EM_ERR_NOT_FOUND, escope, + "Subpool not found, pool:%" PRI_POOL "", pool); + } + + /* subpool index found, store corresponding size */ + *max_size = pool_elem->size[i]; + + return EM_OK; +} diff --git a/src/em_event.h b/src/em_event.h index 2482e6ab..a992b8f9 100644 --- a/src/em_event.h +++ b/src/em_event.h @@ -1,1237 +1,1707 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
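
event_vector_tbl() above exposes the vector's packet table in place as an em_event_t table, initializing event headers and ESV state as needed. A usage sketch; process_event() is hypothetical:

em_event_t *ev_tbl = NULL;
uint32_t num = event_vector_tbl(vector_event, &ev_tbl /*out*/);

/* ev_tbl points into the vector itself - no copy is made, so it is
 * only valid while the vector event is owned */
for (uint32_t i = 0; i < num; i++)
	process_event(ev_tbl[i]);
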
- * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - /** - * @file - * EM internal event functions - * - */ - -#ifndef EM_EVENT_H_ -#define EM_EVENT_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef __clang__ -COMPILE_TIME_ASSERT((uintptr_t)EM_EVENT_UNDEF == (uintptr_t)ODP_EVENT_INVALID, - EM_EVENT_NOT_EQUAL_TO_ODP_EVENT); -#endif - -em_status_t event_init(void); -void print_event_info(void); -em_event_t pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool); -void output_queue_track(queue_elem_t *const output_q_elem); -void output_queue_drain(const queue_elem_t *output_q_elem); -void output_queue_buffering_drain(void); - -/** Convert an EM-event into an ODP-event */ -static inline odp_event_t -event_em2odp(em_event_t event) -{ - /* Valid for both ESV enabled and disabled */ - evhdl_t evhdl = {.event = event}; - - return (odp_event_t)(uintptr_t)evhdl.evptr; -} - -/** - * Convert an ODP-event into an EM-event - * - * @note The returned EM-event does NOT contain the ESV event-generation-count - * evhdl_t::evgen! This must be set separately when using ESV. - */ -static inline em_event_t -event_odp2em(odp_event_t odp_event) -{ - /* Valid for both ESV enabled and disabled */ - - /* - * Setting 'evhdl.event = odp_event' is equal to - * 'evhdl.evptr = odp_event, evhdl.evgen = 0' - * (evhdl.evgen still needs to be set when using ESV) - */ - evhdl_t evhdl = {.event = (em_event_t)(uintptr_t)odp_event}; - - return evhdl.event; -} - -/** Convert an array of EM-events into an array ODP-events */ -static inline void -events_em2odp(const em_event_t events[/*in*/], - odp_event_t odp_events[/*out*/], const unsigned int num) -{ - /* Valid for both ESV enabled and disabled */ - const evhdl_t *const evhdls = (const evhdl_t *)events; - - for (unsigned int i = 0; i < num; i++) - odp_events[i] = (odp_event_t)(uintptr_t)evhdls[i].evptr; -} - -/** - * Convert an array of ODP-events into an array of EM-events - * - * @note The output EM-events do NOT contain the ESV event-generation-count - * evhdl_t::evgen! This must be set separately when using ESV. 
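
A sketch of the round-trip implied by the note above: the em-to-odp conversion drops the ESV generation count, and the full handle is recovered from the event header (via event_to_hdr(), declared further below) where the allocating code stored it:

odp_event_t odp_ev = event_em2odp(event);	/* evgen dropped  */
em_event_t raw = event_odp2em(odp_ev);		/* evgen == 0     */
event_hdr_t *hdr = event_to_hdr(raw);
em_event_t full = hdr->event;			/* evgen restored */
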
- */ -static inline void -events_odp2em(const odp_event_t odp_events[/*in*/], - em_event_t events[/*out*/], const unsigned int num) -{ - /* Valid for both ESV enabled and disabled */ - evhdl_t *const evhdls = (evhdl_t *)events; - - /* - * Setting 'evhdls[i].event = odp_events[i]' is equal to - * 'evhdls[i].evptr = odp_events[i], evhdl[i].evgen = 0' - * (evhdls[i].evgen still needs to be set when using ESV) - */ - for (unsigned int i = 0; i < num; i++) - evhdls[i].event = (em_event_t)(uintptr_t)odp_events[i]; -} - -/** - * Initialize the event header of a packet allocated outside of EM. - */ -static inline em_event_t -evhdr_init_pkt(event_hdr_t *ev_hdr, em_event_t event, - odp_packet_t odp_pkt, bool is_extev) -{ - const void *user_ptr = odp_packet_user_ptr(odp_pkt); - const bool esv_ena = esv_enabled(); - - if (user_ptr == PKT_USERPTR_MAGIC_NBR) { - /* Event already initialized by EM */ - if (esv_ena) - return ev_hdr->event; - else - return event; - } - - /* - * ODP pkt from outside of EM - not allocated by EM & needs init - */ - odp_packet_user_ptr_set(odp_pkt, PKT_USERPTR_MAGIC_NBR); - ev_hdr->user_area.all = 0; /* uarea fields init when used */ - ev_hdr->event_type = EM_EVENT_TYPE_PACKET; - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - - if (!esv_ena) { - ev_hdr->event = event; - return event; - } - - /* - * ESV enabled: - */ - if (!em_shm->opt.esv.prealloc_pools) { - event = evstate_init(event, ev_hdr, is_extev); - } else { - /* esv.prealloc_pools == true: */ - odp_pool_t odp_pool = odp_packet_pool(odp_pkt); - em_pool_t pool = pool_odp2em(odp_pool); - - if (pool == EM_POOL_UNDEF) { - /* External odp pkt originates from an ODP-pool */ - event = evstate_init(event, ev_hdr, is_extev); - } else { - /* External odp pkt originates from an EM-pool */ - event = evstate_update(event, ev_hdr, is_extev); - } - } - - return event; -} - -static inline void -evhdr_init_pkt_multi(event_hdr_t *ev_hdrs[/*out*/], - em_event_t events[/*in,out*/], - const odp_packet_t odp_pkts[/*in*/], - const int num, bool is_extev) -{ - const bool esv_ena = esv_enabled(); - const void *user_ptr; - - int needs_init_idx[num]; - int needs_init_num = 0; - int idx; - - for (int i = 0; i < num; i++) { - user_ptr = odp_packet_user_ptr(odp_pkts[i]); - if (user_ptr == PKT_USERPTR_MAGIC_NBR) { - /* Event already initialized by EM */ - if (esv_ena) - events[i] = ev_hdrs[i]->event; - /* else events[i] = events[i] */ - } else { - needs_init_idx[needs_init_num] = i; - needs_init_num++; - } - } - - if (needs_init_num == 0) - return; - - /* - * ODP pkt from outside of EM - not allocated by EM & needs init - */ - - if (!esv_ena) { - for (int i = 0; i < needs_init_num; i++) { - idx = needs_init_idx[i]; - odp_packet_user_ptr_set(odp_pkts[idx], PKT_USERPTR_MAGIC_NBR); - ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */ - ev_hdrs[idx]->event_type = EM_EVENT_TYPE_PACKET; - ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF; - ev_hdrs[idx]->event = events[idx]; - } - - return; - } - - /* - * ESV enabled: - */ - for (int i = 0; i < needs_init_num; i++) { - idx = needs_init_idx[i]; - odp_packet_user_ptr_set(odp_pkts[idx], PKT_USERPTR_MAGIC_NBR); - ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */ - ev_hdrs[idx]->event_type = EM_EVENT_TYPE_PACKET; - ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF; - } - - if (!em_shm->opt.esv.prealloc_pools) { - for (int i = 0; i < needs_init_num; i++) { - idx = needs_init_idx[i]; - events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev); - } - - return; - } - - /* - * 
em_shm->opt.esv.prealloc_pools == true - */ - for (int i = 0; i < needs_init_num; i++) { - idx = needs_init_idx[i]; - - odp_pool_t odp_pool = odp_packet_pool(odp_pkts[idx]); - em_pool_t pool = pool_odp2em(odp_pool); - - if (pool == EM_POOL_UNDEF) { - /* External odp pkt originates from an ODP-pool */ - events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev); - } else { - /* External odp pkt originates from an EM-pool */ - events[idx] = evstate_update(events[idx], ev_hdrs[idx], is_extev); - } - } -} - -/** - * Initialize an external ODP event that have been input into EM. - * - * Initialize the event header if needed, i.e. if event originated from outside - * of EM from pktio or other input and was not allocated by EM via em_alloc(). - * The odp pkt-user-ptr is used to determine whether the header has been - * initialized or not. - */ -static inline em_event_t -event_init_odp(odp_event_t odp_event, bool is_extev, event_hdr_t **ev_hdr__out) -{ - const odp_event_type_t odp_type = odp_event_type(odp_event); - em_event_t event = event_odp2em(odp_event); /* return value */ - - switch (odp_type) { - case ODP_EVENT_PACKET: { - odp_packet_t odp_pkt = odp_packet_from_event(odp_event); - event_hdr_t *ev_hdr = odp_packet_user_area(odp_pkt); - - /* init event-hdr if needed (also ESV-state if used) */ - event = evhdr_init_pkt(ev_hdr, event, odp_pkt, is_extev); - if (ev_hdr__out) - *ev_hdr__out = ev_hdr; - return event; - } - case ODP_EVENT_BUFFER: { - const bool esv_ena = esv_enabled(); - - if (!ev_hdr__out && !esv_ena) - return event; - - odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); - event_hdr_t *ev_hdr = odp_buffer_addr(odp_buf); - - if (esv_ena) /* update event handle (ESV) */ - event = ev_hdr->event; - if (ev_hdr__out) - *ev_hdr__out = ev_hdr; - return event; - } - default: - INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), - EM_ESCOPE_EVENT_INIT_ODP, - "Unexpected odp event type:%u", odp_type); - /* never reached */ - return EM_EVENT_UNDEF; - } -} - -/* Helper to event_init_odp_multi() */ -static inline void -event_init_pkt_multi(const odp_packet_t odp_pkts[/*in*/], - em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/], - const int num, bool is_extev) -{ - for (int i = 0; i < num; i++) - ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]); - - evhdr_init_pkt_multi(ev_hdrs, events, odp_pkts, num, is_extev); -} - -/* Helper to event_init_odp_multi() */ -static inline void -event_init_buf_multi(const odp_buffer_t odp_bufs[/*in*/], - em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/], - const int num) -{ - for (int i = 0; i < num; i++) - ev_hdrs[i] = odp_buffer_addr(odp_bufs[i]); - - if (esv_enabled()) { - /* update event handle (ESV) */ - for (int i = 0; i < num; i++) - events[i] = ev_hdrs[i]->event; - } -} - -/** - * Convert from EM events to event headers and initialize the headers as needed. - * - * Initialize the event header if needed, i.e. if event originated from outside - * of EM from pktio or other input and was not allocated by EM via em_alloc(). - * The odp pkt-user-ptr is used to determine whether the header has been - * initialized or not. 
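
The user-ptr check mentioned above, sketched in isolation: packets allocated by EM carry a magic user-ptr, so external packets (e.g. from pktio input) can be recognized and get their EM event header initialized exactly once.

odp_packet_t pkt = odp_packet_from_event(odp_event);

if (odp_packet_user_ptr(pkt) == PKT_USERPTR_MAGIC_NBR) {
	/* allocated by EM: hdr in the pkt user-area is already valid */
} else {
	/* external pkt: mark it and init the EM event header */
	odp_packet_user_ptr_set(pkt, PKT_USERPTR_MAGIC_NBR);
	/* ... event-hdr and ESV init as in evhdr_init_pkt() ... */
}
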
- */ -static inline void -event_init_odp_multi(const odp_event_t odp_events[/*in*/], - em_event_t events[/*out*/], event_hdr_t *ev_hdrs[/*out*/], - const int num, bool is_extev) -{ - odp_event_type_t odp_type; - int ev = 0; /* event & ev_hdr tbl index*/ - - events_odp2em(odp_events, events/*out*/, num); - - do { - int num_type = odp_event_type_multi(&odp_events[ev], num - ev, - &odp_type /*out*/); - if (likely(odp_type == ODP_EVENT_PACKET)) { - odp_packet_t odp_pkts[num]; - - odp_packet_from_event_multi(odp_pkts /*out*/, - &odp_events[ev], - num_type); - event_init_pkt_multi(odp_pkts /*in*/, - &events[ev] /*in,out*/, - &ev_hdrs[ev] /*out*/, - num_type, is_extev); - } else if (likely(odp_type == ODP_EVENT_BUFFER)) { - odp_buffer_t odp_bufs[num]; - - for (int i = 0; i < num_type; i++) - odp_bufs[i] = odp_buffer_from_event(odp_events[ev + i]); - - event_init_buf_multi(odp_bufs /*in*/, - &events[ev] /*in,out*/, - &ev_hdrs[ev] /*out*/, - num_type); - } else { - INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), - EM_ESCOPE_EVENT_INIT_ODP_MULTI, - "Unexpected odp event type:%u (%d events)", - odp_type, num_type); - __builtin_unreachable(); - } - - ev += num_type; - } while (ev < num); -} - -/** - * Convert from EM event to event header. - * - * Does NOT initialize the event header. - */ -static inline event_hdr_t * -event_to_hdr(em_event_t event) -{ - odp_event_t odp_event = event_em2odp(event); - odp_packet_t odp_pkt; - odp_buffer_t odp_buf; - event_hdr_t *ev_hdr; - - odp_event_type_t evtype = odp_event_type(odp_event); - - switch (evtype) { - case ODP_EVENT_PACKET: - odp_pkt = odp_packet_from_event(odp_event); - ev_hdr = odp_packet_user_area(odp_pkt); - break; - case ODP_EVENT_BUFFER: - odp_buf = odp_buffer_from_event(odp_event); - ev_hdr = odp_buffer_addr(odp_buf); - break; - default: - INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), - EM_ESCOPE_EVENT_TO_HDR, - "Unexpected odp event type:%u", evtype); - /* avoids: "error: 'ev_hdr' may be used uninitialized" */ - __builtin_unreachable(); - break; - } - - return ev_hdr; -} - -/** - * Convert from EM events to event headers. - * - * Does NOT initialize the event headers. 
- * - * @param[in] events Input array of 'num' valid events - * @param[out] ev_hdrs Output array with room to store 'num' pointers to the - * corresponding event headers - * @param num Number of entries in 'events[]' and 'ev_hdrs[]' - */ -static inline void -event_to_hdr_multi(const em_event_t events[], event_hdr_t *ev_hdrs[/*out*/], - const int num) -{ - odp_event_t odp_events[num]; - odp_packet_t odp_pkts[num]; - odp_buffer_t odp_buf; - odp_event_type_t evtype; - int num_type; - int ev = 0; /* event & ev_hdr tbl index*/ - int i; - - events_em2odp(events, odp_events/*out*/, num); - - do { - num_type = - odp_event_type_multi(&odp_events[ev], num - ev, &evtype/*out*/); - - switch (evtype) { - case ODP_EVENT_PACKET: - odp_packet_from_event_multi(odp_pkts, &odp_events[ev], - num_type); - for (i = 0; i < num_type; i++) - ev_hdrs[ev + i] = odp_packet_user_area(odp_pkts[i]); - break; - - case ODP_EVENT_BUFFER: - for (i = 0; i < num_type; i++) { - odp_buf = odp_buffer_from_event(odp_events[ev + i]); - ev_hdrs[ev + i] = odp_buffer_addr(odp_buf); - } - break; - default: - INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), - EM_ESCOPE_EVENT_TO_HDR_MULTI, - "Unexpected odp event type:%u", evtype); - /* unreachable */ - __builtin_unreachable(); - break; - } - - ev += num_type; - } while (ev < num); -} - -/** Convert from event header to EM event */ -static inline em_event_t -event_hdr_to_event(const event_hdr_t *const event_hdr) -{ - return event_hdr->event; -} - -/** - * Allocate & initialize an event based on an odp-buf. - */ -static inline event_hdr_t * -event_alloc_buf(const mpool_elem_t *const pool_elem, - size_t size, em_event_type_t type) -{ - odp_buffer_t odp_buf = ODP_BUFFER_INVALID; - int subpool; - - /* - * Allocate from the 'best fit' subpool, or if that is full, from the - * next subpool that has buffers available of a bigger size. - */ - subpool = pool_find_subpool(pool_elem, size); - if (unlikely(subpool < 0)) - return NULL; - - for (; subpool < pool_elem->num_subpools; subpool++) { - odp_pool_t odp_pool = pool_elem->odp_pool[subpool]; - - if (EM_CHECK_LEVEL > 1 && - unlikely(odp_pool == ODP_POOL_INVALID)) - return NULL; - - odp_buf = odp_buffer_alloc(odp_pool); - if (likely(odp_buf != ODP_BUFFER_INVALID)) - break; - } - - if (unlikely(odp_buf == ODP_BUFFER_INVALID)) - return NULL; - - /* - * odp buffer now allocated - init the EM event header - * at the beginning of the buffer. - */ - event_hdr_t *const ev_hdr = odp_buffer_addr(odp_buf); - odp_event_t odp_event = odp_buffer_to_event(odp_buf); - em_event_t event = event_odp2em(odp_event); - - ev_hdr->user_area.all = 0; - ev_hdr->user_area.req_size = pool_elem->user_area.req_size; - ev_hdr->user_area.pad_size = pool_elem->user_area.pad_size; - ev_hdr->user_area.isinit = 1; - - ev_hdr->event = event; /* store this event handle */ - /* For optimization, no initialization for feature variables */ - ev_hdr->event_size = size; /* store requested size */ - ev_hdr->align_offset = pool_elem->align_offset; - ev_hdr->event_type = type; /* store the event type */ - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - - return ev_hdr; -} - -/** - * Allocate & initialize multiple events based on odp-bufs. 
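
A concrete illustration of the best-fit subpool walk used by the alloc helpers in this hunk; the subpool sizes are hypothetical:

/* subpools sized 256 B / 1024 B / 4096 B:
 *  - a 300 B request best-fits subpool 1 (1024 B)
 *  - if subpool 1 is exhausted, the loop falls through to subpool 2
 *  - allocation fails only when every subpool >= the best fit is empty
 */
int subpool = pool_find_subpool(pool_elem, 300); /* -> 1 */

for (; subpool < pool_elem->num_subpools; subpool++) {
	/* try pool_elem->odp_pool[subpool], break on success */
}
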
- */ -static inline int -event_alloc_buf_multi(em_event_t events[/*out*/], const int num, - const mpool_elem_t *pool_elem, size_t size, - em_event_type_t type) -{ - odp_buffer_t odp_bufs[num]; - odp_event_t odp_event; - event_hdr_t *ev_hdrs[num]; - int subpool; - const bool esv_ena = esv_enabled(); - - /* - * Allocate from the 'best fit' subpool, or if that is full, from the - * next subpool that has buffers available of a bigger size. - */ - subpool = pool_find_subpool(pool_elem, size); - if (unlikely(subpool < 0)) - return 0; - - int num_req = num; - int num_bufs = 0; - int i; - - for (; subpool < pool_elem->num_subpools; subpool++) { - odp_pool_t odp_pool = pool_elem->odp_pool[subpool]; - - if (EM_CHECK_LEVEL > 1 && - unlikely(odp_pool == ODP_POOL_INVALID)) - return 0; - - int ret = odp_buffer_alloc_multi(odp_pool, &odp_bufs[num_bufs], - num_req); - if (unlikely(ret <= 0)) - continue; /* try next subpool */ - - /* store the allocated events[] */ - for (i = num_bufs; i < num_bufs + ret; i++) { - odp_event = odp_buffer_to_event(odp_bufs[i]); - events[i] = event_odp2em(odp_event); - } - - /* Init 'ret' ev-hdrs from this 'subpool'=='odp-pool' */ - for (i = num_bufs; i < num_bufs + ret; i++) - ev_hdrs[i] = odp_buffer_addr(odp_bufs[i]); - - if (esv_ena) - evstate_alloc_multi(&events[num_bufs] /*in/out*/, - &ev_hdrs[num_bufs], ret); - - for (i = num_bufs; i < num_bufs + ret; i++) { - /* For optimization, no init for feature vars */ - if (!esv_ena) - ev_hdrs[i]->event = events[i]; - - ev_hdrs[i]->user_area.all = 0; - ev_hdrs[i]->user_area.req_size = pool_elem->user_area.req_size; - ev_hdrs[i]->user_area.pad_size = pool_elem->user_area.pad_size; - ev_hdrs[i]->user_area.isinit = 1; - - ev_hdrs[i]->event_size = size; - ev_hdrs[i]->align_offset = pool_elem->align_offset; - ev_hdrs[i]->event_type = type; - ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF; - } - - num_bufs += ret; - if (likely(num_bufs == num)) - break; /* all allocated */ - num_req -= ret; - } - - return num_bufs; /* number of allocated bufs (0 ... num) */ -} - -/** - * Allocate & initialize an event based on an odp-pkt. - */ -static inline event_hdr_t * -event_alloc_pkt(const mpool_elem_t *pool_elem, - size_t size, em_event_type_t type) -{ - const uint32_t push_len = pool_elem->align_offset; - uint32_t pull_len; - size_t alloc_size; - odp_packet_t odp_pkt = ODP_PACKET_INVALID; - int subpool; - - if (size > push_len) { - alloc_size = size - push_len; - pull_len = 0; - } else { - alloc_size = 1; /* min allowed */ - pull_len = push_len + 1 - size; - } - - /* - * Allocate from the 'best fit' subpool, or if that is full, from the - * next subpool that has pkts available of a bigger size. 
- */ - subpool = pool_find_subpool(pool_elem, size); - if (unlikely(subpool < 0)) - return NULL; - - for (; subpool < pool_elem->num_subpools; subpool++) { - odp_pool_t odp_pool = pool_elem->odp_pool[subpool]; - - if (EM_CHECK_LEVEL > 1 && - unlikely(odp_pool == ODP_POOL_INVALID)) - return NULL; - - odp_pkt = odp_packet_alloc(odp_pool, alloc_size); - if (likely(odp_pkt != ODP_PACKET_INVALID)) - break; - } - - if (unlikely(odp_pkt == ODP_PACKET_INVALID)) - return NULL; - - /* - * odp packet now allocated - adjust the payload start address and - * init the EM event header in the odp-pkt user-area - */ - - /* Adjust event payload start-address based on alignment config */ - const void *ptr; - - if (push_len) { - ptr = odp_packet_push_head(odp_pkt, push_len); - if (unlikely(!ptr)) - goto err_pktalloc; - } - if (pull_len) { - ptr = odp_packet_pull_tail(odp_pkt, pull_len); - if (unlikely(!ptr)) - goto err_pktalloc; - } - - /* - * Set the pkt user ptr to be able to recognize pkt-events that - * EM has created vs pkts from pkt-input that needs their - * ev-hdrs to be initialized. - */ - odp_packet_user_ptr_set(odp_pkt, PKT_USERPTR_MAGIC_NBR); - - event_hdr_t *const ev_hdr = odp_packet_user_area(odp_pkt); - odp_event_t odp_event = odp_packet_to_event(odp_pkt); - em_event_t event = event_odp2em(odp_event); - - if (unlikely(ev_hdr == NULL)) - goto err_pktalloc; - - ev_hdr->user_area.all = 0; - ev_hdr->user_area.req_size = pool_elem->user_area.req_size; - ev_hdr->user_area.pad_size = pool_elem->user_area.pad_size; - ev_hdr->user_area.isinit = 1; - - ev_hdr->event = event; /* store this event handle */ - ev_hdr->event_size = size; /* store requested size */ - /* ev_hdr->align_offset = needed by odp bufs only */ - ev_hdr->event_type = type; /* store the event type */ - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - - return ev_hdr; - -err_pktalloc: - odp_packet_free(odp_pkt); - return NULL; -} - -/* - * Helper for event_alloc_pkt_multi() - */ -static inline int -pktalloc_multi(odp_packet_t odp_pkts[/*out*/], int num, - odp_pool_t odp_pool, size_t size, - uint32_t push_len, uint32_t pull_len) -{ - int ret = odp_packet_alloc_multi(odp_pool, size, odp_pkts, num); - - if (unlikely(ret <= 0)) - return 0; - - const int num_pkts = ret; /* return value > 0 */ - const void *ptr = NULL; - int i; - - /* Adjust payload start-address based on alignment config */ - if (push_len) { - for (i = 0; i < num_pkts; i++) { - ptr = odp_packet_push_head(odp_pkts[i], push_len); - if (unlikely(!ptr)) - goto err_pktalloc_multi; - } - } - if (pull_len) { - for (i = 0; i < num_pkts; i++) { - ptr = odp_packet_pull_tail(odp_pkts[i], pull_len); - if (unlikely(!ptr)) - goto err_pktalloc_multi; /* only before esv */ - } - } - - /* - * Set the pkt user ptr to be able to recognize pkt-events that - * EM has created vs pkts from pkt-input that needs their - * ev-hdrs to be initialized. - */ - for (i = 0; i < num_pkts; i++) - odp_packet_user_ptr_set(odp_pkts[i], PKT_USERPTR_MAGIC_NBR); - - return num_pkts; - -err_pktalloc_multi: - odp_packet_free_multi(odp_pkts, num_pkts); - return 0; -} - -/** - * Allocate & initialize multiple events based on odp-pkts. 
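
A worked example of the align_offset head/tail arithmetic in event_alloc_pkt()/pktalloc_multi() above; the numbers are hypothetical:

/* pool align_offset = push_len = 32 B:
 *
 * requested size = 100 B (> push_len):
 *	alloc_size = 100 - 32 = 68, pull_len = 0
 *	odp_packet_alloc(pool, 68)	-> 68 B of data
 *	odp_packet_push_head(pkt, 32)	-> 100 B, start moved 32 B
 *					   back into the headroom
 *
 * requested size = 10 B (<= push_len):
 *	alloc_size = 1 (min allowed), pull_len = 32 + 1 - 10 = 23
 *	push_head(32), pull_tail(23)	-> exactly 10 B of data
 *
 * Net effect in both cases: the payload begins 'align_offset' bytes
 * before an address with the pool's natural alignment.
 */
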
- */ -static inline int -event_alloc_pkt_multi(em_event_t events[/*out*/], const int num, - const mpool_elem_t *pool_elem, size_t size, - em_event_type_t type) -{ - const uint32_t push_len = pool_elem->align_offset; - uint32_t pull_len; - odp_packet_t odp_pkts[num]; - /* use same output-array: odp_events[] = events[] */ - odp_event_t *const odp_events = (odp_event_t *)events; - event_hdr_t *ev_hdrs[num]; - size_t alloc_size; - int subpool; - const bool esv_ena = esv_enabled(); - - if (size > push_len) { - alloc_size = size - push_len; - pull_len = 0; - } else { - alloc_size = 1; /* min allowed */ - pull_len = push_len + 1 - size; - } - - /* - * Allocate from the 'best fit' subpool, or if that is full, from the - * next subpool that has pkts available of a bigger size. - */ - subpool = pool_find_subpool(pool_elem, size); - if (unlikely(subpool < 0)) - return 0; - - int num_req = num; - int num_pkts = 0; - int i; - - for (; subpool < pool_elem->num_subpools; subpool++) { - odp_pool_t odp_pool = pool_elem->odp_pool[subpool]; - - if (EM_CHECK_LEVEL > 1 && - unlikely(odp_pool == ODP_POOL_INVALID)) - return 0; - - int ret = pktalloc_multi(&odp_pkts[num_pkts], num_req, - odp_pool, alloc_size, - push_len, pull_len); - if (unlikely(ret <= 0)) - continue; /* try next subpool */ - - /* - * Init 'ret' ev-hdrs from this 'subpool'=='odp-pool'. - * Note: odp_events[] points&writes into events[out] - */ - odp_packet_to_event_multi(&odp_pkts[num_pkts], - &odp_events[num_pkts], ret); - - for (i = num_pkts; i < num_pkts + ret; i++) - ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]); - - /* - * Note: events[] == odp_events[] before ESV init. - * Don't touch odp_events[] during this loop-round anymore. - */ - if (esv_ena) - evstate_alloc_multi(&events[num_pkts] /*in/out*/, - &ev_hdrs[num_pkts], ret); - - for (i = num_pkts; i < num_pkts + ret; i++) { - /* For optimization, no init for feature vars */ - if (!esv_ena) - ev_hdrs[i]->event = events[i]; - - ev_hdrs[i]->user_area.all = 0; - ev_hdrs[i]->user_area.req_size = pool_elem->user_area.req_size; - ev_hdrs[i]->user_area.pad_size = pool_elem->user_area.pad_size; - ev_hdrs[i]->user_area.isinit = 1; - - ev_hdrs[i]->event_size = size; - /* ev_hdr->align_offset = needed by odp bufs only */ - ev_hdrs[i]->event_type = type; - ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF; - } - - num_pkts += ret; - if (likely(num_pkts == num)) - break; /* all allocated */ - num_req -= ret; - } - - return num_pkts; /* number of allocated pkts */ -} - -/** - * Helper for em_alloc() and em_event_clone() - */ -static inline event_hdr_t * -event_alloc(const mpool_elem_t *pool_elem, size_t size, em_event_type_t type) -{ - /* - * EM event pools created with type=PKT can support: - * - SW events (bufs) - * - pkt events. - * - * EM event pools created with type=SW can support: - * - SW events (bufs) only - */ - event_hdr_t *ev_hdr = NULL; - - if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) - ev_hdr = event_alloc_pkt(pool_elem, size, type); - else if (pool_elem->event_type == EM_EVENT_TYPE_SW) - ev_hdr = event_alloc_buf(pool_elem, size, type); - - /* event now allocated (if !NULL): ev_hdr->event */ - - /* - * ESV state update for the event still needs to be done by the caller, - * not done here since there are different callers of this function. 
- * if (esv_enabled()) - * event = evstate_alloc/clone/...(event, ev_hdr); - */ - - return ev_hdr; /* can be NULL */ -} - -/** - * Start-up helper for pool preallocation - */ -static inline em_event_t -event_prealloc(const mpool_elem_t *pool_elem, size_t size, em_event_type_t type) -{ - /* - * EM event pools created with type=PKT can support: - * - SW events (bufs) - * - pkt events. - * - * EM event pools created with type=SW can support: - * - SW events (bufs) only - */ - event_hdr_t *ev_hdr = NULL; - - if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) - ev_hdr = event_alloc_pkt(pool_elem, size, type); - else if (pool_elem->event_type == EM_EVENT_TYPE_SW) - ev_hdr = event_alloc_buf(pool_elem, size, type); - - if (unlikely(ev_hdr == NULL)) - return EM_EVENT_UNDEF; - - /* event now allocated */ - em_event_t event = ev_hdr->event; - - if (esv_enabled()) - event = evstate_prealloc(event, ev_hdr); - - return event; -} - -static inline event_hdr_t * -start_node_to_event_hdr(list_node_t *const list_node) -{ - event_hdr_t *const ev_hdr = (event_hdr_t *)(uintptr_t) - ((uint8_t *)list_node - offsetof(event_hdr_t, start_node)); - - return likely(list_node != NULL) ? ev_hdr : NULL; -} - -static inline em_status_t -send_event(em_event_t event, const queue_elem_t *q_elem) -{ - odp_event_t odp_event = event_em2odp(event); - odp_queue_t odp_queue = q_elem->odp_queue; - int ret; - - if (unlikely(EM_CHECK_LEVEL > 1 && - (odp_event == ODP_EVENT_INVALID || - odp_queue == ODP_QUEUE_INVALID))) - return EM_ERR_NOT_FOUND; - - if (unlikely(EM_CHECK_LEVEL > 0 && - q_elem->state != EM_QUEUE_STATE_READY)) { - return EM_ERR_BAD_STATE; - } - - ret = odp_queue_enq(odp_queue, odp_event); - if (unlikely(EM_CHECK_LEVEL > 0 && ret != 0)) - return EM_ERR_LIB_FAILED; - - return EM_OK; -} - -static inline int -send_event_multi(const em_event_t events[], const int num, - const queue_elem_t *q_elem) -{ - odp_event_t odp_events[num]; - odp_queue_t odp_queue = q_elem->odp_queue; - int ret; - - if (unlikely(EM_CHECK_LEVEL > 1 && odp_queue == ODP_QUEUE_INVALID)) - return 0; - - if (unlikely(EM_CHECK_LEVEL > 0 && - q_elem->state != EM_QUEUE_STATE_READY)) { - return 0; - } - - events_em2odp(events, odp_events/*out*/, num); - - ret = odp_queue_enq_multi(odp_queue, odp_events, num); - if (unlikely(ret < 0)) - return 0; - - return ret; -} - -static inline em_status_t -send_local(em_event_t event, event_hdr_t *const ev_hdr, - queue_elem_t *const q_elem) -{ - em_locm_t *const locm = &em_locm; - const em_queue_prio_t prio = q_elem->priority; - odp_event_t odp_event = event_em2odp(event); - int ret; - - if (unlikely(EM_CHECK_LEVEL > 0 && - q_elem->state != EM_QUEUE_STATE_READY)) - return EM_ERR_BAD_STATE; - - ev_hdr->q_elem = q_elem; - - ret = odp_queue_enq(locm->local_queues.prio[prio].queue, odp_event); - if (likely(ret == 0)) { - locm->local_queues.empty = 0; - locm->local_queues.prio[prio].empty_prio = 0; - return EM_OK; - } - - return EM_ERR_LIB_FAILED; -} - -static inline int -send_local_multi(const em_event_t events[], event_hdr_t *const ev_hdrs[], - const int num, queue_elem_t *const q_elem) -{ - em_locm_t *const locm = &em_locm; - const em_queue_prio_t prio = q_elem->priority; - odp_event_t odp_events[num]; - int enq; - int i; - - if (unlikely(EM_CHECK_LEVEL > 0 && - q_elem->state != EM_QUEUE_STATE_READY)) - return 0; - - for (i = 0; i < num; i++) - ev_hdrs[i]->q_elem = q_elem; - - events_em2odp(events, odp_events, num); - - enq = odp_queue_enq_multi(locm->local_queues.prio[prio].queue, - odp_events, num); - if (likely(enq > 0)) { 
- locm->local_queues.empty = 0; - locm->local_queues.prio[prio].empty_prio = 0; - return enq; - } - - return 0; -} - -/** - * Send one event to a queue of type EM_QUEUE_TYPE_OUTPUT - */ -static inline em_status_t -send_output(em_event_t event, event_hdr_t *const ev_hdr, - queue_elem_t *const output_q_elem) -{ - const em_sched_context_type_t sched_ctx_type = - em_locm.current.sched_context_type; - - if (unlikely(EM_CHECK_LEVEL > 0 && - output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED)) - return EM_ERR_BAD_STATE; - - /* - * An event sent to an output queue from an ordered context needs to - * be 're-ordered' before calling the user provided output-function. - * Order is maintained by enqueuing and dequeuing into an odp-queue - * that takes care of order. - */ - if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) { - const odp_queue_t odp_queue = output_q_elem->odp_queue; - odp_event_t odp_event = event_em2odp(event); - int ret; - - if (unlikely(EM_CHECK_LEVEL > 1 && - (odp_event == ODP_EVENT_INVALID || - odp_queue == ODP_QUEUE_INVALID))) - return EM_ERR_NOT_FOUND; - - if (!EM_OUTPUT_QUEUE_IMMEDIATE) - output_queue_track(output_q_elem); - - /* enqueue to enforce odp to handle ordering */ - ret = odp_queue_enq(odp_queue, odp_event); - if (unlikely(ret != 0)) - return EM_ERR_LIB_FAILED; - - /* return value must be EM_OK after this since event enqueued */ - - if (EM_OUTPUT_QUEUE_IMMEDIATE) { - env_spinlock_t *const lock = - &output_q_elem->output.lock; - - if (!env_spinlock_trylock(lock)) - return EM_OK; - output_queue_drain(output_q_elem); - env_spinlock_unlock(lock); - } - - return EM_OK; - } - - /* - * No ordered context - call output_fn() directly - */ - const em_queue_t output_queue = output_q_elem->queue; - const em_output_func_t output_fn = - output_q_elem->output.output_conf.output_fn; - void *const output_fn_args = - output_q_elem->output.output_conf.output_fn_args; - int sent; - - if (!esv_enabled()) { - sent = output_fn(&event, 1, output_queue, output_fn_args); - if (unlikely(sent != 1)) - return EM_ERR_OPERATION_FAILED; - return EM_OK; - } - - /* - * ESV enabled: - */ - event = evstate_em2usr(event, ev_hdr, EVSTATE__OUTPUT); - sent = output_fn(&event, 1, output_queue, output_fn_args); - if (likely(sent == 1)) - return EM_OK; /* output success! */ - - /* revert event-state on output-error */ - event = evstate_em2usr_revert(event, ev_hdr, EVSTATE__OUTPUT__FAIL); - - return EM_ERR_OPERATION_FAILED; -} - -/** - * Send events to a queue of type EM_QUEUE_TYPE_OUTPUT - */ -static inline int -send_output_multi(const em_event_t events[], event_hdr_t *const ev_hdrs[], - const unsigned int num, queue_elem_t *const output_q_elem) -{ - const em_sched_context_type_t sched_ctx_type = - em_locm.current.sched_context_type; - int sent; - - if (unlikely(EM_CHECK_LEVEL > 0 && - output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED)) - return 0; - - /* - * Event sent to an output queue from an ordered context needs to - * be 're-ordered' before calling the user provided output-function. - * Order is maintained by enqueuing and dequeuing into an odp-queue - * that takes care of order. 
- */ - if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) { - const odp_queue_t odp_queue = output_q_elem->odp_queue; - odp_event_t odp_events[num]; - - if (unlikely(EM_CHECK_LEVEL > 1 && - odp_queue == ODP_QUEUE_INVALID)) - return 0; - - if (!EM_OUTPUT_QUEUE_IMMEDIATE) - output_queue_track(output_q_elem); - - events_em2odp(events, odp_events/*out*/, num); - - /* enqueue to enforce odp to handle ordering */ - sent = odp_queue_enq_multi(odp_queue, odp_events, num); - if (unlikely(sent <= 0)) - return 0; - - /* the return value must be the number of enqueued events */ - - if (EM_OUTPUT_QUEUE_IMMEDIATE) { - env_spinlock_t *const lock = - &output_q_elem->output.lock; - - if (!env_spinlock_trylock(lock)) - return sent; - output_queue_drain(output_q_elem); - env_spinlock_unlock(lock); - } - - return sent; - } - - /* - * No ordered context - call output_fn() directly - */ - const em_queue_t output_queue = output_q_elem->queue; - const em_output_func_t output_fn = output_q_elem->output.output_conf.output_fn; - void *const output_fn_args = output_q_elem->output.output_conf.output_fn_args; - - if (!esv_enabled()) - return output_fn(events, num, output_queue, output_fn_args); - - /* - * ESV enabled: - */ - em_event_t tmp_events[num]; - - /* need copy, don't change "const events[]" */ - for (unsigned int i = 0; i < num; i++) - tmp_events[i] = events[i]; - evstate_em2usr_multi(tmp_events/*in/out*/, ev_hdrs, num, - EVSTATE__OUTPUT_MULTI); - sent = output_fn(tmp_events, num, output_queue, output_fn_args); - - if (unlikely(sent < (int)num && sent >= 0)) - evstate_em2usr_revert_multi(&tmp_events[sent]/*in/out*/, - &ev_hdrs[sent], num - sent, - EVSTATE__OUTPUT_MULTI__FAIL); - return sent; -} - -/** - * Return a pointer to the EM event user payload. - * Helper to e.g. EM API em_event_pointer() - */ -static inline void * -event_pointer(em_event_t event) -{ - odp_event_t odp_event = event_em2odp(event); - odp_event_type_t odp_etype = odp_event_type(odp_event); - void *ev_ptr = NULL; /* return value */ - - if (odp_etype == ODP_EVENT_PACKET) { - odp_packet_t odp_pkt = odp_packet_from_event(odp_event); - - ev_ptr = odp_packet_data(odp_pkt); - } else if (odp_etype == ODP_EVENT_BUFFER) { - odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); - const event_hdr_t *ev_hdr = odp_buffer_addr(odp_buf); - size_t uarea_pad_sz = 0; - - if (ev_hdr->user_area.isinit) - uarea_pad_sz = ev_hdr->user_area.pad_size; - - ev_ptr = (void *)((uintptr_t)ev_hdr + sizeof(event_hdr_t) - + uarea_pad_sz - ev_hdr->align_offset); - } - - return ev_ptr; /* NULL for unrecognized odp_etype */ -} - -#ifdef __cplusplus -} -#endif - -#endif /* EM_EVENT_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + /** + * @file + * EM internal event functions + * + */ + +#ifndef EM_EVENT_H_ +#define EM_EVENT_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef __clang__ +COMPILE_TIME_ASSERT((uintptr_t)EM_EVENT_UNDEF == (uintptr_t)ODP_EVENT_INVALID, + EM_EVENT_NOT_EQUAL_TO_ODP_EVENT); +#endif + +em_status_t event_init(void); +void print_event_info(void); +em_event_t pkt_clone_odp(odp_packet_t pkt, odp_pool_t pkt_pool); +void output_queue_track(queue_elem_t *const output_q_elem); +void output_queue_drain(const queue_elem_t *output_q_elem); +void output_queue_buffering_drain(void); + +uint32_t event_vector_tbl(em_event_t vector_event, em_event_t **event_tbl/*out*/); +em_status_t event_vector_max_size(em_event_t vector_event, uint32_t *max_size /*out*/, + em_escope_t escope); + +/** + * Convert an array of EM-events into an array of ODP-packets. + * The content must be known to be packets. + */ +static inline void +events_em2pkt(const em_event_t events[/*in*/], + odp_packet_t odp_pkts[/*out*/], const unsigned int num) +{ + /* Valid for both ESV enabled and disabled */ + const evhdl_t *const evhdls = (const evhdl_t *)events; + + for (unsigned int i = 0; i < num; i++) + odp_pkts[i] = odp_packet_from_event((odp_event_t)(uintptr_t)evhdls[i].evptr); +} + +/** + * Convert an array of EM-events into an array of ODP-packets in-place (i.e. + * convert using the same memory area for output) when the event type is known + * for sure to be packets. Be careful! + * + * @return Pointer to odp packet table: odp_packet_t pkts[num] + * Uses the same memory area for the output of 'pkts[num]' as for + * the input 'events[num]' (thus overwrites 'events[num]' with + * 'pkts[num]'). + */ +static inline odp_packet_t * +events_em2pkt_inplace(em_event_t events[/*in*/], const unsigned int num) +{ + /* Valid for both ESV enabled and disabled */ + evhdl_t *const evhdls = (evhdl_t *)events; + odp_packet_t *const pkts = (odp_packet_t *)events; /* careful! */ + + /* Careful! Overwrites events[num] with pkts[num] */ + for (unsigned int i = 0; i < num; i++) + pkts[i] = odp_packet_from_event((odp_event_t)(uintptr_t)evhdls[i].evptr); + + return pkts; +} + +/** + * Initialize the event header of a packet allocated outside of EM. 
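
events_em2pkt() and events_em2pkt_inplace() above can strip the ESV generation bits with a plain field read and cast because the EM event handle is one 64-bit word overlaying a pointer field and a generation field. A self-contained model of such a handle; the 48/16 bit split below is an assumption made for this sketch, not necessarily the real evhdl_t layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative pointer+generation handle, modeled after evhdl_t */
typedef union {
	uint64_t event;
	struct {
		uint64_t evptr : 48; /* underlying odp event "pointer" */
		uint64_t evgen : 16; /* ESV generation count */
	};
} handle_t;

int main(void)
{
	handle_t h = { .event = 0 };

	h.evptr = 0xdeadbeef; /* pretend odp event */
	h.evgen = 3;          /* third incarnation of this buffer */

	/* Dropping evgen (as events_em2pkt() does) recovers the raw pointer */
	uint64_t raw = h.evptr;

	printf("handle=0x%016lx raw=0x%lx gen=%lu\n",
	       (unsigned long)h.event, (unsigned long)raw,
	       (unsigned long)h.evgen);
	return 0;
}
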
+ */ +static inline em_event_t +evhdr_init_pkt(event_hdr_t *ev_hdr, em_event_t event, + odp_packet_t odp_pkt, bool is_extev) +{ + const void *user_ptr = odp_packet_user_ptr(odp_pkt); + const bool esv_ena = esv_enabled(); + + if (user_ptr == PKT_USERPTR_MAGIC_NBR) { + /* Event already initialized by EM */ + if (esv_ena) + return ev_hdr->event; + else + return event; + } + + /* + * ODP pkt from outside of EM - not allocated by EM & needs init + */ + odp_packet_user_ptr_set(odp_pkt, PKT_USERPTR_MAGIC_NBR); + ev_hdr->user_area.all = 0; /* uarea fields init when used */ + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdr->event_type = EM_EVENT_TYPE_PACKET; + ev_hdr->flags.all = 0; + + if (!esv_ena) { + ev_hdr->event = event; + return event; + } + + /* + * ESV enabled: + */ + if (!em_shm->opt.esv.prealloc_pools) { + event = evstate_init(event, ev_hdr, is_extev); + } else { + /* esv.prealloc_pools == true: */ + odp_pool_t odp_pool = odp_packet_pool(odp_pkt); + em_pool_t pool = pool_odp2em(odp_pool); + + if (pool == EM_POOL_UNDEF) { + /* External odp pkt originates from an ODP-pool */ + event = evstate_init(event, ev_hdr, is_extev); + } else { + /* External odp pkt originates from an EM-pool */ + event = evstate_update(event, ev_hdr, is_extev); + } + } + + return event; +} + +/** + * Initialize the event headers of packets allocated outside of EM. + */ +static inline void +evhdr_init_pkt_multi(event_hdr_t *const ev_hdrs[], + em_event_t events[/*in,out*/], + const odp_packet_t odp_pkts[/*in*/], + const int num, bool is_extev) +{ + const bool esv_ena = esv_enabled(); + const void *user_ptr; + + int needs_init_idx[num]; + int needs_init_num = 0; + int idx; + + for (int i = 0; i < num; i++) { + user_ptr = odp_packet_user_ptr(odp_pkts[i]); + if (user_ptr == PKT_USERPTR_MAGIC_NBR) { + /* Event already initialized by EM */ + if (esv_ena) + events[i] = ev_hdrs[i]->event; + /* else events[i] = events[i] */ + } else { + needs_init_idx[needs_init_num] = i; + needs_init_num++; + } + } + + if (needs_init_num == 0) + return; + + /* + * ODP pkt from outside of EM - not allocated by EM & needs init + */ + + if (!esv_ena) { + for (int i = 0; i < needs_init_num; i++) { + idx = needs_init_idx[i]; + odp_packet_user_ptr_set(odp_pkts[idx], PKT_USERPTR_MAGIC_NBR); + ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */ + ev_hdrs[idx]->event = events[idx]; + ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdrs[idx]->event_type = EM_EVENT_TYPE_PACKET; + ev_hdrs[idx]->flags.all = 0; + } + + return; + } + + /* + * ESV enabled: + */ + for (int i = 0; i < needs_init_num; i++) { + idx = needs_init_idx[i]; + odp_packet_user_ptr_set(odp_pkts[idx], PKT_USERPTR_MAGIC_NBR); + ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */ + ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdrs[idx]->event_type = EM_EVENT_TYPE_PACKET; + ev_hdrs[idx]->flags.all = 0; + } + + if (!em_shm->opt.esv.prealloc_pools) { + for (int i = 0; i < needs_init_num; i++) { + idx = needs_init_idx[i]; + events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev); + } + + return; + } + + /* + * em_shm->opt.esv.prealloc_pools == true + */ + for (int i = 0; i < needs_init_num; i++) { + idx = needs_init_idx[i]; + + odp_pool_t odp_pool = odp_packet_pool(odp_pkts[idx]); + em_pool_t pool = pool_odp2em(odp_pool); + + if (pool == EM_POOL_UNDEF) { + /* External odp pkt originates from an ODP-pool */ + events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev); + } else { + /* External odp pkt originates from an EM-pool */ + events[idx] = 
evstate_update(events[idx], ev_hdrs[idx], is_extev); + } + } +} + +/** + * Initialize the event header of a packet vector allocated outside of EM. + */ +static inline em_event_t +evhdr_init_pktvec(event_hdr_t *ev_hdr, em_event_t event, + odp_packet_vector_t odp_pktvec, bool is_extev) +{ + const int user_flag = odp_packet_vector_user_flag(odp_pktvec); + const bool esv_ena = esv_enabled(); + + if (user_flag == USER_FLAG_SET) { + /* Event already initialized by EM */ + if (esv_ena) + return ev_hdr->event; + else + return event; + } + + /* + * ODP pkt from outside of EM - not allocated by EM & needs init + */ + odp_packet_vector_user_flag_set(odp_pktvec, USER_FLAG_SET); + ev_hdr->user_area.all = 0; /* uarea fields init when used */ + ev_hdr->event_type = EM_EVENT_TYPE_VECTOR; + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + + if (!esv_ena) { + ev_hdr->event = event; + return event; + } + + /* + * ESV enabled: + */ + if (!em_shm->opt.esv.prealloc_pools) { + event = evstate_init(event, ev_hdr, is_extev); + } else { + /* esv.prealloc_pools == true: */ + odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvec); + em_pool_t pool = pool_odp2em(odp_pool); + + if (pool == EM_POOL_UNDEF) { + /* External odp pkt originates from an ODP-pool */ + event = evstate_init(event, ev_hdr, is_extev); + } else { + /* External odp pkt originates from an EM-pool */ + event = evstate_update(event, ev_hdr, is_extev); + } + } + + return event; +} + +/** + * Initialize the event headers of packet vectors allocated outside of EM. + */ +static inline void +evhdr_init_pktvec_multi(event_hdr_t *ev_hdrs[/*out*/], + em_event_t events[/*in,out*/], + const odp_packet_vector_t odp_pktvecs[/*in*/], + const int num, bool is_extev) +{ + const bool esv_ena = esv_enabled(); + + int needs_init_idx[num]; + int needs_init_num = 0; + int idx; + + for (int i = 0; i < num; i++) { + int user_flag = odp_packet_vector_user_flag(odp_pktvecs[i]); + + if (user_flag == USER_FLAG_SET) { + /* Event already initialized by EM */ + if (esv_ena) + events[i] = ev_hdrs[i]->event; + /* else events[i] = events[i] */ + } else { + needs_init_idx[needs_init_num] = i; + needs_init_num++; + } + } + + if (needs_init_num == 0) + return; + + /* + * ODP pkt vector from outside of EM - not allocated by EM & needs init + */ + + if (!esv_ena) { + for (int i = 0; i < needs_init_num; i++) { + idx = needs_init_idx[i]; + odp_packet_vector_user_flag_set(odp_pktvecs[idx], USER_FLAG_SET); + ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */ + ev_hdrs[idx]->event_type = EM_EVENT_TYPE_VECTOR; + ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdrs[idx]->event = events[idx]; + } + + return; + } + + /* + * ESV enabled: + */ + for (int i = 0; i < needs_init_num; i++) { + idx = needs_init_idx[i]; + odp_packet_vector_user_flag_set(odp_pktvecs[idx], USER_FLAG_SET); + ev_hdrs[idx]->user_area.all = 0; /* uarea fields init when used */ + ev_hdrs[idx]->event_type = EM_EVENT_TYPE_VECTOR; + ev_hdrs[idx]->egrp = EM_EVENT_GROUP_UNDEF; + } + + if (!em_shm->opt.esv.prealloc_pools) { + for (int i = 0; i < needs_init_num; i++) { + idx = needs_init_idx[i]; + events[idx] = evstate_init(events[idx], ev_hdrs[idx], is_extev); + } + + return; + } + + /* + * em_shm->opt.esv.prealloc_pools == true + */ + for (int i = 0; i < needs_init_num; i++) { + idx = needs_init_idx[i]; + + odp_pool_t odp_pool = odp_packet_vector_pool(odp_pktvecs[idx]); + em_pool_t pool = pool_odp2em(odp_pool); + + if (pool == EM_POOL_UNDEF) { + /* External odp pkt originates from an ODP-pool */ + events[idx] = 
evstate_init(events[idx], ev_hdrs[idx], is_extev);
+		} else {
+			/* External odp pkt originates from an EM-pool */
+			events[idx] = evstate_update(events[idx], ev_hdrs[idx], is_extev);
+		}
+	}
+}
+
+/**
+ * Initialize an external ODP event that has been input into EM.
+ *
+ * Initialize the event header if needed, i.e. if event originated from outside
+ * of EM from pktio or other input and was not allocated by EM via em_alloc().
+ * The odp pkt-user-ptr is used to determine whether the header has been
+ * initialized or not.
+ */
+static inline em_event_t
+event_init_odp(odp_event_t odp_event, bool is_extev, event_hdr_t **ev_hdr__out)
+{
+	const odp_event_type_t odp_type = odp_event_type(odp_event);
+	em_event_t event = event_odp2em(odp_event); /* return value */
+
+	switch (odp_type) {
+	case ODP_EVENT_PACKET: {
+		odp_packet_t odp_pkt = odp_packet_from_event(odp_event);
+		event_hdr_t *ev_hdr = odp_packet_user_area(odp_pkt);
+
+		/* init event-hdr if needed (also ESV-state if used) */
+		event = evhdr_init_pkt(ev_hdr, event, odp_pkt, is_extev);
+		if (ev_hdr__out)
+			*ev_hdr__out = ev_hdr;
+		return event;
+	}
+	case ODP_EVENT_BUFFER: {
+		const bool esv_ena = esv_enabled();
+
+		if (!ev_hdr__out && !esv_ena)
+			return event;
+
+		odp_buffer_t odp_buf = odp_buffer_from_event(odp_event);
+		event_hdr_t *ev_hdr = odp_buffer_addr(odp_buf);
+
+		if (esv_ena) /* update event handle (ESV) */
+			event = ev_hdr->event;
+		if (ev_hdr__out)
+			*ev_hdr__out = ev_hdr;
+		return event;
+	}
+	case ODP_EVENT_PACKET_VECTOR: {
+		odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event);
+		event_hdr_t *ev_hdr = odp_packet_vector_user_area(odp_pktvec);
+
+		/* init event-hdr if needed (also ESV-state if used) */
+		event = evhdr_init_pktvec(ev_hdr, event, odp_pktvec, is_extev);
+		if (ev_hdr__out)
+			*ev_hdr__out = ev_hdr;
+		return event;
+	}
+	default:
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED),
+			       EM_ESCOPE_EVENT_INIT_ODP,
+			       "Unexpected odp event type:%u", odp_type);
+		__builtin_unreachable();
+		/* never reached */
+		return EM_EVENT_UNDEF;
+	}
+}
+
+/* Helper to event_init_odp_multi() */
+static inline void
+event_init_pkt_multi(const odp_packet_t odp_pkts[/*in*/],
+		     em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
+		     const int num, bool is_extev)
+{
+	for (int i = 0; i < num; i++)
+		ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]);
+
+	evhdr_init_pkt_multi(ev_hdrs, events, odp_pkts, num, is_extev);
+}
+
+/* Helper to event_init_odp_multi() */
+static inline void
+event_init_buf_multi(const odp_buffer_t odp_bufs[/*in*/],
+		     em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
+		     const int num)
+{
+	for (int i = 0; i < num; i++)
+		ev_hdrs[i] = odp_buffer_addr(odp_bufs[i]);
+
+	if (esv_enabled()) {
+		/* update event handle (ESV) */
+		for (int i = 0; i < num; i++)
+			events[i] = ev_hdrs[i]->event;
+	}
+}
+
+/* Helper to event_init_odp_multi() */
+static inline void
+event_init_pktvec_multi(const odp_packet_vector_t odp_pktvecs[/*in*/],
+			em_event_t events[/*in,out*/], event_hdr_t *ev_hdrs[/*out*/],
+			const int num, bool is_extev)
+{
+	for (int i = 0; i < num; i++)
+		ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);
+
+	evhdr_init_pktvec_multi(ev_hdrs, events, odp_pktvecs, num, is_extev);
+}
+
+/**
+ * Convert from EM events to event headers and initialize the headers as needed.
+ *
+ * Initialize the event header if needed, i.e. if event originated from outside
+ * of EM from pktio or other input and was not allocated by EM via em_alloc().
+ * The odp pkt-user-ptr is used to determine whether the header has been + * initialized or not. + */ +static inline void +event_init_odp_multi(const odp_event_t odp_events[/*in*/], + em_event_t events[/*out*/], event_hdr_t *ev_hdrs[/*out*/], + const int num, bool is_extev) +{ + odp_event_type_t odp_type; + int ev = 0; /* event & ev_hdr tbl index*/ + + events_odp2em(odp_events, events/*out*/, num); + + do { + int num_type = odp_event_type_multi(&odp_events[ev], num - ev, + &odp_type /*out*/); + switch (odp_type) { + case ODP_EVENT_PACKET: { + odp_packet_t odp_pkts[num]; + + odp_packet_from_event_multi(odp_pkts /*out*/, + &odp_events[ev], + num_type); + event_init_pkt_multi(odp_pkts /*in*/, + &events[ev] /*in,out*/, + &ev_hdrs[ev] /*out*/, + num_type, is_extev); + break; + } + case ODP_EVENT_BUFFER: { + odp_buffer_t odp_bufs[num]; + + for (int i = 0; i < num_type; i++) + odp_bufs[i] = odp_buffer_from_event(odp_events[ev + i]); + + event_init_buf_multi(odp_bufs /*in*/, + &events[ev] /*in,out*/, + &ev_hdrs[ev] /*out*/, + num_type); + break; + } + case ODP_EVENT_PACKET_VECTOR: { + odp_packet_vector_t odp_pktvecs[num]; + + for (int i = 0; i < num_type; i++) + odp_pktvecs[i] = odp_packet_vector_from_event(odp_events[ev + i]); + + event_init_pktvec_multi(odp_pktvecs /*in*/, + &events[ev] /*in,out*/, + &ev_hdrs[ev] /*out*/, + num_type, is_extev); + break; + } + default: + INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), + EM_ESCOPE_EVENT_INIT_ODP_MULTI, + "Unexpected odp event type:%u (%d events)", + odp_type, num_type); + __builtin_unreachable(); + } + + ev += num_type; + } while (ev < num); +} + +/** + * @brief Convert from an event vector to event header + * + * It has to be known that the event is a vector before calling this function, + * otherwise use event_to_hdr(). + * + * @param vector_event EM event of major type EM_EVENT_TYPE_VECTOR + * @return event_hdr_t* Pointer to the event header + * + * Does NOT initialize the event header. + */ +static inline event_hdr_t * +eventvec_to_hdr(em_event_t vector_event) +{ + odp_event_t odp_event = event_em2odp(vector_event); + odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event); + event_hdr_t *ev_hdr = odp_packet_vector_user_area(odp_pktvec); + + return ev_hdr; +} + +/** + * Convert from EM event to event header. + * + * Does NOT initialize the event header. + */ +static inline event_hdr_t * +event_to_hdr(em_event_t event) +{ + odp_event_t odp_event = event_em2odp(event); + odp_packet_t odp_pkt; + odp_buffer_t odp_buf; + odp_packet_vector_t odp_pktvec; + event_hdr_t *ev_hdr; + + odp_event_type_t evtype = odp_event_type(odp_event); + + switch (evtype) { + case ODP_EVENT_PACKET: + odp_pkt = odp_packet_from_event(odp_event); + ev_hdr = odp_packet_user_area(odp_pkt); + break; + case ODP_EVENT_BUFFER: + odp_buf = odp_buffer_from_event(odp_event); + ev_hdr = odp_buffer_addr(odp_buf); + break; + case ODP_EVENT_PACKET_VECTOR: + odp_pktvec = odp_packet_vector_from_event(odp_event); + ev_hdr = odp_packet_vector_user_area(odp_pktvec); + break; + default: + INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), + EM_ESCOPE_EVENT_TO_HDR, + "Unexpected odp event type:%u", evtype); + /* avoids: "error: 'ev_hdr' may be used uninitialized" */ + __builtin_unreachable(); + break; + } + + return ev_hdr; +} + +/** + * Convert from EM events to event headers. + * + * Does NOT initialize the event headers. 
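
event_init_odp_multi() above processes its input in runs of equal odp event type rather than one event at a time. The batching shape, reduced to plain C; type_multi() below is a stand-in for odp_event_type_multi() (which returns the number of leading events sharing one type), and plain ints stand in for events:

#include <stdio.h>

/* Stand-in for odp_event_type_multi(): counts the leading entries in
 * ev[] that share the same type, written to *type_out. */
static int type_multi(const int ev[], int num, int *type_out)
{
	int n = 1;

	*type_out = ev[0];
	while (n < num && ev[n] == ev[0])
		n++;
	return n;
}

int main(void)
{
	/* "events" reduced to their type tags for the sketch */
	const int evs[] = { 1, 1, 2, 2, 2, 3, 1 };
	const int num = sizeof(evs) / sizeof(evs[0]);
	int ev = 0;

	do { /* same batching shape as event_init_odp_multi() */
		int type;
		int num_type = type_multi(&evs[ev], num - ev, &type);

		printf("batch: type=%d count=%d\n", type, num_type);
		ev += num_type;
	} while (ev < num);

	return 0;
}
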
+ * + * @param[in] events Input array of 'num' valid events + * @param[out] ev_hdrs Output array with room to store 'num' pointers to the + * corresponding event headers + * @param num Number of entries in 'events[]' and 'ev_hdrs[]' + */ +static inline void +event_to_hdr_multi(const em_event_t events[], event_hdr_t *ev_hdrs[/*out*/], + const int num) +{ + odp_event_t odp_events[num]; + odp_buffer_t odp_buf; + odp_packet_vector_t odp_pktvec; + odp_event_type_t evtype; + int num_type; + int ev = 0; /* event & ev_hdr tbl index*/ + int i; + + events_em2odp(events, odp_events/*out*/, num); + + do { + num_type = + odp_event_type_multi(&odp_events[ev], num - ev, &evtype/*out*/); + + switch (evtype) { + case ODP_EVENT_PACKET: { + odp_packet_t odp_pkts[num]; + + odp_packet_from_event_multi(odp_pkts, &odp_events[ev], + num_type); + for (i = 0; i < num_type; i++) + ev_hdrs[ev + i] = odp_packet_user_area(odp_pkts[i]); + break; + } + case ODP_EVENT_BUFFER: + for (i = 0; i < num_type; i++) { + odp_buf = odp_buffer_from_event(odp_events[ev + i]); + ev_hdrs[ev + i] = odp_buffer_addr(odp_buf); + } + break; + case ODP_EVENT_PACKET_VECTOR: + for (i = 0; i < num_type; i++) { + odp_pktvec = odp_packet_vector_from_event(odp_events[ev + i]); + ev_hdrs[ev + i] = odp_packet_vector_user_area(odp_pktvec); + } + break; + default: + INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), + EM_ESCOPE_EVENT_TO_HDR_MULTI, + "Unexpected odp event type:%u", evtype); + /* unreachable */ + __builtin_unreachable(); + break; + } + + ev += num_type; + } while (ev < num); +} + +/** Convert from event header to EM event */ +static inline em_event_t +event_hdr_to_event(const event_hdr_t *const event_hdr) +{ + return event_hdr->event; +} + +/** + * Allocate & initialize an event based on an odp-buf. + */ +static inline event_hdr_t * +event_alloc_buf(const mpool_elem_t *const pool_elem, + uint32_t size, em_event_type_t type) +{ + odp_buffer_t odp_buf = ODP_BUFFER_INVALID; + int subpool; + + /* + * Allocate from the 'best fit' subpool, or if that is full, from the + * next subpool that has buffers available of a bigger size. + */ + subpool = pool_find_subpool(pool_elem, size); + if (unlikely(subpool < 0)) + return NULL; + + for (; subpool < pool_elem->num_subpools; subpool++) { + odp_pool_t odp_pool = pool_elem->odp_pool[subpool]; + + if (EM_CHECK_LEVEL > 1 && + unlikely(odp_pool == ODP_POOL_INVALID)) + return NULL; + + odp_buf = odp_buffer_alloc(odp_pool); + if (likely(odp_buf != ODP_BUFFER_INVALID)) + break; + } + + if (unlikely(odp_buf == ODP_BUFFER_INVALID)) + return NULL; + + /* + * odp buffer now allocated - init the EM event header + * at the beginning of the buffer. + */ + event_hdr_t *const ev_hdr = odp_buffer_addr(odp_buf); + odp_event_t odp_event = odp_buffer_to_event(odp_buf); + em_event_t event = event_odp2em(odp_event); + + ev_hdr->user_area.all = 0; + ev_hdr->user_area.req_size = pool_elem->user_area.req_size; + ev_hdr->user_area.pad_size = pool_elem->user_area.pad_size; + ev_hdr->user_area.isinit = 1; + + ev_hdr->event = event; /* store this event handle */ + /* For optimization, no initialization for feature variables */ + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdr->event_size = size; /* store requested size */ + ev_hdr->align_offset = pool_elem->align_offset; + ev_hdr->event_type = type; /* store the event type */ + ev_hdr->flags.all = 0; + + return ev_hdr; +} + +/** + * Allocate & initialize multiple events based on odp-bufs. 
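
pool_find_subpool() is not defined in this file; the loop structure in event_alloc_buf() above implies that subpools are ordered by ascending buffer size and the search returns the smallest subpool whose buffers fit the request. A guessed, runnable sketch of that "best fit plus fall-through" behaviour (all names and sizes below are invented):

#include <stdio.h>

#define NUM_SUBPOOLS 4

static const unsigned int subpool_size[NUM_SUBPOOLS] = { 256, 1024, 2048, 4096 };
static unsigned int subpool_free[NUM_SUBPOOLS] = { 0, 0, 5, 9 }; /* first two empty */

static int find_subpool(unsigned int size)
{
	for (int i = 0; i < NUM_SUBPOOLS; i++)
		if (size <= subpool_size[i])
			return i; /* smallest subpool that fits 'size' */
	return -1;
}

int main(void)
{
	unsigned int size = 512;
	int sp = find_subpool(size);

	/* Best-fit subpool may be empty: fall through to bigger subpools */
	for (; sp >= 0 && sp < NUM_SUBPOOLS; sp++) {
		if (subpool_free[sp] > 0) {
			subpool_free[sp]--;
			printf("alloc %u B from subpool %d (%u B bufs)\n",
			       size, sp, subpool_size[sp]);
			return 0;
		}
	}
	printf("pool exhausted\n");
	return 1;
}
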
+ */ +static inline int +event_alloc_buf_multi(em_event_t events[/*out*/], const int num, + const mpool_elem_t *pool_elem, uint32_t size, + em_event_type_t type) +{ + odp_buffer_t odp_bufs[num]; + odp_event_t odp_event; + event_hdr_t *ev_hdrs[num]; + int subpool; + const bool esv_ena = esv_enabled(); + + /* + * Allocate from the 'best fit' subpool, or if that is full, from the + * next subpool that has buffers available of a bigger size. + */ + subpool = pool_find_subpool(pool_elem, size); + if (unlikely(subpool < 0)) + return 0; + + int num_req = num; + int num_bufs = 0; + int i; + + for (; subpool < pool_elem->num_subpools; subpool++) { + odp_pool_t odp_pool = pool_elem->odp_pool[subpool]; + + if (EM_CHECK_LEVEL > 1 && + unlikely(odp_pool == ODP_POOL_INVALID)) + return 0; + + int ret = odp_buffer_alloc_multi(odp_pool, &odp_bufs[num_bufs], + num_req); + if (unlikely(ret <= 0)) + continue; /* try next subpool */ + + /* store the allocated events[] */ + for (i = num_bufs; i < num_bufs + ret; i++) { + odp_event = odp_buffer_to_event(odp_bufs[i]); + events[i] = event_odp2em(odp_event); + } + + /* Init 'ret' ev-hdrs from this 'subpool'=='odp-pool' */ + for (i = num_bufs; i < num_bufs + ret; i++) + ev_hdrs[i] = odp_buffer_addr(odp_bufs[i]); + + if (esv_ena) + evstate_alloc_multi(&events[num_bufs] /*in/out*/, + &ev_hdrs[num_bufs], ret); + + for (i = num_bufs; i < num_bufs + ret; i++) { + /* For optimization, no init for feature vars */ + if (!esv_ena) + ev_hdrs[i]->event = events[i]; + + ev_hdrs[i]->user_area.all = 0; + ev_hdrs[i]->user_area.req_size = pool_elem->user_area.req_size; + ev_hdrs[i]->user_area.pad_size = pool_elem->user_area.pad_size; + ev_hdrs[i]->user_area.isinit = 1; + + ev_hdrs[i]->event_size = size; + ev_hdrs[i]->align_offset = pool_elem->align_offset; + ev_hdrs[i]->event_type = type; + ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdrs[i]->flags.all = 0; + } + + num_bufs += ret; + if (likely(num_bufs == num)) + break; /* all allocated */ + num_req -= ret; + } + + return num_bufs; /* number of allocated bufs (0 ... num) */ +} + +/** + * Allocate & initialize an event based on an odp-pkt. + */ +static inline event_hdr_t * +event_alloc_pkt(const mpool_elem_t *pool_elem, + uint32_t size, em_event_type_t type) +{ + const uint32_t push_len = pool_elem->align_offset; + uint32_t pull_len; + uint32_t alloc_size; + odp_packet_t odp_pkt = ODP_PACKET_INVALID; + int subpool; + + if (size > push_len) { + alloc_size = size - push_len; + pull_len = 0; + } else { + alloc_size = 1; /* min allowed */ + pull_len = push_len + 1 - size; + } + + /* + * Allocate from the 'best fit' subpool, or if that is full, from the + * next subpool that has pkts available of a bigger size. 
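
event_alloc_buf_multi() above may deliver anywhere from 0 to num events, so callers must cope with partial results. A runnable sketch of the caller-side pattern; alloc_multi() below is a fake allocator invented only to force the retry loop:

#include <stdio.h>

/* Fake multi-alloc: hands out at most 3 items per call from a pool of 7 */
static int alloc_multi(int out[], int num)
{
	static int left = 7;
	int n = num < 3 ? num : 3;

	if (n > left)
		n = left;
	for (int i = 0; i < n; i++)
		out[i] = 100 + i;
	left -= n;
	return n; /* 0 ... num */
}

int main(void)
{
	int events[10];
	int wanted = 10;
	int have = 0;

	while (have < wanted) {
		int got = alloc_multi(&events[have], wanted - have);

		if (got == 0)
			break; /* pool empty: accept the shortfall */
		have += got;
	}
	printf("allocated %d of %d events\n", have, wanted);
	return 0;
}
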
+ */ + subpool = pool_find_subpool(pool_elem, size); + if (unlikely(subpool < 0)) + return NULL; + + for (; subpool < pool_elem->num_subpools; subpool++) { + odp_pool_t odp_pool = pool_elem->odp_pool[subpool]; + + if (EM_CHECK_LEVEL > 1 && + unlikely(odp_pool == ODP_POOL_INVALID)) + return NULL; + + odp_pkt = odp_packet_alloc(odp_pool, alloc_size); + if (likely(odp_pkt != ODP_PACKET_INVALID)) + break; + } + + if (unlikely(odp_pkt == ODP_PACKET_INVALID)) + return NULL; + + /* + * odp packet now allocated - adjust the payload start address and + * init the EM event header in the odp-pkt user-area + */ + + /* Adjust event payload start-address based on alignment config */ + const void *ptr; + + if (push_len) { + ptr = odp_packet_push_head(odp_pkt, push_len); + if (unlikely(!ptr)) + goto err_pktalloc; + } + if (pull_len) { + ptr = odp_packet_pull_tail(odp_pkt, pull_len); + if (unlikely(!ptr)) + goto err_pktalloc; + } + + /* + * Set the pkt user ptr to be able to recognize pkt-events that + * EM has created vs pkts from pkt-input that needs their + * ev-hdrs to be initialized. + */ + odp_packet_user_ptr_set(odp_pkt, PKT_USERPTR_MAGIC_NBR); + + event_hdr_t *const ev_hdr = odp_packet_user_area(odp_pkt); + odp_event_t odp_event = odp_packet_to_event(odp_pkt); + em_event_t event = event_odp2em(odp_event); + + if (unlikely(ev_hdr == NULL)) + goto err_pktalloc; + + ev_hdr->user_area.all = 0; + ev_hdr->user_area.req_size = pool_elem->user_area.req_size; + ev_hdr->user_area.pad_size = pool_elem->user_area.pad_size; + ev_hdr->user_area.isinit = 1; + + ev_hdr->event = event; /* store this event handle */ + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdr->event_size = size; /* store requested size */ + /* ev_hdr->align_offset = needed by odp bufs only */ + ev_hdr->event_type = type; /* store the event type */ + ev_hdr->flags.all = 0; + + return ev_hdr; + +err_pktalloc: + odp_packet_free(odp_pkt); + return NULL; +} + +/* + * Helper for event_alloc_pkt_multi() + */ +static inline int +pktalloc_multi(odp_packet_t odp_pkts[/*out*/], int num, + odp_pool_t odp_pool, uint32_t size, + uint32_t push_len, uint32_t pull_len) +{ + int ret = odp_packet_alloc_multi(odp_pool, size, odp_pkts, num); + + if (unlikely(ret <= 0)) + return 0; + + const int num_pkts = ret; /* return value > 0 */ + const void *ptr = NULL; + int i; + + /* Adjust payload start-address based on alignment config */ + if (push_len) { + for (i = 0; i < num_pkts; i++) { + ptr = odp_packet_push_head(odp_pkts[i], push_len); + if (unlikely(!ptr)) + goto err_pktalloc_multi; + } + } + if (pull_len) { + for (i = 0; i < num_pkts; i++) { + ptr = odp_packet_pull_tail(odp_pkts[i], pull_len); + if (unlikely(!ptr)) + goto err_pktalloc_multi; /* only before esv */ + } + } + + /* + * Set the pkt user ptr to be able to recognize pkt-events that + * EM has created vs pkts from pkt-input that needs their + * ev-hdrs to be initialized. + */ + for (i = 0; i < num_pkts; i++) + odp_packet_user_ptr_set(odp_pkts[i], PKT_USERPTR_MAGIC_NBR); + + return num_pkts; + +err_pktalloc_multi: + odp_packet_free_multi(odp_pkts, num_pkts); + return 0; +} + +/** + * Allocate & initialize multiple events based on odp-pkts. 
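
The push_len/pull_len arithmetic in event_alloc_pkt() above always nets out to a payload of exactly 'size' bytes, also when the requested size is smaller than the alignment offset. The same arithmetic as a standalone, checkable example:

#include <stdint.h>
#include <stdio.h>

/* push_len models pool_elem->align_offset from the code above */
static void pkt_len_calc(uint32_t size, uint32_t push_len)
{
	uint32_t alloc_size, pull_len;

	if (size > push_len) {
		alloc_size = size - push_len;
		pull_len = 0;
	} else {
		alloc_size = 1; /* min allowed alloc length */
		pull_len = push_len + 1 - size;
	}
	/* final payload length == push_head(push_len) + alloc_size
	 * - pull_tail(pull_len) == size, in both branches */
	printf("size=%u push=%u -> alloc=%u pull=%u final=%u\n",
	       size, push_len, alloc_size, pull_len,
	       push_len + alloc_size - pull_len);
}

int main(void)
{
	pkt_len_calc(1500, 8); /* size larger than the align offset */
	pkt_len_calc(4, 8);    /* size smaller than the align offset */
	return 0;
}
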
+ */ +static inline int +event_alloc_pkt_multi(em_event_t events[/*out*/], const int num, + const mpool_elem_t *pool_elem, uint32_t size, + em_event_type_t type) +{ + const uint32_t push_len = pool_elem->align_offset; + uint32_t pull_len; + odp_packet_t odp_pkts[num]; + /* use same output-array: odp_events[] = events[] */ + odp_event_t *const odp_events = (odp_event_t *)events; + event_hdr_t *ev_hdrs[num]; + uint32_t alloc_size; + int subpool; + const bool esv_ena = esv_enabled(); + + if (size > push_len) { + alloc_size = size - push_len; + pull_len = 0; + } else { + alloc_size = 1; /* min allowed */ + pull_len = push_len + 1 - size; + } + + /* + * Allocate from the 'best fit' subpool, or if that is full, from the + * next subpool that has pkts available of a bigger size. + */ + subpool = pool_find_subpool(pool_elem, size); + if (unlikely(subpool < 0)) + return 0; + + int num_req = num; + int num_pkts = 0; + int i; + + for (; subpool < pool_elem->num_subpools; subpool++) { + odp_pool_t odp_pool = pool_elem->odp_pool[subpool]; + + if (EM_CHECK_LEVEL > 1 && + unlikely(odp_pool == ODP_POOL_INVALID)) + return 0; + + int ret = pktalloc_multi(&odp_pkts[num_pkts], num_req, + odp_pool, alloc_size, + push_len, pull_len); + if (unlikely(ret <= 0)) + continue; /* try next subpool */ + + /* + * Init 'ret' ev-hdrs from this 'subpool'=='odp-pool'. + * Note: odp_events[] points&writes into events[out] + */ + odp_packet_to_event_multi(&odp_pkts[num_pkts], + &odp_events[num_pkts], ret); + + for (i = num_pkts; i < num_pkts + ret; i++) + ev_hdrs[i] = odp_packet_user_area(odp_pkts[i]); + + /* + * Note: events[] == odp_events[] before ESV init. + * Don't touch odp_events[] during this loop-round anymore. + */ + if (esv_ena) + evstate_alloc_multi(&events[num_pkts] /*in/out*/, + &ev_hdrs[num_pkts], ret); + + for (i = num_pkts; i < num_pkts + ret; i++) { + /* For optimization, no init for feature vars */ + ev_hdrs[i]->user_area.all = 0; + ev_hdrs[i]->user_area.req_size = pool_elem->user_area.req_size; + ev_hdrs[i]->user_area.pad_size = pool_elem->user_area.pad_size; + ev_hdrs[i]->user_area.isinit = 1; + + if (!esv_ena) + ev_hdrs[i]->event = events[i]; + + ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF; + ev_hdrs[i]->event_size = size; + /* ev_hdr->align_offset = needed by odp bufs only */ + ev_hdrs[i]->event_type = type; + ev_hdrs[i]->flags.all = 0; + } + + num_pkts += ret; + if (likely(num_pkts == num)) + break; /* all allocated */ + num_req -= ret; + } + + return num_pkts; /* number of allocated pkts */ +} + +static inline event_hdr_t * +event_alloc_vector(const mpool_elem_t *pool_elem, + uint32_t size, em_event_type_t type) +{ + odp_packet_vector_t odp_pktvec = ODP_PACKET_VECTOR_INVALID; + int subpool; + + /* + * Allocate from the 'best fit' subpool, or if that is full, from the + * next subpool that has pkts available of a bigger size. 
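
event_alloc_pkt_multi() above reuses the caller's events[] array as the odp_event_t output array. That cast is valid only because both handle types are a single 64-bit word. A standalone model of the in-place reinterpretation; the handle encoding and the 48-bit mask are assumptions of this sketch:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t em_ev_t;  /* stand-in: ptr bits + generation bits */
typedef uint64_t odp_ev_t; /* stand-in: ptr bits only */

#define PTR_MASK ((UINT64_C(1) << 48) - 1) /* assumed 48-bit ptr field */

int main(void)
{
	em_ev_t events[3] = {
		(UINT64_C(1) << 48) | 0x1000, /* gen=1, ptr=0x1000 */
		(UINT64_C(2) << 48) | 0x2000,
		(UINT64_C(7) << 48) | 0x3000,
	};
	/* same memory, new meaning: events[] must not be used afterwards */
	odp_ev_t *odp = (odp_ev_t *)events;

	for (int i = 0; i < 3; i++)
		odp[i] = events[i] & PTR_MASK; /* strip generation in-place */

	for (int i = 0; i < 3; i++)
		printf("odp[%d]=0x%lx\n", i, (unsigned long)odp[i]);
	return 0;
}
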
+	 */
+	subpool = pool_find_subpool(pool_elem, size);
+	if (unlikely(subpool < 0))
+		return NULL;
+
+	for (; subpool < pool_elem->num_subpools; subpool++) {
+		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];
+
+		if (EM_CHECK_LEVEL > 1 &&
+		    unlikely(odp_pool == ODP_POOL_INVALID))
+			return NULL;
+
+		odp_pktvec = odp_packet_vector_alloc(odp_pool);
+		if (likely(odp_pktvec != ODP_PACKET_VECTOR_INVALID))
+			break;
+	}
+
+	if (unlikely(odp_pktvec == ODP_PACKET_VECTOR_INVALID))
+		return NULL;
+
+	/* Ensure the size of the vector table is 0 after alloc */
+	odp_packet_vector_size_set(odp_pktvec, 0);
+
+	/*
+	 * Packet vector now allocated:
+	 * Init the EM event header in the odp-pkt-vector user-area.
+	 */
+
+	/*
+	 * Set the pktvec user flag to be able to recognize vectors that
+	 * EM has created vs. vectors from pkt-input that need their
+	 * ev-hdrs to be initialized.
+	 */
+	odp_packet_vector_user_flag_set(odp_pktvec, USER_FLAG_SET);
+
+	event_hdr_t *const ev_hdr = odp_packet_vector_user_area(odp_pktvec);
+	odp_event_t odp_event = odp_packet_vector_to_event(odp_pktvec);
+	em_event_t event = event_odp2em(odp_event);
+
+	if (unlikely(ev_hdr == NULL))
+		goto err_vecalloc;
+
+	ev_hdr->user_area.all = 0;
+	ev_hdr->user_area.req_size = pool_elem->user_area.req_size;
+	ev_hdr->user_area.pad_size = pool_elem->user_area.pad_size;
+	ev_hdr->user_area.isinit = 1;
+
+	ev_hdr->event = event; /* store this event handle */
+	ev_hdr->event_size = size; /* store requested size */
+	/* ev_hdr->align_offset = needed by odp bufs only */
+	ev_hdr->event_type = type; /* store the event type */
+	ev_hdr->egrp = EM_EVENT_GROUP_UNDEF;
+
+	return ev_hdr;
+
+err_vecalloc:
+	odp_packet_vector_free(odp_pktvec);
+	return NULL;
+}
+
+/*
+ * Helper for event_alloc_vector_multi()
+ */
+static inline int
+vecalloc_multi(odp_packet_vector_t odp_pktvecs[/*out*/], int num,
+	       odp_pool_t odp_pool)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		odp_pktvecs[i] = odp_packet_vector_alloc(odp_pool);
+		if (unlikely(odp_pktvecs[i] == ODP_PACKET_VECTOR_INVALID))
+			break;
+	}
+
+	const int num_vecs = i;
+
+	if (unlikely(num_vecs == 0))
+		return 0;
+
+	/*
+	 * Set the pkt vector user ptr to be able to recognize vector-events
+	 * that EM has created vs vectors from pkt-input that need their
+	 * ev-hdrs to be initialized.
+	 */
+	for (i = 0; i < num_vecs; i++) {
+		odp_packet_vector_user_flag_set(odp_pktvecs[i], USER_FLAG_SET);
+		/* Ensure the size of the vector table is 0 after alloc */
+		odp_packet_vector_size_set(odp_pktvecs[i], 0);
+	}
+
+	return num_vecs;
+}
+
+/**
+ * Allocate & initialize multiple events based on odp-pkt-vectors.
+ */
+static inline int
+event_alloc_vector_multi(em_event_t events[/*out*/], const int num,
+			 const mpool_elem_t *pool_elem, uint32_t size,
+			 em_event_type_t type)
+{
+	odp_packet_vector_t odp_pktvecs[num];
+	/* use same output-array: odp_events[] = events[] */
+	odp_event_t *const odp_events = (odp_event_t *)events;
+	event_hdr_t *ev_hdrs[num];
+	int subpool;
+	const bool esv_ena = esv_enabled();
+
+	/*
+	 * Allocate from the 'best fit' subpool, or if that is full, from the
+	 * next subpool that has pkts available of a bigger size.
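
Both PKT_USERPTR_MAGIC_NBR (packets) and USER_FLAG_SET (vectors) above implement the same idiom: tag everything EM allocates, so that events arriving from outside can be detected and lazily initialized exactly once. A minimal standalone version of the idiom; MAGIC and struct obj are inventions of this sketch:

#include <stdio.h>

#define MAGIC 0xE11 /* models PKT_USERPTR_MAGIC_NBR / USER_FLAG_SET */

struct obj {
	int tag; /* models the odp user ptr / user flag */
	int hdr; /* models the EM event header */
};

static void obj_init_if_needed(struct obj *o)
{
	if (o->tag == MAGIC)
		return; /* already initialized by "EM" */

	o->tag = MAGIC; /* mark so the next pass skips the init */
	o->hdr = 42;    /* one-time header init for external input */
}

int main(void)
{
	struct obj em_owned = { .tag = MAGIC, .hdr = 42 };
	struct obj external = { .tag = 0, .hdr = 0 };

	obj_init_if_needed(&em_owned); /* no-op */
	obj_init_if_needed(&external); /* initializes the header */
	printf("em_owned.hdr=%d external.hdr=%d\n", em_owned.hdr, external.hdr);
	return 0;
}
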
+	 */
+	subpool = pool_find_subpool(pool_elem, size);
+	if (unlikely(subpool < 0))
+		return 0;
+
+	int num_req = num;
+	int num_vecs = 0;
+	int i;
+
+	for (; subpool < pool_elem->num_subpools; subpool++) {
+		odp_pool_t odp_pool = pool_elem->odp_pool[subpool];
+
+		if (EM_CHECK_LEVEL > 1 &&
+		    unlikely(odp_pool == ODP_POOL_INVALID))
+			return 0;
+
+		int ret = vecalloc_multi(&odp_pktvecs[num_vecs], num_req,
+					 odp_pool);
+		if (unlikely(ret <= 0))
+			continue; /* try next subpool */
+
+		/*
+		 * Init 'ret' ev-hdrs from this 'subpool'=='odp-pool'.
+		 * Note: odp_events[] points&writes into events[out]
+		 */
+		for (i = num_vecs; i < num_vecs + ret; i++) {
+			odp_events[i] = odp_packet_vector_to_event(odp_pktvecs[i]);
+			ev_hdrs[i] = odp_packet_vector_user_area(odp_pktvecs[i]);
+		}
+
+		/*
+		 * Note: events[] == odp_events[] before ESV init.
+		 * Don't touch odp_events[] during this loop-round anymore.
+		 */
+		if (esv_ena)
+			evstate_alloc_multi(&events[num_vecs] /*in/out*/,
+					    &ev_hdrs[num_vecs], ret);
+
+		for (i = num_vecs; i < num_vecs + ret; i++) {
+			/* For optimization, no init for feature vars */
+			if (!esv_ena)
+				ev_hdrs[i]->event = events[i];
+
+			ev_hdrs[i]->user_area.all = 0;
+			ev_hdrs[i]->user_area.req_size = pool_elem->user_area.req_size;
+			ev_hdrs[i]->user_area.pad_size = pool_elem->user_area.pad_size;
+			ev_hdrs[i]->user_area.isinit = 1;
+
+			ev_hdrs[i]->event_size = size;
+			/* ev_hdr->align_offset = needed by odp bufs only */
+			ev_hdrs[i]->event_type = type;
+			ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF;
+		}
+
+		num_vecs += ret;
+		if (likely(num_vecs == num))
+			break; /* all allocated */
+		num_req -= ret;
+	}
+
+	return num_vecs; /* number of allocated vectors */
+}
+
+/**
+ * Helper for em_alloc() and em_event_clone()
+ */
+static inline event_hdr_t *
+event_alloc(const mpool_elem_t *pool_elem, uint32_t size, em_event_type_t type)
+{
+	/*
+	 * EM event pools created with type=PKT can support:
+	 *   - SW events (bufs)
+	 *   - pkt events.
+	 *
+	 * EM event pools created with type=SW can support:
+	 *   - SW events (bufs) only
+	 */
+	event_hdr_t *ev_hdr = NULL;
+
+	if (pool_elem->event_type == EM_EVENT_TYPE_PACKET)
+		ev_hdr = event_alloc_pkt(pool_elem, size, type);
+	else if (pool_elem->event_type == EM_EVENT_TYPE_SW)
+		ev_hdr = event_alloc_buf(pool_elem, size, type);
+	else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR)
+		ev_hdr = event_alloc_vector(pool_elem, size, type);
+
+	/* event now allocated (if !NULL): ev_hdr->event */
+
+	/*
+	 * ESV state update for the event still needs to be done by the caller,
+	 * not done here since there are different callers of this function.
+	 *   if (esv_enabled())
+	 *           event = evstate_alloc/clone/...(event, ev_hdr);
+	 */
+
+	return ev_hdr; /* can be NULL */
+}
+
+/**
+ * Start-up helper for pool preallocation
+ */
+static inline event_prealloc_hdr_t *
+event_prealloc(const mpool_elem_t *pool_elem, size_t size, em_event_type_t type)
+{
+	/*
+	 * EM event pools created with type=PKT can support:
+	 *   - SW events (bufs)
+	 *   - pkt events.
+ * + * EM event pools created with type=SW can support: + * - SW events (bufs) only + */ + event_hdr_t *ev_hdr = NULL; + + if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) + ev_hdr = event_alloc_pkt(pool_elem, size, type); + else if (pool_elem->event_type == EM_EVENT_TYPE_SW) + ev_hdr = event_alloc_buf(pool_elem, size, type); + else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR) + ev_hdr = event_alloc_vector(pool_elem, size, type); + + if (unlikely(ev_hdr == NULL)) + return NULL; + + /* event now allocated */ + em_event_t event = ev_hdr->event; + + if (esv_enabled()) + event = evstate_prealloc(event, ev_hdr); + + event_prealloc_hdr_t *prealloc_hdr = (event_prealloc_hdr_t *)ev_hdr; + + return prealloc_hdr; +} + +static inline event_prealloc_hdr_t * +list_node_to_prealloc_hdr(list_node_t *const list_node) +{ + event_prealloc_hdr_t *const ev_hdr = (event_prealloc_hdr_t *)(uintptr_t) + ((uint8_t *)list_node - offsetof(event_prealloc_hdr_t, list_node)); + + return likely(list_node != NULL) ? ev_hdr : NULL; +} + +/** + * @brief Convert event vector table content to odp packets in-place. + * + * Convert an EM event vector table, containing em_event_t:s with + * esv-info (evgen), to a table of odp packets (remove handles' evgen in-place). + */ +static inline void +vector_tbl2odp(odp_event_t odp_event_pktvec) +{ + odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec); + odp_packet_t *pkt_tbl = NULL; + const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl/*out*/); + + if (likely(pkts > 0)) { + /* Careful! Points to same table */ + em_event_t *event_tbl = (em_event_t *)pkt_tbl; + + /* Drop ESV event generation (evgen) from event handle */ + (void)events_em2pkt_inplace(event_tbl, pkts); + } +} + +/** + * @brief Convert ODP packet vector table content to EM events. + * + * Convert an ODP packet vector table to a table of EM events. + * The content must be known to be raw odp packets. + * + * For recovery purposes only. + */ +static inline void +vector_tbl2em(odp_event_t odp_event_pktvec) +{ + odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event_pktvec); + odp_packet_t *pkt_tbl = NULL; + const int pkts = odp_packet_vector_tbl(pkt_vec, &pkt_tbl/*out*/); + + if (likely(pkts > 0)) { + em_event_t *const ev_tbl = (em_event_t *const)pkt_tbl; + odp_packet_t odp_pkttbl[pkts]; + event_hdr_t *ev_hdr_tbl[pkts]; + + /* + * Copy pkts from vector's pkt-table using events_em2pkt() that + * also drops any evgen-info from the handles if present. + */ + events_em2pkt(ev_tbl/*in*/, odp_pkttbl/*out*/, pkts); + + event_init_pkt_multi(odp_pkttbl /*in*/, ev_tbl /*in,out*/, + ev_hdr_tbl /*out*/, pkts, false); + } +} + +static inline em_status_t +send_event(em_event_t event, const queue_elem_t *q_elem) +{ + const bool esv_ena = esv_enabled(); + odp_event_t odp_event = event_em2odp(event); + odp_queue_t odp_queue = q_elem->odp_queue; + int ret; + + if (unlikely(EM_CHECK_LEVEL > 1 && + (odp_event == ODP_EVENT_INVALID || + odp_queue == ODP_QUEUE_INVALID))) + return EM_ERR_NOT_FOUND; + + if (unlikely(EM_CHECK_LEVEL > 0 && + q_elem->state != EM_QUEUE_STATE_READY)) { + return EM_ERR_BAD_STATE; + } + + /* + * Vector: convert the event vector table to a table of odp packets + * (in-place) before passing the vector and contents to the scheduler. 
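
list_node_to_prealloc_hdr() above recovers the enclosing header from an embedded list node with offsetof(), the classic container_of pattern. A self-contained version with invented types (node_t/elem_t):

#include <stddef.h>
#include <stdio.h>

typedef struct node_t {
	struct node_t *next;
} node_t;

typedef struct {
	int payload;
	node_t node; /* linked into a free-list by this member */
} elem_t;

/* Subtract the member offset to get back to the enclosing struct */
#define container_of(ptr, type, member) \
	((type *)(void *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	elem_t e = { .payload = 7, .node = { .next = NULL } };
	node_t *n = &e.node; /* what the list hands back */
	elem_t *back = container_of(n, elem_t, node);

	printf("payload=%d (recovered: %p == %p)\n",
	       back->payload, (void *)back, (void *)&e);
	return 0;
}
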
+ */ + if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR) + vector_tbl2odp(odp_event); + + /* Enqueue event for scheduling */ + ret = odp_queue_enq(odp_queue, odp_event); + + if (unlikely(EM_CHECK_LEVEL > 0 && ret != 0)) { + /* Restore EM vector event-table before returning vector to user */ + if (esv_ena && odp_event_type(odp_event) == ODP_EVENT_PACKET_VECTOR) + vector_tbl2em(odp_event); + + return EM_ERR_LIB_FAILED; + } + + return EM_OK; +} + +static inline int +send_event_multi(const em_event_t events[], const int num, + const queue_elem_t *q_elem) +{ + const bool esv_ena = esv_enabled(); + odp_event_t odp_events[num]; + odp_queue_t odp_queue = q_elem->odp_queue; + + if (unlikely(EM_CHECK_LEVEL > 1 && odp_queue == ODP_QUEUE_INVALID)) + return 0; + + if (unlikely(EM_CHECK_LEVEL > 0 && + q_elem->state != EM_QUEUE_STATE_READY)) { + return 0; + } + + events_em2odp(events, odp_events/*out*/, num); + + /* + * Vector: convert the event vector table to a table of odp packets + * (in-place) before passing the vector and contents to the scheduler. + */ + if (esv_ena) { + for (int i = 0; i < num; i++) { + if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR) + vector_tbl2odp(odp_events[i]); + } + } + + /* Enqueue events for scheduling */ + int ret = odp_queue_enq_multi(odp_queue, odp_events, num); + + if (unlikely(ret != num)) { + int enq = ret < 0 ? 0 : ret; + + /* Restore EM vector event-table before returning vector to user */ + if (esv_ena) { + for (int i = enq; i < num; i++) { + if (odp_event_type(odp_events[i]) == ODP_EVENT_PACKET_VECTOR) + vector_tbl2em(odp_events[i]); + } + } + + return enq; + } + + return num; +} + +static inline em_status_t +send_local(em_event_t event, queue_elem_t *const q_elem) +{ + em_locm_t *const locm = &em_locm; + const em_queue_prio_t prio = q_elem->priority; + evhdl_t evhdl = {.event = event}; + int ret; + + if (unlikely(EM_CHECK_LEVEL > 0 && + q_elem->state != EM_QUEUE_STATE_READY)) + return EM_ERR_BAD_STATE; + + stash_entry_t entry = {.qidx = queue_hdl2idx(q_elem->queue), + .evptr = evhdl.evptr}; + + ret = odp_stash_put_u64(locm->local_queues.prio[prio].stash, + &entry.u64, 1); + if (likely(ret == 1)) { + locm->local_queues.empty = 0; + locm->local_queues.prio[prio].empty_prio = 0; + return EM_OK; + } + + return EM_ERR_LIB_FAILED; +} + +static inline int +send_local_multi(const em_event_t events[], const int num, + queue_elem_t *const q_elem) +{ + em_locm_t *const locm = &em_locm; + const em_queue_prio_t prio = q_elem->priority; + const evhdl_t *const evhdl_tbl = (const evhdl_t *const)events; + + if (unlikely(EM_CHECK_LEVEL > 0 && + q_elem->state != EM_QUEUE_STATE_READY)) + return 0; + + stash_entry_t entry_tbl[num]; + const uint16_t qidx = queue_hdl2idx(q_elem->queue); + + for (int i = 0; i < num; i++) { + entry_tbl[i].qidx = qidx; + entry_tbl[i].evptr = evhdl_tbl[i].evptr; + } + + int ret = odp_stash_put_u64(locm->local_queues.prio[prio].stash, + &entry_tbl[0].u64, num); + if (likely(ret > 0)) { + locm->local_queues.empty = 0; + locm->local_queues.prio[prio].empty_prio = 0; + return ret; + } + + return 0; +} + +/** + * Send one event to a queue of type EM_QUEUE_TYPE_OUTPUT + */ +static inline em_status_t +send_output(em_event_t event, queue_elem_t *const output_q_elem) +{ + const em_sched_context_type_t sched_ctx_type = + em_locm.current.sched_context_type; + + if (unlikely(EM_CHECK_LEVEL > 0 && + output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED)) + return EM_ERR_BAD_STATE; + + /* + * An event sent to an output queue from an 
ordered context needs to + * be 're-ordered' before calling the user provided output-function. + * Order is maintained by enqueuing and dequeuing into an odp-queue + * that takes care of order. + */ + if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) { + const odp_queue_t odp_queue = output_q_elem->odp_queue; + odp_event_t odp_event = event_em2odp(event); + int ret; + + if (unlikely(EM_CHECK_LEVEL > 1 && + (odp_event == ODP_EVENT_INVALID || + odp_queue == ODP_QUEUE_INVALID))) + return EM_ERR_NOT_FOUND; + + if (!EM_OUTPUT_QUEUE_IMMEDIATE) + output_queue_track(output_q_elem); + + /* enqueue to enforce odp to handle ordering */ + ret = odp_queue_enq(odp_queue, odp_event); + if (unlikely(ret != 0)) + return EM_ERR_LIB_FAILED; + + /* return value must be EM_OK after this since event enqueued */ + + if (EM_OUTPUT_QUEUE_IMMEDIATE) { + env_spinlock_t *const lock = + &output_q_elem->output.lock; + + if (!env_spinlock_trylock(lock)) + return EM_OK; + output_queue_drain(output_q_elem); + env_spinlock_unlock(lock); + } + + return EM_OK; + } + + /* + * No ordered context - call output_fn() directly + */ + const em_queue_t output_queue = output_q_elem->queue; + const em_output_func_t output_fn = + output_q_elem->output.output_conf.output_fn; + void *const output_fn_args = + output_q_elem->output.output_conf.output_fn_args; + int sent; + + sent = output_fn(&event, 1, output_queue, output_fn_args); + if (unlikely(sent != 1)) + return EM_ERR_OPERATION_FAILED; + + return EM_OK; +} + +/** + * Send events to a queue of type EM_QUEUE_TYPE_OUTPUT + */ +static inline int +send_output_multi(const em_event_t events[], const unsigned int num, + queue_elem_t *const output_q_elem) +{ + const em_sched_context_type_t sched_ctx_type = + em_locm.current.sched_context_type; + int sent; + + if (unlikely(EM_CHECK_LEVEL > 0 && + output_q_elem->state != EM_QUEUE_STATE_UNSCHEDULED)) + return 0; + + /* + * Event sent to an output queue from an ordered context needs to + * be 're-ordered' before calling the user provided output-function. + * Order is maintained by enqueuing and dequeuing into an odp-queue + * that takes care of order. + */ + if (sched_ctx_type == EM_SCHED_CONTEXT_TYPE_ORDERED) { + const odp_queue_t odp_queue = output_q_elem->odp_queue; + odp_event_t odp_events[num]; + + if (unlikely(EM_CHECK_LEVEL > 1 && + odp_queue == ODP_QUEUE_INVALID)) + return 0; + + if (!EM_OUTPUT_QUEUE_IMMEDIATE) + output_queue_track(output_q_elem); + + events_em2odp(events, odp_events/*out*/, num); + + /* enqueue to enforce odp to handle ordering */ + sent = odp_queue_enq_multi(odp_queue, odp_events, num); + if (unlikely(sent <= 0)) + return 0; + + /* the return value must be the number of enqueued events */ + + if (EM_OUTPUT_QUEUE_IMMEDIATE) { + env_spinlock_t *const lock = + &output_q_elem->output.lock; + + if (!env_spinlock_trylock(lock)) + return sent; + output_queue_drain(output_q_elem); + env_spinlock_unlock(lock); + } + + return sent; + } + + /* + * No ordered context - call output_fn() directly + */ + const em_queue_t output_queue = output_q_elem->queue; + const em_output_func_t output_fn = output_q_elem->output.output_conf.output_fn; + void *const output_fn_args = output_q_elem->output.output_conf.output_fn_args; + + sent = output_fn(events, num, output_queue, output_fn_args); + + return sent; +} + +/** + * Return a pointer to the EM event user payload. + * Helper to e.g. 
EM API em_event_pointer() + */ +static inline void * +event_pointer(em_event_t event) +{ + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t odp_etype = odp_event_type(odp_event); + void *ev_ptr = NULL; /* return value */ + + if (odp_etype == ODP_EVENT_PACKET) { + odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + + ev_ptr = odp_packet_data(odp_pkt); + } else if (odp_etype == ODP_EVENT_BUFFER) { + odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); + const event_hdr_t *ev_hdr = odp_buffer_addr(odp_buf); + size_t uarea_pad_sz = 0; + + if (ev_hdr->user_area.isinit) + uarea_pad_sz = ev_hdr->user_area.pad_size; + + ev_ptr = (void *)((uintptr_t)ev_hdr + sizeof(event_hdr_t) + + uarea_pad_sz - ev_hdr->align_offset); + } + + return ev_ptr; /* NULL for unrecognized odp_etype, also for vectors */ +} + +static inline bool +event_has_ref(em_event_t event) +{ + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t odp_etype = odp_event_type(odp_event); + + if (odp_etype != ODP_EVENT_PACKET) + return false; + + odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + + return odp_packet_has_ref(odp_pkt) ? true : false; +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_EVENT_H_ */ diff --git a/src/em_event_inline.h b/src/em_event_inline.h new file mode 100644 index 00000000..ebe232dd --- /dev/null +++ b/src/em_event_inline.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + + /** + * @file + * EM internal event functions + * + */ + +#ifndef EM_EVENT_INLINE_H_ +#define EM_EVENT_INLINE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** Convert an EM-event into an ODP-event */ +static inline odp_event_t +event_em2odp(em_event_t event) +{ + /* Valid for both ESV enabled and disabled */ + evhdl_t evhdl = {.event = event}; + + return (odp_event_t)(uintptr_t)evhdl.evptr; +} + +/** + * Convert an ODP-event into an EM-event + * + * @note The returned EM-event does NOT contain the ESV event-generation-count + * evhdl_t::evgen! This must be set separately when using ESV. + */ +static inline em_event_t +event_odp2em(odp_event_t odp_event) +{ + /* Valid for both ESV enabled and disabled */ + + /* + * Setting 'evhdl.event = odp_event' is equivalent to + * 'evhdl.evptr = odp_event, evhdl.evgen = 0' + * (evhdl.evgen still needs to be set when using ESV) + */ + evhdl_t evhdl = {.event = (em_event_t)(uintptr_t)odp_event}; + + return evhdl.event; +} + +/** Convert an array of EM-events into an array of ODP-events */ +static inline void +events_em2odp(const em_event_t events[/*in*/], + odp_event_t odp_events[/*out*/], const unsigned int num) +{ + /* Valid for both ESV enabled and disabled */ + const evhdl_t *const evhdls = (const evhdl_t *)events; + + for (unsigned int i = 0; i < num; i++) + odp_events[i] = (odp_event_t)(uintptr_t)evhdls[i].evptr; +} + +/** + * Convert an array of ODP-events into an array of EM-events + * + * @note The output EM-events do NOT contain the ESV event-generation-count + * evhdl_t::evgen! This must be set separately when using ESV. + */ +static inline void +events_odp2em(const odp_event_t odp_events[/*in*/], + em_event_t events[/*out*/], const unsigned int num) +{ + /* Valid for both ESV enabled and disabled */ + evhdl_t *const evhdls = (evhdl_t *)events; + + /* + * Setting 'evhdls[i].event = odp_events[i]' is equivalent to + * 'evhdls[i].evptr = odp_events[i], evhdls[i].evgen = 0' + * (evhdls[i].evgen still needs to be set when using ESV) + */ + for (unsigned int i = 0; i < num; i++) + evhdls[i].event = (em_event_t)(uintptr_t)odp_events[i]; +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_EVENT_INLINE_H_ */ diff --git a/src/em_event_state.c b/src/em_event_state.c index c5ff76d4..1099e0dd 100644 --- a/src/em_event_state.c +++ b/src/em_event_state.c @@ -1,1065 +1,1099 @@ -/* - * Copyright (c) 2020, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -static int read_config_file(void); - -/** - * Initial counter values set during an alloc-operation: free=0, send=0 - * (em_alloc/_multi(), em_event_clone()) - */ -static const evstate_cnt_t init_cnt_alloc = {.evgen = EVGEN_INIT, - .free_cnt = 0 + FREE_CNT_INIT, - .send_cnt = 0 + SEND_CNT_INIT}; -/** - * Initial counter values for external events entering into EM - * (event not allocated by EM): free:0, send=1 - */ -static const evstate_cnt_t init_cnt_extev = {.evgen = EVGEN_INIT, - .free_cnt = 0 + FREE_CNT_INIT, - .send_cnt = 1 + SEND_CNT_INIT}; -/** - * Expected counter values after an alloc-operation: free=0, send=0 - * (e.g. em_alloc/_multi(), em_event_clone()) - */ -static const evstate_cnt_t exp_cnt_alloc = {.evgen = 0 /* any val possible */, - .free_cnt = 0 + FREE_CNT_INIT, - .send_cnt = 0 + SEND_CNT_INIT}; -/** - * Expected counter values after a free-operation: free=1, send=0 - * (e.g. em_free...(), em_event_mark_free()) - */ -static const evstate_cnt_t exp_cnt_free = {.evgen = 0 /* any val possible */, - .free_cnt = 1 + FREE_CNT_INIT, - .send_cnt = 0 + SEND_CNT_INIT}; -/** - * Expected counter values after a unmark-free-operation: free=0, send=0 - * (em_event_unmark_free()) - */ -static const evstate_cnt_t exp_cnt_free_revert = {.evgen = 0 /* any val possible */, - .free_cnt = 0 + FREE_CNT_INIT, - .send_cnt = 0 + SEND_CNT_INIT}; - -/** - * Expected counter values after a user-to-EM transition: free=0, send=1 - * (e.g. em_send...(), em_tmo_ack/set...()) - */ -static const evstate_cnt_t exp_cnt_usr2em = {.evgen = 0 /* any val possible */, - .free_cnt = 0 + FREE_CNT_INIT, - .send_cnt = 1 + SEND_CNT_INIT}; -/** - * Expected counter values after a failed user-to-EM transition: free=0, send=0 - * (e.g. em_send...() fail, em_tmo_ack/set...() fail) - */ -static const evstate_cnt_t -exp_cnt_usr2em_revert = {.evgen = 0 /* any val possible */, - .free_cnt = 0 + FREE_CNT_INIT, - .send_cnt = 0 + SEND_CNT_INIT}; - -/** - * Expected counter values after an EM-to-user transition: free=0, send=0 - * (e.g. dispatch event, output-queue/event-chaining callback, - * em_queue_dequeue...(), em_tmo_delete/cancel()) - */ -static const evstate_cnt_t exp_cnt_em2usr = {.evgen = 0 /* any val possible */, - .free_cnt = 0 + FREE_CNT_INIT, - .send_cnt = 0 + SEND_CNT_INIT}; -/** - * Expected counter values after a failed EM-to-user transition: free=0, send=1 - * (e.g. em_send(emc or output-queue callback) fail - */ -static const evstate_cnt_t -exp_cnt_em2usr_revert = {.evgen = 0 /* any val possible */, - .free_cnt = 0 + FREE_CNT_INIT, - .send_cnt = 1 + SEND_CNT_INIT}; - -/** - * Information about an event-state update location - */ -typedef struct { - const char *str; - em_escope_t escope; -} evstate_info_t; - -/** - * Constant table containing event-state update location information. - * Only accessed when an erroneous event state has been detected and is being - * reported to the error handler. 
- */ -static const evstate_info_t evstate_info_tbl[] = { - [EVSTATE__UNDEF] = {.str = "undefined", - .escope = (EM_ESCOPE_INTERNAL_MASK | 0)}, - [EVSTATE__PREALLOC] = {.str = "pool-create(prealloc-events)", - .escope = EM_ESCOPE_POOL_CREATE}, - [EVSTATE__ALLOC] = {.str = "em_alloc()", - .escope = EM_ESCOPE_ALLOC}, - [EVSTATE__ALLOC_MULTI] = {.str = "em_alloc_multi()", - .escope = EM_ESCOPE_ALLOC_MULTI}, - [EVSTATE__EVENT_CLONE] = {.str = "em_event_clone()", - .escope = EM_ESCOPE_EVENT_CLONE}, - [EVSTATE__FREE] = {.str = "em_free()", - .escope = EM_ESCOPE_FREE}, - [EVSTATE__FREE_MULTI] = {.str = "em_free_multi()", - .escope = EM_ESCOPE_FREE_MULTI}, - [EVSTATE__INIT] = {.str = "init-event", - .escope = EM_ESCOPE_ODP_EXT}, - [EVSTATE__INIT_MULTI] = {.str = "init-events", - .escope = EM_ESCOPE_ODP_EXT}, - [EVSTATE__INIT_EXTEV] = {.str = "dispatch(init-ext-event)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__INIT_EXTEV_MULTI] = {.str = "dispatch(init-ext-events)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__UPDATE_EXTEV] = {.str = "dispatch(update-ext-event)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__SEND] = {.str = "em_send()", - .escope = EM_ESCOPE_SEND}, - [EVSTATE__SEND__FAIL] = {.str = "em_send(fail)", - .escope = EM_ESCOPE_SEND}, - [EVSTATE__SEND_EGRP] = {.str = "em_send_group()", - .escope = EM_ESCOPE_SEND_GROUP}, - [EVSTATE__SEND_EGRP__FAIL] = {.str = "em_send_group(fail)", - .escope = EM_ESCOPE_SEND_GROUP}, - [EVSTATE__SEND_MULTI] = {.str = "em_send_multi()", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__SEND_MULTI__FAIL] = {.str = "em_send_multi(fail)", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__SEND_EGRP_MULTI] = {.str = "em_send_group_multi()", - .escope = EM_ESCOPE_SEND_GROUP_MULTI}, - [EVSTATE__SEND_EGRP_MULTI__FAIL] = {.str = "em_send_group_multi(fail)", - .escope = EM_ESCOPE_SEND_GROUP_MULTI}, - [EVSTATE__MARK_SEND] = {.str = "em_event_mark_send()", - .escope = EM_ESCOPE_EVENT_MARK_SEND}, - [EVSTATE__UNMARK_SEND] = {.str = "em_event_unmark_send()", - .escope = EM_ESCOPE_EVENT_UNMARK_SEND}, - [EVSTATE__MARK_FREE] = {.str = "em_event_mark_free()", - .escope = EM_ESCOPE_EVENT_MARK_FREE}, - [EVSTATE__UNMARK_FREE] = {.str = "em_event_unmark_free()", - .escope = EM_ESCOPE_EVENT_UNMARK_FREE}, - [EVSTATE__MARK_FREE_MULTI] = {.str = "em_event_mark_free_multi()", - .escope = EM_ESCOPE_EVENT_MARK_FREE_MULTI}, - [EVSTATE__UNMARK_FREE_MULTI] = {.str = "em_event_unmark_free_multi()", - .escope = EM_ESCOPE_EVENT_UNMARK_FREE_MULTI}, - [EVSTATE__DISPATCH] = {.str = "em_dispatch(single-event)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__DISPATCH_MULTI] = {.str = "em_dispatch(multiple-events)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__DISPATCH_SCHED__FAIL] = {.str = "em_dispatch(drop sched-events)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__DISPATCH_LOCAL__FAIL] = {.str = "em_dispatch(drop local-events)", - .escope = EM_ESCOPE_DISPATCH}, - [EVSTATE__DEQUEUE] = {.str = "em_queue_dequeue()", - .escope = EM_ESCOPE_QUEUE_DEQUEUE}, - [EVSTATE__DEQUEUE_MULTI] = {.str = "em_queue_dequeue_multi()", - .escope = EM_ESCOPE_QUEUE_DEQUEUE_MULTI}, - [EVSTATE__OUTPUT] = {.str = "em_send(output-Q)", - .escope = EM_ESCOPE_SEND}, - [EVSTATE__OUTPUT__FAIL] = {.str = "em_send(output-Q:fail)", - .escope = EM_ESCOPE_SEND}, - [EVSTATE__OUTPUT_MULTI] = {.str = "em_send_multi(output-Q)", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__OUTPUT_MULTI__FAIL] = {.str = "em_send_multi(output-Q:fail)", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__OUTPUT_CHAINING] = {.str = "em_send(emc-Q)", - .escope = 
EM_ESCOPE_SEND}, - [EVSTATE__OUTPUT_CHAINING__FAIL] = {.str = "em_send(emc-Q:fail)", - .escope = EM_ESCOPE_SEND}, - [EVSTATE__OUTPUT_CHAINING_MULTI] = {.str = "em_send_multi(emc-Q)", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__OUTPUT_CHAINING_MULTI__FAIL] = {.str = "em_send_multi(emc-Q:fail)", - .escope = EM_ESCOPE_SEND_MULTI}, - [EVSTATE__TMO_SET_ABS] = {.str = "em_tmo_set_abs()", - .escope = EM_ESCOPE_TMO_SET_ABS}, - [EVSTATE__TMO_SET_ABS__FAIL] = {.str = "em_tmo_set_abs(fail)", - .escope = EM_ESCOPE_TMO_SET_ABS}, - [EVSTATE__TMO_SET_REL] = {.str = "em_tmo_set_rel()", - .escope = EM_ESCOPE_TMO_SET_REL}, - [EVSTATE__TMO_SET_REL__FAIL] = {.str = "em_tmo_set_rel(fail)", - .escope = EM_ESCOPE_TMO_SET_REL}, - [EVSTATE__TMO_SET_PERIODIC] = {.str = "em_tmo_set_periodic()", - .escope = EM_ESCOPE_TMO_SET_PERIODIC}, - [EVSTATE__TMO_SET_PERIODIC__FAIL] = {.str = "em_tmo_set_periodic(fail)", - .escope = EM_ESCOPE_TMO_SET_PERIODIC}, - [EVSTATE__TMO_CANCEL] = {.str = "em_tmo_cancel()", - .escope = EM_ESCOPE_TMO_CANCEL}, - [EVSTATE__TMO_ACK] = {.str = "em_tmo_ack()", - .escope = EM_ESCOPE_TMO_ACK}, - [EVSTATE__TMO_ACK__NOSKIP] = {.str = "em_tmo_ack(noskip)", - .escope = EM_ESCOPE_TMO_ACK}, - [EVSTATE__TMO_ACK__FAIL] = {.str = "em_tmo_ack(fail)", - .escope = EM_ESCOPE_TMO_ACK}, - [EVSTATE__TMO_DELETE] = {.str = "em_tmo_delete()", - .escope = EM_ESCOPE_TMO_DELETE}, - [EVSTATE__AG_DELETE] = {.str = "em_atomic_group_delete(flush)", - .escope = EM_ESCOPE_ATOMIC_GROUP_DELETE}, - [EVSTATE__TERM_CORE__QUEUE_LOCAL] = {.str = "em_term_core(local-queue)", - .escope = EM_ESCOPE_TERM_CORE}, - [EVSTATE__TERM] = {.str = "em_term()", - .escope = EM_ESCOPE_TERM}, - /* Last: */ - [EVSTATE__LAST] = {.str = "last", - .escope = (EM_ESCOPE_INTERNAL_MASK | 0)} -}; - -static inline void -esv_update_state(ev_hdr_state_t *const evstate, const uint16_t api_op, - const void *const ev_ptr) -{ - const em_locm_t *const locm = &em_locm; - const uint32_t *const pl_u32 = ev_ptr; - const queue_elem_t *const q_elem = locm->current.q_elem; - - if (!q_elem) { - evstate->eo = EM_EO_UNDEF; - evstate->queue = EM_QUEUE_UNDEF; - } else { - evstate->eo = q_elem->eo; - evstate->queue = q_elem->queue; - } - evstate->api_op = api_op; - evstate->core = locm->core_id; - if (ev_ptr) - evstate->payload_first = *pl_u32; -} - -static inline void -evhdr_update_state(event_hdr_t *const ev_hdr, const uint16_t api_op) -{ - if (!em_shm->opt.esv.store_state) - return; /* don't store updated state */ - - const void *ev_ptr = NULL; - - if (em_shm->opt.esv.store_first_u32) - ev_ptr = event_pointer(ev_hdr->event); - - esv_update_state(&ev_hdr->state, api_op, ev_ptr); -} - -/* "Normal" ESV Error format */ -#define EVSTATE_ERROR_FMT \ -"ESV: Event:%" PRI_EVENT " state error -- counts current(vs.expected):\t" \ -"evgen:%" PRIu16 "(%" PRIu16 ") free:%" PRIi16 "(%" PRIi16 ") send:%" PRIi16 "(%" PRIi16 ")\n" \ -" prev-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -"=> new-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" - -/* ESV Error format for em_event_unmark_send/free/_multi() */ -#define EVSTATE_UNMARK_ERROR_FMT \ -"ESV: Event:%" PRI_EVENT " state error - Invalid 'unmark'-API use\n"\ -" prev-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -"=> new-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" - -/* ESV Error format when esv.store_state = false */ -#define 
EVSTATE__NO_PREV_STATE__ERROR_FMT \ -"ESV: Event:%" PRI_EVENT " state error -- counts current(vs.expected):\t" \ -"evgen:%" PRIu16 "(%" PRIu16 ") free:%" PRIi16 "(%" PRIi16 ") send:%" PRIi16 "(%" PRIi16 ")\n" \ -" prev-state:n/a (disabled in conf)\n" \ -"=> new-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ -" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" - -/* ESV Error format for em_event_unmark_send/free/_multi() when esv.store_state = false */ -#define EVSTATE__NO_PREV_STATE__UNMARK_ERROR_FMT \ -"ESV: Event:%" PRI_EVENT " state error - Invalid 'unmark'-API use\n"\ -" prev-state:n/a (disabled in conf)\n" \ -"=> new-state:%s core:%02u:\t" \ -" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" - -/** - * ESV Error reporting - */ -static inline void -esv_error(const evstate_cnt_t cnt, const evstate_cnt_t exp, evhdl_t evhdl, - const event_hdr_t *const ev_hdr, const uint16_t api_op, - bool is_unmark_error) -{ - uint16_t prev_op = ev_hdr->state.api_op; - ev_hdr_state_t prev_state = ev_hdr->state; /* store prev good state */ - ev_hdr_state_t err_state = {0}; /* store current invalid/error state */ - const em_event_t event = event_hdr_to_event(ev_hdr); - const void *ev_ptr = NULL; - - if (unlikely(prev_op > EVSTATE__LAST)) - prev_op = EVSTATE__UNDEF; - - const evstate_info_t *err_info = &evstate_info_tbl[api_op]; - const evstate_info_t *prev_info = &evstate_info_tbl[prev_op]; - - char curr_eoname[EM_EO_NAME_LEN] = "(noname)"; - char prev_eoname[EM_EO_NAME_LEN] = "(noname)"; - char curr_qname[EM_QUEUE_NAME_LEN] = "(noname)"; - char prev_qname[EM_QUEUE_NAME_LEN] = "(noname)"; - char curr_payload[sizeof("0x12345678 ")] = "(n/a)"; - char prev_payload[sizeof("0x12345678 ")] = "(n/a)"; - - const eo_elem_t *eo_elem; - const queue_elem_t *q_elem; - - /* Check event!=undef to avoid error in em_event_pointer() */ - if (likely(event != EM_EVENT_UNDEF)) - ev_ptr = event_pointer(event); - /* Store the new _invalid_ event-state info into a separate struct */ - esv_update_state(&err_state, api_op, ev_ptr); - - /* - * Print the first 32bits of the event payload on failure, - * the option 'esv.store_payload_first_u32' affects storing during valid - * state transitions. - */ - if (ev_ptr) { - snprintf(curr_payload, sizeof(curr_payload), - "0x%08" PRIx32 "", err_state.payload_first); - curr_payload[sizeof(curr_payload) - 1] = '\0'; - } - - /* current EO-name: */ - eo_elem = eo_elem_get(err_state.eo); - if (eo_elem != NULL) - eo_get_name(eo_elem, curr_eoname, sizeof(curr_eoname)); - /* current queue-name: */ - q_elem = queue_elem_get(err_state.queue); - if (q_elem != NULL) - queue_get_name(q_elem, curr_qname, sizeof(curr_qname)); - - const int16_t free_cnt = cnt.free_cnt - FREE_CNT_INIT; - const int16_t free_exp = exp.free_cnt - FREE_CNT_INIT; - const int16_t send_cnt = cnt.send_cnt - SEND_CNT_INIT; - const int16_t send_exp = exp.send_cnt - SEND_CNT_INIT; - uint16_t evgen_cnt = cnt.evgen; - const uint16_t evgen_hdl = evhdl.evgen; - - /* Read the previous event state only if it has been stored */ - if (em_shm->opt.esv.store_state) { - /* - * Print the first 32 bits of the event payload for the previous - * valid state transition, if enabled in the EM config file: - * 'esv.store_payload_first_u32 = true', otherwise not stored. 
- */ - if (em_shm->opt.esv.store_first_u32) { - snprintf(prev_payload, sizeof(prev_payload), - "0x%08" PRIx32 "", prev_state.payload_first); - prev_payload[sizeof(prev_payload) - 1] = '\0'; - } - /* previous EO-name: */ - eo_elem = eo_elem_get(prev_state.eo); - if (eo_elem != NULL) - eo_get_name(eo_elem, prev_eoname, sizeof(prev_eoname)); - /* previous queue-name: */ - q_elem = queue_elem_get(prev_state.queue); - if (q_elem != NULL) - queue_get_name(q_elem, prev_qname, sizeof(prev_qname)); - - if (!is_unmark_error) { - /* "Normal" ESV Error, prev state available */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, - EVSTATE_ERROR_FMT, - event, evgen_hdl, evgen_cnt, - free_cnt, free_exp, send_cnt, send_exp, - prev_info->str, prev_state.core, - prev_state.eo, prev_eoname, - prev_state.queue, prev_qname, - prev_payload, - err_info->str, err_state.core, - err_state.eo, curr_eoname, - err_state.queue, curr_qname, - curr_payload, - evhdl.event, evhdl.evptr); - } else { - /* - * ESV Error from em_event_unmark_send/free/_multi(), - * prev state available. - */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, - EVSTATE_UNMARK_ERROR_FMT, - event, - prev_info->str, prev_state.core, - prev_state.eo, prev_eoname, - prev_state.queue, prev_qname, - prev_payload, - err_info->str, err_state.core, - err_state.eo, curr_eoname, - err_state.queue, curr_qname, - curr_payload); - } - } else { /* em_shm->opt.esv.store_state == false */ - /* No previous state stored by EM at runtime */ - if (!is_unmark_error) { - /* "Normal" ESV Error, prev state not stored */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, - EVSTATE__NO_PREV_STATE__ERROR_FMT, - event, evgen_hdl, evgen_cnt, - free_cnt, free_exp, send_cnt, send_exp, - err_info->str, err_state.core, - err_state.eo, curr_eoname, - err_state.queue, curr_qname, - curr_payload, - evhdl.event, evhdl.evptr); - } else { - /* - * ESV Error from em_event_unmark_send/free/_multi(), - * prev state not stored. 
- */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), - err_info->escope, - EVSTATE__NO_PREV_STATE__UNMARK_ERROR_FMT, - event, - err_info->str, err_state.core, - err_state.eo, curr_eoname, - err_state.queue, curr_qname, - curr_payload); - } - } -} - -static void -evstate_error(const evstate_cnt_t cnt, const evstate_cnt_t exp, evhdl_t evhdl, - const event_hdr_t *const ev_hdr, const uint16_t api_op) -{ - /* "Normal" ESV Error */ - esv_error(cnt, exp, evhdl, ev_hdr, api_op, false); -} - -/** - * ESV Error reporting for invalid em_event_unmark...() API use - */ -static void -evstate_unmark_error(const event_hdr_t *const ev_hdr, const uint16_t api_op) -{ - evstate_cnt_t dont_care = {.u64 = 0}; - evhdl_t dont_care_hdl = {.event = EM_EVENT_UNDEF}; - - /* ESV Error from em_event_unmark_send/free/_multi() */ - esv_error(dont_care, dont_care, dont_care_hdl, ev_hdr, api_op, true); -} - -static inline em_event_t -esv_evinit(const em_event_t event, event_hdr_t *const ev_hdr, - const evstate_cnt_t init_cnt, const uint16_t api_op) -{ - evhdl_t evhdl = {.event = event}; - - evhdl.evgen = EVGEN_INIT; - ev_hdr->event = evhdl.event; - - /* Set initial counters (atomic) */ - __atomic_store_n(&ev_hdr->state_cnt.u64, init_cnt.u64, - __ATOMIC_RELAXED); - /* Set initial state information (non-atomic) */ - evhdr_update_state(ev_hdr, api_op); - - return evhdl.event; -} - -static inline void -esv_evinit_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const evstate_cnt_t init_cnt, const uint16_t api_op) -{ - evhdl_t *const evhdl_tbl = (evhdl_t *)ev_tbl; - - for (int i = 0; i < num; i++) { - evhdl_tbl[i].evgen = EVGEN_INIT; - ev_hdr_tbl[i]->event = evhdl_tbl[i].event; - - /* Set initial counters for ext-events (atomic) */ - __atomic_store_n(&ev_hdr_tbl[i]->state_cnt.u64, - init_cnt.u64, __ATOMIC_RELAXED); - /* Set initial state information (non-atomic) */ - evhdr_update_state(ev_hdr_tbl[i], api_op); - } -} - -static inline em_event_t -esv_em2usr(const em_event_t event, event_hdr_t *const ev_hdr, - const evstate_cnt_t cnt, const evstate_cnt_t exp_cnt, - const uint16_t api_op, const bool is_revert) -{ - evhdl_t evhdl = {.event = event}; - evstate_cnt_t new_cnt; - - /* Update state-count and return value of all counters (atomic) */ - if (unlikely(is_revert)) { - /* Revert previous em2usr counter update on failed operation */ - new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - } else { - /* Normal em2usr counter update */ - new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - } - evhdl.evgen = new_cnt.evgen; - ev_hdr->event = evhdl.event; - - if (unlikely(new_cnt.free_send_cnt != exp_cnt.free_send_cnt)) { - /* report fatal event-state error, never return */ - evstate_error(new_cnt, exp_cnt, evhdl, ev_hdr, api_op); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - evhdr_update_state(ev_hdr, api_op); - - return evhdl.event; -} - -static inline void -esv_em2usr_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const evstate_cnt_t cnt, const evstate_cnt_t exp_cnt, - const uint16_t api_op, const bool is_revert) -{ - evhdl_t *const evhdl_tbl = (evhdl_t *)ev_tbl; - evstate_cnt_t new_cnt; - - for (int i = 0; i < num; i++) { - /* Update state-count and return value of all counters (atomic) */ - if (unlikely(is_revert)) { - /* Revert em2usr counter update on failed operation */ - new_cnt.u64 = - 
__atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - } else { - /* Normal em2usr counter update */ - new_cnt.u64 = - __atomic_sub_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - } - evhdl_tbl[i].evgen = new_cnt.evgen; - ev_hdr_tbl[i]->event = evhdl_tbl[i].event; - - if (unlikely(new_cnt.free_send_cnt != exp_cnt.free_send_cnt)) { - /* report fatal event-state error, never return */ - evstate_error(new_cnt, exp_cnt, evhdl_tbl[i], - ev_hdr_tbl[i], api_op); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - evhdr_update_state(ev_hdr_tbl[i], api_op); - } -} - -static inline void -esv_usr2em(const em_event_t event, event_hdr_t *const ev_hdr, - const evstate_cnt_t cnt, const evstate_cnt_t exp_cnt, - const uint16_t api_op, const bool is_revert) -{ - evhdl_t evhdl = {.event = event}; - evstate_cnt_t new_cnt; - - /* Update state-count and return value of all counters (atomic) */ - if (unlikely(is_revert)) { - /* Revert previous usr2em counter update on failed operation */ - new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_INIT - 1)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t add = {.evgen = EVGEN_MAX - EVGEN_INIT, - .free_cnt = 0, .send_cnt = 0}; - new_cnt.evgen = __atomic_add_fetch(&ev_hdr->state_cnt.u64, - add.u64, __ATOMIC_RELAXED); - } - } else { - /* Normal usr2em counter update */ - new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_MAX)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t sub = {.evgen = EVGEN_MAX - EVGEN_INIT, - .free_cnt = 0, .send_cnt = 0}; - __atomic_fetch_sub(&ev_hdr->state_cnt.u64, sub.u64, - __ATOMIC_RELAXED); - } - /* cmp new_cnt.evgen vs evhdl.evgen of previous gen, thus -1 */ - new_cnt.evgen -= 1; - } - - if (unlikely(new_cnt.free_send_cnt != exp_cnt.free_send_cnt || - evhdl.evgen != new_cnt.evgen)) { - /* report fatal event-state error, never return */ - evstate_error(new_cnt, exp_cnt, evhdl, ev_hdr, api_op); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - evhdr_update_state(ev_hdr, api_op); -} - -static inline void -esv_usr2em_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const evstate_cnt_t cnt, const evstate_cnt_t exp_cnt, - const uint16_t api_op, const bool is_revert) -{ - const evhdl_t *const evhdl_tbl = (const evhdl_t *)ev_tbl; - evstate_cnt_t new_cnt; - - for (int i = 0; i < num; i++) { - /* Update state-count and return value of all counters (atomic) */ - if (unlikely(is_revert)) { - /* Revert usr2em counter update on failed operation */ - new_cnt.u64 = - __atomic_sub_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_INIT - 1)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t add = {.evgen = EVGEN_MAX - EVGEN_INIT, - .free_cnt = 0, .send_cnt = 0}; - new_cnt.evgen = __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - add.u64, __ATOMIC_RELAXED); - } - } else { - /* Normal usr2em counter update */ - new_cnt.u64 = - __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, - cnt.u64, __ATOMIC_RELAXED); - - if (unlikely(new_cnt.evgen == EVGEN_MAX)) { - /* Avoid .evgen counter wrap */ - const evstate_cnt_t sub = {.evgen = EVGEN_MAX - EVGEN_INIT, - .free_cnt = 0, .send_cnt = 0}; - __atomic_fetch_sub(&ev_hdr_tbl[i]->state_cnt.u64, sub.u64, - __ATOMIC_RELAXED); - 
} - - new_cnt.evgen -= 1; - } - - if (unlikely(new_cnt.free_send_cnt != exp_cnt.free_send_cnt || - evhdl_tbl[i].evgen != new_cnt.evgen)) { - /* report fatal event-state error, never return */ - evstate_error(new_cnt, exp_cnt, evhdl_tbl[i], - ev_hdr_tbl[i], api_op); - /* never reached */ - } - - /* - * Valid state transition, update state (non-atomic) - */ - evhdr_update_state(ev_hdr_tbl[i], api_op); - } -} - -em_event_t evstate_prealloc(const em_event_t event, event_hdr_t *const ev_hdr) -{ - return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__PREALLOC); -} - -em_event_t evstate_alloc(const em_event_t event, event_hdr_t *const ev_hdr) -{ - if (!em_shm->opt.esv.prealloc_pools) - return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__ALLOC); - - const evstate_cnt_t sub = {.evgen = 0, .free_cnt = 1, .send_cnt = 0}; - - return esv_em2usr(event, ev_hdr, sub, exp_cnt_alloc, - EVSTATE__ALLOC, false); -} - -void evstate_alloc_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num) -{ - if (!em_shm->opt.esv.prealloc_pools) { - esv_evinit_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, - init_cnt_alloc, EVSTATE__ALLOC_MULTI); - return; - } - - const evstate_cnt_t sub = {.evgen = 0, .free_cnt = 1, .send_cnt = 0}; - - esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, - sub, exp_cnt_alloc, EVSTATE__ALLOC_MULTI, false); -} - -em_event_t evstate_clone(const em_event_t clone_event, event_hdr_t *const ev_hdr) -{ - if (!em_shm->opt.esv.prealloc_pools) - return esv_evinit(clone_event, ev_hdr, - init_cnt_alloc /* use 'alloc' init value */, - EVSTATE__EVENT_CLONE); - - const evstate_cnt_t sub = {.evgen = 0, .free_cnt = 1, .send_cnt = 0}; - - return esv_em2usr(clone_event, ev_hdr, sub, - exp_cnt_alloc /* use 'alloc' expected value */, - EVSTATE__EVENT_CLONE, false); -} - -em_event_t evstate_init(const em_event_t event, event_hdr_t *const ev_hdr, - bool is_extev) -{ - uint16_t api_op; - evstate_cnt_t init_cnt; - - if (is_extev) { - api_op = EVSTATE__INIT_EXTEV; - init_cnt = init_cnt_extev; - } else { - api_op = EVSTATE__INIT; - init_cnt = init_cnt_alloc; - } - - return esv_evinit(event, ev_hdr, init_cnt, api_op); -} - -void evstate_init_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - bool is_extev) -{ - uint16_t api_op; - evstate_cnt_t init_cnt; - - if (is_extev) { - api_op = EVSTATE__INIT_EXTEV_MULTI; - init_cnt = init_cnt_extev; - } else { - api_op = EVSTATE__INIT_MULTI; - init_cnt = init_cnt_alloc; - } - - esv_evinit_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, - init_cnt, api_op); -} - -em_event_t evstate_update(const em_event_t event, event_hdr_t *const ev_hdr, - bool is_extev) -{ - em_event_t ret_event; - - /* mark allocated */ - const evstate_cnt_t sub = {.evgen = 0, .free_cnt = 1, .send_cnt = 0}; - - ret_event = esv_em2usr(event, ev_hdr, sub, exp_cnt_alloc, - EVSTATE__UPDATE_EXTEV, false); - - if (is_extev) { - /* mark sent */ - const evstate_cnt_t add = {.evgen = 1, - .free_cnt = 0, .send_cnt = 1}; - - esv_usr2em(ret_event, ev_hdr, add, exp_cnt_usr2em, - EVSTATE__UPDATE_EXTEV, false); - } - - return ret_event; -} - -void evstate_free(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 1, .free_cnt = 1, .send_cnt = 0}; - - esv_usr2em(event, ev_hdr, add, exp_cnt_free, api_op, false); -} - -void evstate_free_revert(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 1, .free_cnt = 1, .send_cnt = 0}; - - esv_usr2em(event, 
ev_hdr, sub, exp_cnt_free_revert, - api_op, true /*revert*/); -} - -void evstate_free_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 1, .free_cnt = 1, .send_cnt = 0}; - - esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, - add, exp_cnt_free, api_op, false); -} - -void evstate_free_revert_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 1, .free_cnt = 1, .send_cnt = 0}; - - esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, - sub, exp_cnt_free_revert, api_op, true /*revert*/); -} - -em_event_t evstate_em2usr(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 0, .free_cnt = 0, .send_cnt = 1}; - - return esv_em2usr(event, ev_hdr, sub, exp_cnt_em2usr, api_op, false); -} - -em_event_t evstate_em2usr_revert(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 0, .free_cnt = 0, .send_cnt = 1}; - - return esv_em2usr(event, ev_hdr, add, exp_cnt_em2usr_revert, - api_op, true /*revert*/); -} - -void evstate_em2usr_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 0, .free_cnt = 0, .send_cnt = 1}; - - esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, - sub, exp_cnt_em2usr, api_op, false); -} - -void evstate_em2usr_revert_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 0, .free_cnt = 0, .send_cnt = 1}; - - esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, - add, exp_cnt_em2usr_revert, api_op, true /*revert*/); -} - -void evstate_usr2em(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 1, .free_cnt = 0, .send_cnt = 1}; - - esv_usr2em(event, ev_hdr, add, exp_cnt_usr2em, api_op, false); -} - -void evstate_usr2em_revert(const em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 1, .free_cnt = 0, .send_cnt = 1}; - - esv_usr2em(event, ev_hdr, sub, exp_cnt_usr2em_revert, - api_op, true /*revert*/); -} - -void evstate_usr2em_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t add = {.evgen = 1, .free_cnt = 0, .send_cnt = 1}; - - esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, add, exp_cnt_usr2em, - api_op, false); -} - -void evstate_usr2em_revert_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op) -{ - const evstate_cnt_t sub = {.evgen = 1, .free_cnt = 0, .send_cnt = 1}; - - esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, sub, exp_cnt_usr2em_revert, - api_op, true /*revert*/); -} - -/* - * Ensure that em_event_unmark_...() is only called after - * em_event_mark_...() (not after normal em_send/free() etc). - */ -static inline void -check_valid_unmark(const event_hdr_t *ev_hdr, uint16_t api_op, - const uint16_t expected_ops[], const int num_ops) -{ - uint16_t prev_op = ev_hdr->state.api_op; - - for (int i = 0; i < num_ops; i++) { - if (prev_op == expected_ops[i]) - return; /* success */ - } - - /* previous API was NOT em_event_mark_..., report FATAL error! 
*/ - evstate_unmark_error(ev_hdr, api_op); -} - -static inline void -check_valid_unmark_multi(event_hdr_t *const ev_hdr_tbl[], const int num_evs, - uint16_t api_op, const uint16_t expected_ops[], const int num_ops) -{ - uint16_t prev_op; - bool is_valid; - - for (int i = 0; i < num_evs; i++) { - prev_op = ev_hdr_tbl[i]->state.api_op; - is_valid = false; - - for (int j = 0; j < num_ops; j++) { - if (prev_op == expected_ops[j]) { - is_valid = true; - break; /* success */ - } - } - - /* previous API was NOT em_event_mark_..., report FATAL error!*/ - if (unlikely(!is_valid)) - evstate_unmark_error(ev_hdr_tbl[i], api_op); - } -} - -void evstate_unmark_send(const em_event_t event, event_hdr_t *const ev_hdr) -{ - uint16_t expected_prev_ops[1] = {EVSTATE__MARK_SEND}; - /* - * Ensure that em_event_unmark_send() is only called after - * em_event_mark_send/_multi() (not after em_send() etc). - */ - check_valid_unmark(ev_hdr, EVSTATE__UNMARK_SEND, - expected_prev_ops, 1); - evstate_usr2em_revert(event, ev_hdr, EVSTATE__UNMARK_SEND); -} - -void evstate_unmark_free(const em_event_t event, event_hdr_t *const ev_hdr) -{ - uint16_t expected_prev_ops[2] = {EVSTATE__MARK_FREE, - EVSTATE__MARK_FREE_MULTI}; - /* - * Ensure that em_event_unmark_free() is only called - * after em_event_mark_free() (not after em_free() etc). - */ - check_valid_unmark(ev_hdr, EVSTATE__UNMARK_FREE, - expected_prev_ops, 2); - evstate_free_revert(event, ev_hdr, EVSTATE__UNMARK_FREE); -} - -void evstate_unmark_free_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num) -{ - uint16_t expected_prev_ops[2] = {EVSTATE__MARK_FREE_MULTI, - EVSTATE__MARK_FREE}; - /* - * Ensure that em_event_unmark_free_multi() is only - * called after em_event_mark_free_multi() - * (not after em_free/_multi() etc). - */ - check_valid_unmark_multi(ev_hdr_tbl, num, EVSTATE__UNMARK_FREE_MULTI, - expected_prev_ops, 2); - evstate_free_revert_multi(ev_tbl, ev_hdr_tbl, num, - EVSTATE__UNMARK_FREE_MULTI); -} - -static int read_config_file(void) -{ - const char *conf_str; - bool val_bool = false; - int ret; - - EM_PRINT("EM ESV config:\n"); - - /* - * Option: esv.enable - runtime enable/disable - */ - conf_str = "esv.enable"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.esv.enable = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", - val_bool); - - if (!em_shm->opt.esv.enable) { - /* Read no more options if ESV is disabled */ - memset(&em_shm->opt.esv, 0, sizeof(em_shm->opt.esv)); - return 0; - } - - /* - * Option: esv.store_state - */ - conf_str = "esv.store_state"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.esv.store_state = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", - val_bool); - - /* - * Option: esv.store_payload_first_u32 - */ - conf_str = "esv.store_payload_first_u32"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.esv.store_first_u32 = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", - val_bool); - - /* - * Option: esv.prealloc_pools - */ - conf_str = "esv.prealloc_pools"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - /* store & print the value */ - em_shm->opt.esv.prealloc_pools = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", - val_bool); - - return 0; -} - -em_status_t esv_init(void) -{ - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - return EM_OK; -} +/* + * Copyright (c) 2020-2022, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +static int read_config_file(void); + +/** + * Initial counter values set during an alloc-operation: ref=1, send=0 + * (em_alloc/_multi(), em_event_clone()) + */ +static const evstate_cnt_t init_cnt_alloc = {.evgen = EVGEN_INIT, + .rsvd = 0, + .ref_cnt = REF_CNT_INIT - 1, + .send_cnt = 0 + SEND_CNT_INIT}; +/** + * Initial counter values for external events entering into EM + * (event not allocated by EM): ref=1, send=1 + */ +static const evstate_cnt_t init_cnt_extev = {.evgen = EVGEN_INIT, + .rsvd = 0, + .ref_cnt = REF_CNT_INIT - 1, + .send_cnt = 1 + SEND_CNT_INIT}; + +/** + * Information about an event-state update location + */ +typedef struct { + const char *str; + em_escope_t escope; +} evstate_info_t; + +/** + * Constant table containing event-state update location information. + * Only accessed when an erroneous event state has been detected and is being + * reported to the error handler. 
+ */ +static const evstate_info_t evstate_info_tbl[] = { + [EVSTATE__UNDEF] = {.str = "undefined", + .escope = (EM_ESCOPE_INTERNAL_MASK | 0)}, + [EVSTATE__PREALLOC] = {.str = "pool-create(prealloc-events)", + .escope = EM_ESCOPE_POOL_CREATE}, + [EVSTATE__ALLOC] = {.str = "em_alloc()", + .escope = EM_ESCOPE_ALLOC}, + [EVSTATE__ALLOC_MULTI] = {.str = "em_alloc_multi()", + .escope = EM_ESCOPE_ALLOC_MULTI}, + [EVSTATE__EVENT_CLONE] = {.str = "em_event_clone()", + .escope = EM_ESCOPE_EVENT_CLONE}, + [EVSTATE__EVENT_REF] = {.str = "em_event_ref()", + .escope = EM_ESCOPE_EVENT_REF}, + [EVSTATE__FREE] = {.str = "em_free()", + .escope = EM_ESCOPE_FREE}, + [EVSTATE__FREE_MULTI] = {.str = "em_free_multi()", + .escope = EM_ESCOPE_FREE_MULTI}, + [EVSTATE__EVENT_VECTOR_FREE] = {.str = "em_event_vector_free()", + .escope = EM_ESCOPE_EVENT_VECTOR_FREE}, + [EVSTATE__INIT] = {.str = "init-event", + .escope = EM_ESCOPE_ODP_EXT}, + [EVSTATE__INIT_MULTI] = {.str = "init-events", + .escope = EM_ESCOPE_ODP_EXT}, + [EVSTATE__INIT_EXTEV] = {.str = "dispatch(init-ext-event)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__INIT_EXTEV_MULTI] = {.str = "dispatch(init-ext-events)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__UPDATE_EXTEV] = {.str = "dispatch(update-ext-event)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__SEND] = {.str = "em_send()", + .escope = EM_ESCOPE_SEND}, + [EVSTATE__SEND__FAIL] = {.str = "em_send(fail)", + .escope = EM_ESCOPE_SEND}, + [EVSTATE__SEND_EGRP] = {.str = "em_send_group()", + .escope = EM_ESCOPE_SEND_GROUP}, + [EVSTATE__SEND_EGRP__FAIL] = {.str = "em_send_group(fail)", + .escope = EM_ESCOPE_SEND_GROUP}, + [EVSTATE__SEND_MULTI] = {.str = "em_send_multi()", + .escope = EM_ESCOPE_SEND_MULTI}, + [EVSTATE__SEND_MULTI__FAIL] = {.str = "em_send_multi(fail)", + .escope = EM_ESCOPE_SEND_MULTI}, + [EVSTATE__SEND_EGRP_MULTI] = {.str = "em_send_group_multi()", + .escope = EM_ESCOPE_SEND_GROUP_MULTI}, + [EVSTATE__SEND_EGRP_MULTI__FAIL] = {.str = "em_send_group_multi(fail)", + .escope = EM_ESCOPE_SEND_GROUP_MULTI}, + [EVSTATE__EO_START_SEND_BUFFERED] = {.str = "eo-start:send-buffered-events()", + .escope = EM_ESCOPE_SEND_MULTI}, + [EVSTATE__MARK_SEND] = {.str = "em_event_mark_send()", + .escope = EM_ESCOPE_EVENT_MARK_SEND}, + [EVSTATE__UNMARK_SEND] = {.str = "em_event_unmark_send()", + .escope = EM_ESCOPE_EVENT_UNMARK_SEND}, + [EVSTATE__MARK_FREE] = {.str = "em_event_mark_free()", + .escope = EM_ESCOPE_EVENT_MARK_FREE}, + [EVSTATE__UNMARK_FREE] = {.str = "em_event_unmark_free()", + .escope = EM_ESCOPE_EVENT_UNMARK_FREE}, + [EVSTATE__MARK_FREE_MULTI] = {.str = "em_event_mark_free_multi()", + .escope = EM_ESCOPE_EVENT_MARK_FREE_MULTI}, + [EVSTATE__UNMARK_FREE_MULTI] = {.str = "em_event_unmark_free_multi()", + .escope = EM_ESCOPE_EVENT_UNMARK_FREE_MULTI}, + [EVSTATE__DISPATCH] = {.str = "em_dispatch(single-event)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__DISPATCH_MULTI] = {.str = "em_dispatch(multiple-events)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__DISPATCH_SCHED__FAIL] = {.str = "em_dispatch(drop sched-events)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__DISPATCH_LOCAL__FAIL] = {.str = "em_dispatch(drop local-events)", + .escope = EM_ESCOPE_DISPATCH}, + [EVSTATE__DEQUEUE] = {.str = "em_queue_dequeue()", + .escope = EM_ESCOPE_QUEUE_DEQUEUE}, + [EVSTATE__DEQUEUE_MULTI] = {.str = "em_queue_dequeue_multi()", + .escope = EM_ESCOPE_QUEUE_DEQUEUE_MULTI}, + [EVSTATE__TMO_SET_ABS] = {.str = "em_tmo_set_abs()", + .escope = EM_ESCOPE_TMO_SET_ABS}, + [EVSTATE__TMO_SET_ABS__FAIL] = {.str = 
"em_tmo_set_abs(fail)", + .escope = EM_ESCOPE_TMO_SET_ABS}, + [EVSTATE__TMO_SET_REL] = {.str = "em_tmo_set_rel()", + .escope = EM_ESCOPE_TMO_SET_REL}, + [EVSTATE__TMO_SET_REL__FAIL] = {.str = "em_tmo_set_rel(fail)", + .escope = EM_ESCOPE_TMO_SET_REL}, + [EVSTATE__TMO_SET_PERIODIC] = {.str = "em_tmo_set_periodic()", + .escope = EM_ESCOPE_TMO_SET_PERIODIC}, + [EVSTATE__TMO_SET_PERIODIC__FAIL] = {.str = "em_tmo_set_periodic(fail)", + .escope = EM_ESCOPE_TMO_SET_PERIODIC}, + [EVSTATE__TMO_CANCEL] = {.str = "em_tmo_cancel()", + .escope = EM_ESCOPE_TMO_CANCEL}, + [EVSTATE__TMO_ACK] = {.str = "em_tmo_ack()", + .escope = EM_ESCOPE_TMO_ACK}, + [EVSTATE__TMO_ACK__NOSKIP] = {.str = "em_tmo_ack(noskip)", + .escope = EM_ESCOPE_TMO_ACK}, + [EVSTATE__TMO_ACK__FAIL] = {.str = "em_tmo_ack(fail)", + .escope = EM_ESCOPE_TMO_ACK}, + [EVSTATE__TMO_DELETE] = {.str = "em_tmo_delete()", + .escope = EM_ESCOPE_TMO_DELETE}, + [EVSTATE__AG_DELETE] = {.str = "em_atomic_group_delete(flush)", + .escope = EM_ESCOPE_ATOMIC_GROUP_DELETE}, + [EVSTATE__TERM_CORE__QUEUE_LOCAL] = {.str = "em_term_core(local-queue)", + .escope = EM_ESCOPE_TERM_CORE}, + [EVSTATE__TERM] = {.str = "em_term()", + .escope = EM_ESCOPE_TERM}, + /* Last: */ + [EVSTATE__LAST] = {.str = "last", + .escope = (EM_ESCOPE_INTERNAL_MASK | 0)} +}; + +static const char *const help_str_em2usr = +"OK: 'send < ref, both >=0'. Err otherwise"; +static const char *const help_str_usr2em = +"OK: 'send <= ref, both >=0' AND 'hdl evgen == evgen'. Err otherwise"; +static const char *const help_str_usr2em_ref = +"OK: 'send <= ref, both >=0'. Err otherwise"; + +static inline void +esv_update_state(ev_hdr_state_t *const evstate, const uint16_t api_op, + const void *const ev_ptr) +{ + const em_locm_t *const locm = &em_locm; + const uint32_t *const pl_u32 = ev_ptr; + const queue_elem_t *const q_elem = locm->current.q_elem; + + if (!q_elem) { + evstate->eo = EM_EO_UNDEF; + evstate->queue = EM_QUEUE_UNDEF; + } else { + evstate->eo = q_elem->eo; + evstate->queue = q_elem->queue; + } + evstate->api_op = api_op; + evstate->core = locm->core_id; + if (ev_ptr) + evstate->payload_first = *pl_u32; +} + +static inline void +evhdr_update_state(event_hdr_t *const ev_hdr, const uint16_t api_op) +{ + if (!em_shm->opt.esv.store_state) + return; /* don't store updated state */ + + const void *ev_ptr = NULL; + + if (em_shm->opt.esv.store_first_u32) + ev_ptr = event_pointer(ev_hdr->event); + + esv_update_state(&ev_hdr->state, api_op, ev_ptr); +} + +/* "Normal" ESV Error format */ +#define EVSTATE_ERROR_FMT \ +"ESV: Event:%" PRI_EVENT " state error -- counts:\t" \ +"send:%" PRIi16 " ref:%" PRIi16 " evgen:%" PRIu16 "(%" PRIu16 ")\n" \ +" Help: %s\n" \ +" prev-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" + +/* ESV Error format for references */ +#define EVSTATE_REF_ERROR_FMT \ +"ESV: RefEvent:%" PRI_EVENT " state error -- counts:\t" \ +"send:%" PRIi16 " ref:%" PRIi16 " (evgen:%" PRIu16 " ignored for refs)\n" \ +" Help: %s\n" \ +" prev-state:n/a (not valid for event references)\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" + +/* ESV Error format for em_event_unmark_send/free/_multi() */ +#define EVSTATE_UNMARK_ERROR_FMT \ +"ESV: Event:%" PRI_EVENT " state error - Invalid 'unmark'-API use\n"\ +" prev-state:%s 
core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" + +/* ESV Error format when esv.store_state = false */ +#define EVSTATE__NO_PREV_STATE__ERROR_FMT \ +"ESV: Event:%" PRI_EVENT " state error -- counts:\t" \ +"send:%" PRIi16 " ref:%" PRIi16 " evgen:%" PRIu16 "(%" PRIu16 ")\n" \ +" Help: %s\n" \ +" prev-state:n/a (disabled in conf)\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" \ +" event:0x%016" PRIx64 ": ptr:0x%" PRIx64 "" + +/* ESV Error format for em_event_unmark_send/free/_multi() when esv.store_state = false */ +#define EVSTATE__NO_PREV_STATE__UNMARK_ERROR_FMT \ +"ESV: Event:%" PRI_EVENT " state error - Invalid 'unmark'-API use\n"\ +" prev-state:n/a (disabled in conf)\n" \ +"=> err-state:%s core:%02u:\t" \ +" EO:%" PRI_EO "-\"%s\" Q:%" PRI_QUEUE "-\"%s\" u32[0]:%s\n" + +/** + * ESV Error reporting + */ +static inline void +esv_error(const evstate_cnt_t cnt, + evhdl_t evhdl, const event_hdr_t *const ev_hdr, + const uint16_t api_op, bool is_unmark_error, + const char *const help_str) +{ + uint16_t prev_op = ev_hdr->state.api_op; + ev_hdr_state_t prev_state = ev_hdr->state; /* store prev good state */ + ev_hdr_state_t err_state = {0}; /* store current invalid/error state */ + const em_event_t event = event_hdr_to_event(ev_hdr); + const void *ev_ptr = NULL; + + if (unlikely(prev_op > EVSTATE__LAST)) + prev_op = EVSTATE__UNDEF; + + const evstate_info_t *err_info = &evstate_info_tbl[api_op]; + const evstate_info_t *prev_info = &evstate_info_tbl[prev_op]; + + char curr_eoname[EM_EO_NAME_LEN] = "(noname)"; + char prev_eoname[EM_EO_NAME_LEN] = "(noname)"; + char curr_qname[EM_QUEUE_NAME_LEN] = "(noname)"; + char prev_qname[EM_QUEUE_NAME_LEN] = "(noname)"; + char curr_payload[sizeof("0x12345678 ")] = "(n/a)"; + char prev_payload[sizeof("0x12345678 ")] = "(n/a)"; + + const eo_elem_t *eo_elem; + const queue_elem_t *q_elem; + + /* Check event!=undef to avoid error in event_pointer() */ + if (likely(event != EM_EVENT_UNDEF)) + ev_ptr = event_pointer(event); + /* Store the new _invalid_ event-state info into a separate struct */ + esv_update_state(&err_state, api_op, ev_ptr); + + /* + * Print the first 32 bits of the event payload on failure; + * the option 'esv.store_payload_first_u32' only affects storing during + * valid state transitions. + */ + if (ev_ptr) { + snprintf(curr_payload, sizeof(curr_payload), + "0x%08" PRIx32 "", err_state.payload_first); + curr_payload[sizeof(curr_payload) - 1] = '\0'; + } + + /* current EO-name: */ + eo_elem = eo_elem_get(err_state.eo); + if (eo_elem != NULL) + eo_get_name(eo_elem, curr_eoname, sizeof(curr_eoname)); + /* current queue-name: */ + q_elem = queue_elem_get(err_state.queue); + if (q_elem != NULL) + queue_get_name(q_elem, curr_qname, sizeof(curr_qname)); + + const int16_t send_cnt = cnt.send_cnt - SEND_CNT_INIT; + uint16_t evgen_cnt = cnt.evgen; + const uint16_t evgen_hdl = evhdl.evgen; + const int16_t ref_cnt = REF_CNT_INIT - cnt.ref_cnt; + + /* Read the previous event state only if it has been stored */ + if (em_shm->opt.esv.store_state) { + /* + * Print the first 32 bits of the event payload for the previous + * valid state transition, if enabled in the EM config file: + * 'esv.store_payload_first_u32 = true', otherwise not stored. 
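+		 * (illustrative config sketch, assuming the libconfig group
+		 *  syntax used by the EM config file:
+		 *    esv: { store_payload_first_u32 = true }  )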
+ */ + if (em_shm->opt.esv.store_first_u32) { + snprintf(prev_payload, sizeof(prev_payload), + "0x%08" PRIx32 "", prev_state.payload_first); + prev_payload[sizeof(prev_payload) - 1] = '\0'; + } + /* previous EO-name: */ + eo_elem = eo_elem_get(prev_state.eo); + if (eo_elem != NULL) + eo_get_name(eo_elem, prev_eoname, sizeof(prev_eoname)); + /* previous queue-name: */ + q_elem = queue_elem_get(prev_state.queue); + if (q_elem != NULL) + queue_get_name(q_elem, prev_qname, sizeof(prev_qname)); + + if (ev_hdr->flags.refs_used) { + /* Reference ESV Error, prev state available */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE_REF_ERROR_FMT, + event, send_cnt, ref_cnt, evgen_cnt, help_str, + err_info->str, err_state.core, + err_state.eo, curr_eoname, err_state.queue, curr_qname, + curr_payload, evhdl.event, evhdl.evptr); + } else if (!is_unmark_error) { + /* "Normal" ESV Error, prev state available */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE_ERROR_FMT, + event, send_cnt, ref_cnt, evgen_hdl, evgen_cnt, help_str, + prev_info->str, prev_state.core, prev_state.eo, prev_eoname, + prev_state.queue, prev_qname, prev_payload, + err_info->str, err_state.core, err_state.eo, curr_eoname, + err_state.queue, curr_qname, curr_payload, + evhdl.event, evhdl.evptr); + } else { + /* + * ESV Error from em_event_unmark_send/free/_multi(), + * prev state available. + */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE_UNMARK_ERROR_FMT, + event, + prev_info->str, prev_state.core, + prev_state.eo, prev_eoname, + prev_state.queue, prev_qname, prev_payload, + err_info->str, err_state.core, + err_state.eo, curr_eoname, + err_state.queue, curr_qname, curr_payload); + } + } else { /* em_shm->opt.esv.store_state == false */ + /* No previous state stored by EM at runtime */ + if (!is_unmark_error) { + /* "Normal" ESV Error, prev state not stored */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE__NO_PREV_STATE__ERROR_FMT, + event, send_cnt, ref_cnt, evgen_hdl, evgen_cnt, help_str, + err_info->str, err_state.core, err_state.eo, curr_eoname, + err_state.queue, curr_qname, curr_payload, + evhdl.event, evhdl.evptr); + } else { + /* + * ESV Error from em_event_unmark_send/free/_multi(), + * prev state not stored. 
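+			 * (e.g. em_event_unmark_free() called without a
+			 * preceding em_event_mark_free(), such as after a
+			 * normal em_free())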
+ */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_EVENT_STATE), + err_info->escope, EVSTATE__NO_PREV_STATE__UNMARK_ERROR_FMT, + event, + err_info->str, err_state.core, err_state.eo, curr_eoname, + err_state.queue, curr_qname, curr_payload); + } + } +} + +static void +evstate_error(const evstate_cnt_t cnt, evhdl_t evhdl, + const event_hdr_t *const ev_hdr, const uint16_t api_op, + const char *const help_str) +{ + /* "Normal" ESV Error */ + esv_error(cnt, evhdl, ev_hdr, api_op, false, help_str); +} + +/** + * ESV Error reporting for invalid em_event_unmark...() API use + */ +static void +evstate_unmark_error(const event_hdr_t *const ev_hdr, const uint16_t api_op) +{ + evstate_cnt_t dont_care = {.u64 = 0}; + evhdl_t dont_care_hdl = {.event = EM_EVENT_UNDEF}; + + /* ESV Error from em_event_unmark_send/free/_multi() */ + esv_error(dont_care, dont_care_hdl, ev_hdr, api_op, true, "n/a"); +} + +static inline em_event_t +esv_evinit(const em_event_t event, event_hdr_t *const ev_hdr, + const evstate_cnt_t init_cnt, const uint16_t api_op) +{ + evhdl_t evhdl = {.event = event}; + + evhdl.evgen = EVGEN_INIT; + ev_hdr->event = evhdl.event; + + /* Set initial counters (atomic) */ + __atomic_store_n(&ev_hdr->state_cnt.u64, init_cnt.u64, + __ATOMIC_RELAXED); + /* Set initial state information (non-atomic) */ + evhdr_update_state(ev_hdr, api_op); + + return evhdl.event; +} + +static inline void +esv_evinit_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const evstate_cnt_t init_cnt, const uint16_t api_op) +{ + evhdl_t *const evhdl_tbl = (evhdl_t *)ev_tbl; + + for (int i = 0; i < num; i++) { + evhdl_tbl[i].evgen = EVGEN_INIT; + ev_hdr_tbl[i]->event = evhdl_tbl[i].event; + + /* Set initial counters for ext-events (atomic) */ + __atomic_store_n(&ev_hdr_tbl[i]->state_cnt.u64, + init_cnt.u64, __ATOMIC_RELAXED); + /* Set initial state information (non-atomic) */ + evhdr_update_state(ev_hdr_tbl[i], api_op); + } +} + +static inline em_event_t +esv_em2usr(const em_event_t event, event_hdr_t *const ev_hdr, + const evstate_cnt_t cnt, const uint16_t api_op, const bool is_revert) +{ + const bool refs_used = ev_hdr->flags.refs_used; + evhdl_t evhdl = {.event = event}; + evstate_cnt_t new_cnt; + + /* Update state-count and return value of all counters (atomic) */ + if (unlikely(is_revert)) { + /* Revert previous em2usr counter update on failed operation */ + new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + } else { + /* Normal em2usr counter update */ + new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + } + + if (!refs_used) { + evhdl.evgen = new_cnt.evgen; + ev_hdr->event = evhdl.event; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt < ref_cnt and both >=0. Error otherwise. 
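+	 *
+	 * Illustrative example (single event, no references):
+	 * after em_alloc() ref=1, send=0; em_send() sets send=1 and the
+	 * dispatcher (em2usr) drops it back to 0 => send(0) < ref(1), OK.
+	 * A second, erroneous em2usr transition for the same event would
+	 * yield send=-1 and trigger the fatal error below.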
+ */ + if (unlikely(send_cnt >= ref_cnt || send_cnt < 0)) { + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl, ev_hdr, api_op, help_str_em2usr); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr, api_op); + + return evhdl.event; +} + +static inline void +esv_em2usr_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const evstate_cnt_t cnt, const uint16_t api_op, + const bool is_revert) +{ + evhdl_t *const evhdl_tbl = (evhdl_t *)ev_tbl; + evstate_cnt_t new_cnt; + + for (int i = 0; i < num; i++) { + const bool refs_used = ev_hdr_tbl[i]->flags.refs_used; + + /* Update state-count and return value of all counters (atomic) */ + if (unlikely(is_revert)) { + /* Revert em2usr counter update on failed operation */ + new_cnt.u64 = + __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + } else { + /* Normal em2usr counter update */ + new_cnt.u64 = + __atomic_sub_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + } + + if (!refs_used) { + evhdl_tbl[i].evgen = new_cnt.evgen; + ev_hdr_tbl[i]->event = evhdl_tbl[i].event; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt < ref_cnt and both >=0. Error otherwise. + */ + if (unlikely(send_cnt >= ref_cnt || send_cnt < 0)) { + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl_tbl[i], ev_hdr_tbl[i], + api_op, help_str_em2usr); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr_tbl[i], api_op); + } +} + +static inline void +esv_usr2em(const em_event_t event, event_hdr_t *const ev_hdr, + const evstate_cnt_t cnt, const uint16_t api_op, const bool is_revert) +{ + const bool refs_used = ev_hdr->flags.refs_used; + evhdl_t evhdl = {.event = event}; + evstate_cnt_t new_cnt; + + /* Update state-count and return value of all counters (atomic) */ + if (unlikely(is_revert)) { + /* Revert previous usr2em counter update on failed operation */ + new_cnt.u64 = __atomic_sub_fetch(&ev_hdr->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_INIT - 1)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t add = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + new_cnt.evgen = __atomic_add_fetch(&ev_hdr->state_cnt.u64, + add.u64, __ATOMIC_RELAXED); + } + } else { + /* Normal usr2em counter update */ + new_cnt.u64 = __atomic_add_fetch(&ev_hdr->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_MAX)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t sub = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + __atomic_fetch_sub(&ev_hdr->state_cnt.u64, sub.u64, + __ATOMIC_RELAXED); + } + /* cmp new_cnt.evgen vs evhdl.evgen of previous gen, thus -1 */ + new_cnt.evgen -= 1; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt <= ref_cnt and both >=0. + * AND + * OK: event handle evgen == evgen count (not checked for references) + * Error otherwise. + * + * Check evgen only for events that never had references. 
+ * Reference usage mixes up the evgen since the same event can be + * sent and freed multiple times. + */ + if (unlikely((send_cnt > ref_cnt || send_cnt < 0) || + (!refs_used && evhdl.evgen != new_cnt.evgen))) { + const char *const help_str = refs_used ? help_str_usr2em_ref : help_str_usr2em; + + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl, ev_hdr, api_op, help_str); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr, api_op); +} + +static inline void +esv_usr2em_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const evstate_cnt_t cnt, const uint16_t api_op, + const bool is_revert) +{ + const evhdl_t *const evhdl_tbl = (const evhdl_t *)ev_tbl; + evstate_cnt_t new_cnt; + + for (int i = 0; i < num; i++) { + const bool refs_used = ev_hdr_tbl[i]->flags.refs_used; + + /* Update state-count and return value of all counters (atomic) */ + if (unlikely(is_revert)) { + /* Revert usr2em counter update on failed operation */ + new_cnt.u64 = + __atomic_sub_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_INIT - 1)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t add = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + new_cnt.evgen = __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + add.u64, __ATOMIC_RELAXED); + } + } else { + /* Normal usr2em counter update */ + new_cnt.u64 = + __atomic_add_fetch(&ev_hdr_tbl[i]->state_cnt.u64, + cnt.u64, __ATOMIC_RELAXED); + + if (unlikely(new_cnt.evgen == EVGEN_MAX)) { + /* Avoid .evgen counter wrap */ + const evstate_cnt_t sub = {.evgen = EVGEN_MAX - EVGEN_INIT, + .rsvd = 0, .ref_cnt = 0, .send_cnt = 0}; + __atomic_fetch_sub(&ev_hdr_tbl[i]->state_cnt.u64, sub.u64, + __ATOMIC_RELAXED); + } + + new_cnt.evgen -= 1; + } + + const int16_t ref_cnt = REF_CNT_INIT - new_cnt.ref_cnt; + const int16_t send_cnt = new_cnt.send_cnt - SEND_CNT_INIT; + + /* + * Check state count: + * OK: send_cnt <= ref_cnt and both >=0. + * AND + * OK: event handle evgen == evgen count (not checked for references) + * Error otherwise. + * + * Check evgen only for events that never had references. + * Reference usage mixes up the evgen since the same event can be + * sent and freed multiple times. 
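+ *
+ * For example: em_send() on a freshly allocated event gives
+ * send_cnt=1 <= ref_cnt=1: OK, whereas a second em_send() of the
+ * same, now stale, handle gives send_cnt=2 > ref_cnt=1 (a
+ * double-send) and trips the fatal ESV error below.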
+ */ + if (unlikely((send_cnt > ref_cnt || send_cnt < 0) || + (!refs_used && evhdl_tbl[i].evgen != new_cnt.evgen))) { + /* report fatal event-state error, never return */ + evstate_error(new_cnt, evhdl_tbl[i], ev_hdr_tbl[i], + api_op, help_str_usr2em); + /* never reached */ + } + + /* + * Valid state transition, update state (non-atomic) + */ + if (!refs_used) + evhdr_update_state(ev_hdr_tbl[i], api_op); + } +} + +em_event_t evstate_prealloc(const em_event_t event, event_hdr_t *const ev_hdr) +{ + return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__PREALLOC); +} + +em_event_t evstate_alloc(const em_event_t event, event_hdr_t *const ev_hdr) +{ + if (!em_shm->opt.esv.prealloc_pools) + return esv_evinit(event, ev_hdr, init_cnt_alloc, EVSTATE__ALLOC); + + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + return esv_em2usr(event, ev_hdr, sub, EVSTATE__ALLOC, false); +} + +void evstate_alloc_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num) +{ + if (!em_shm->opt.esv.prealloc_pools) { + esv_evinit_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, + init_cnt_alloc, EVSTATE__ALLOC_MULTI); + return; + } + + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, + sub, EVSTATE__ALLOC_MULTI, false); +} + +em_event_t evstate_clone(const em_event_t clone_event, event_hdr_t *const ev_hdr) +{ + if (!em_shm->opt.esv.prealloc_pools) + return esv_evinit(clone_event, ev_hdr, + init_cnt_alloc /* use 'alloc' init value */, + EVSTATE__EVENT_CLONE); + + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + return esv_em2usr(clone_event, ev_hdr, sub, EVSTATE__EVENT_CLONE, false); +} + +em_event_t evstate_ref(const em_event_t event, event_hdr_t *const ev_hdr) +{ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + return esv_em2usr(event, ev_hdr, sub, EVSTATE__EVENT_REF, false); +} + +em_event_t evstate_init(const em_event_t event, event_hdr_t *const ev_hdr, + bool is_extev) +{ + uint16_t api_op; + evstate_cnt_t init_cnt; + + if (is_extev) { + api_op = EVSTATE__INIT_EXTEV; + init_cnt = init_cnt_extev; + } else { + api_op = EVSTATE__INIT; + init_cnt = init_cnt_alloc; + } + + return esv_evinit(event, ev_hdr, init_cnt, api_op); +} + +void evstate_init_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + bool is_extev) +{ + uint16_t api_op; + evstate_cnt_t init_cnt; + + if (is_extev) { + api_op = EVSTATE__INIT_EXTEV_MULTI; + init_cnt = init_cnt_extev; + } else { + api_op = EVSTATE__INIT_MULTI; + init_cnt = init_cnt_alloc; + } + + esv_evinit_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, + init_cnt, api_op); +} + +em_event_t evstate_update(const em_event_t event, event_hdr_t *const ev_hdr, + bool is_extev) +{ + em_event_t ret_event; + + /* mark allocated */ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + ret_event = esv_em2usr(event, ev_hdr, sub, EVSTATE__UPDATE_EXTEV, false); + + if (is_extev) { + /* mark sent */ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em(ret_event, ev_hdr, add, EVSTATE__UPDATE_EXTEV, false); + } + + return ret_event; +} + +void evstate_free(em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_usr2em(event, ev_hdr, add, api_op, false); +} + +void 
evstate_free_revert(em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_usr2em(event, ev_hdr, sub, api_op, true /*revert*/); +} + +void evstate_free_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, add, api_op, false); +} + +void evstate_free_revert_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, + .ref_cnt = 1, .send_cnt = 0}; + + esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, sub, api_op, true /*revert*/); +} + +em_event_t evstate_em2usr(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + return esv_em2usr(event, ev_hdr, sub, api_op, false); +} + +em_event_t evstate_em2usr_revert(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + return esv_em2usr(event, ev_hdr, add, api_op, true /*revert*/); +} + +void evstate_em2usr_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, sub, api_op, false); +} + +void evstate_em2usr_revert_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 0, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, num, add, api_op, true /*revert*/); +} + +void evstate_usr2em(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em(event, ev_hdr, add, api_op, false); +} + +void evstate_usr2em_revert(const em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em(event, ev_hdr, sub, api_op, true /*revert*/); +} + +void evstate_usr2em_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t add = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, add, api_op, false); +} + +void evstate_usr2em_revert_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op) +{ + const evstate_cnt_t sub = {.evgen = 1, .rsvd = 0, + .ref_cnt = 0, .send_cnt = 1}; + + esv_usr2em_multi(ev_tbl, ev_hdr_tbl, num, sub, api_op, true /*revert*/); +} + +/* + * Ensure that em_event_unmark_...() is only called after + * em_event_mark_...() (not after normal em_send/free() etc). 
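+ *
+ * For example (illustrative usage):
+ *   valid:   em_event_mark_free(ev); ... em_event_unmark_free(ev);
+ *   invalid: em_free(ev); em_event_unmark_free(ev); -> fatal ESV error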
+ */ +static inline void +check_valid_unmark(const event_hdr_t *ev_hdr, uint16_t api_op, + const uint16_t expected_ops[], const int num_ops) +{ + /* event refs: can't rely on prev api_op */ + if (ev_hdr->flags.refs_used) + return; + + uint16_t prev_op = ev_hdr->state.api_op; + + for (int i = 0; i < num_ops; i++) { + if (prev_op == expected_ops[i]) + return; /* success */ + } + + /* previous API was NOT em_event_mark_..., report FATAL error! */ + evstate_unmark_error(ev_hdr, api_op); +} + +static inline void +check_valid_unmark_multi(event_hdr_t *const ev_hdr_tbl[], const int num_evs, + uint16_t api_op, const uint16_t expected_ops[], const int num_ops) +{ + uint16_t prev_op; + bool is_valid; + + for (int i = 0; i < num_evs; i++) { + /* event refs: can't rely on prev api_op */ + if (ev_hdr_tbl[i]->flags.refs_used) + continue; + + prev_op = ev_hdr_tbl[i]->state.api_op; + is_valid = false; + + for (int j = 0; j < num_ops; j++) { + if (prev_op == expected_ops[j]) { + is_valid = true; + break; /* success */ + } + } + + /* previous API was NOT em_event_mark_..., report FATAL error!*/ + if (unlikely(!is_valid)) + evstate_unmark_error(ev_hdr_tbl[i], api_op); + } +} + +void evstate_unmark_send(const em_event_t event, event_hdr_t *const ev_hdr) +{ + if (em_shm->opt.esv.store_state) { + uint16_t expected_prev_ops[1] = {EVSTATE__MARK_SEND}; + /* + * Ensure that em_event_unmark_send() is only called after + * em_event_mark_send/_multi() (not after em_send() etc). + */ + check_valid_unmark(ev_hdr, EVSTATE__UNMARK_SEND, + expected_prev_ops, 1); + } + + evstate_usr2em_revert(event, ev_hdr, EVSTATE__UNMARK_SEND); +} + +void evstate_unmark_free(const em_event_t event, event_hdr_t *const ev_hdr) +{ + if (em_shm->opt.esv.store_state) { + uint16_t expected_prev_ops[2] = {EVSTATE__MARK_FREE, + EVSTATE__MARK_FREE_MULTI}; + /* + * Ensure that em_event_unmark_free() is only called + * after em_event_mark_free() (not after em_free() etc). + */ + check_valid_unmark(ev_hdr, EVSTATE__UNMARK_FREE, + expected_prev_ops, 2); + } + + evstate_free_revert(event, ev_hdr, EVSTATE__UNMARK_FREE); +} + +void evstate_unmark_free_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num) +{ + if (em_shm->opt.esv.store_state) { + uint16_t expected_prev_ops[2] = {EVSTATE__MARK_FREE_MULTI, + EVSTATE__MARK_FREE}; + /* + * Ensure that em_event_unmark_free_multi() is only + * called after em_event_mark_free_multi() + * (not after em_free/_multi() etc). + */ + check_valid_unmark_multi(ev_hdr_tbl, num, + EVSTATE__UNMARK_FREE_MULTI, + expected_prev_ops, 2); + } + + evstate_free_revert_multi(ev_tbl, ev_hdr_tbl, num, + EVSTATE__UNMARK_FREE_MULTI); +} + +static int read_config_file(void) +{ + const char *conf_str; + bool val_bool = false; + int ret; + + EM_PRINT("EM ESV config:\n"); + + /* + * Option: esv.enable - runtime enable/disable + */ + conf_str = "esv.enable"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.esv.enable = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", + val_bool); + + if (!em_shm->opt.esv.enable) { + /* Read no more options if ESV is disabled */ + memset(&em_shm->opt.esv, 0, sizeof(em_shm->opt.esv)); + return 0; + } + + /* + * Option: esv.store_state + */ + conf_str = "esv.store_state"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.esv.store_state = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", + val_bool); + + /* + * Option: esv.store_payload_first_u32 + */ + conf_str = "esv.store_payload_first_u32"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.esv.store_first_u32 = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", + val_bool); + + /* + * Option: esv.prealloc_pools + */ + conf_str = "esv.prealloc_pools"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + /* store & print the value */ + em_shm->opt.esv.prealloc_pools = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? "true" : "false", + val_bool); + + return 0; +} + +em_status_t esv_init(void) +{ + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + return EM_OK; +} diff --git a/src/em_event_state.h b/src/em_event_state.h index fe406102..ca64eca3 100644 --- a/src/em_event_state.h +++ b/src/em_event_state.h @@ -1,275 +1,279 @@ -/* - * Copyright (c) 2020, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - - /** - * @file - * EM event state verification support - */ - -#ifndef EM_EVENT_CHECKS_H_ -#define EM_EVENT_CHECKS_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#define EVSTATE__UNDEF 0 -#define EVSTATE__PREALLOC 1 -#define EVSTATE__ALLOC 2 -#define EVSTATE__ALLOC_MULTI 3 -#define EVSTATE__EVENT_CLONE 4 -#define EVSTATE__FREE 5 -#define EVSTATE__FREE_MULTI 6 -#define EVSTATE__INIT 7 -#define EVSTATE__INIT_MULTI 8 -#define EVSTATE__INIT_EXTEV 9 -#define EVSTATE__INIT_EXTEV_MULTI 10 -#define EVSTATE__UPDATE_EXTEV 11 -#define EVSTATE__SEND 12 -#define EVSTATE__SEND__FAIL 13 -#define EVSTATE__SEND_EGRP 14 -#define EVSTATE__SEND_EGRP__FAIL 15 -#define EVSTATE__SEND_MULTI 16 -#define EVSTATE__SEND_MULTI__FAIL 17 -#define EVSTATE__SEND_EGRP_MULTI 18 -#define EVSTATE__SEND_EGRP_MULTI__FAIL 19 -#define EVSTATE__MARK_SEND 20 -#define EVSTATE__UNMARK_SEND 21 -#define EVSTATE__MARK_FREE 22 -#define EVSTATE__UNMARK_FREE 23 -#define EVSTATE__MARK_FREE_MULTI 24 -#define EVSTATE__UNMARK_FREE_MULTI 25 -#define EVSTATE__DISPATCH 26 -#define EVSTATE__DISPATCH_MULTI 27 -#define EVSTATE__DISPATCH_SCHED__FAIL 28 -#define EVSTATE__DISPATCH_LOCAL__FAIL 29 -#define EVSTATE__DEQUEUE 30 -#define EVSTATE__DEQUEUE_MULTI 31 -#define EVSTATE__OUTPUT 32 /* before output-queue callback-fn */ -#define EVSTATE__OUTPUT__FAIL 33 -#define EVSTATE__OUTPUT_MULTI 34 /* before output-queue callback-fn */ -#define EVSTATE__OUTPUT_MULTI__FAIL 35 -#define EVSTATE__OUTPUT_CHAINING 36 /* before event_send_device() */ -#define EVSTATE__OUTPUT_CHAINING__FAIL 37 -#define EVSTATE__OUTPUT_CHAINING_MULTI 38 /* before event_send_device_multi()*/ -#define EVSTATE__OUTPUT_CHAINING_MULTI__FAIL 39 /* before event_send_device_multi()*/ -#define EVSTATE__TMO_SET_ABS 40 -#define EVSTATE__TMO_SET_ABS__FAIL 41 -#define EVSTATE__TMO_SET_REL 42 -#define EVSTATE__TMO_SET_REL__FAIL 43 -#define EVSTATE__TMO_SET_PERIODIC 44 -#define EVSTATE__TMO_SET_PERIODIC__FAIL 45 -#define EVSTATE__TMO_CANCEL 46 -#define EVSTATE__TMO_ACK 47 -#define EVSTATE__TMO_ACK__NOSKIP 48 -#define EVSTATE__TMO_ACK__FAIL 49 -#define EVSTATE__TMO_DELETE 50 -#define EVSTATE__AG_DELETE 51 -#define EVSTATE__TERM_CORE__QUEUE_LOCAL 52 -#define EVSTATE__TERM 53 -#define EVSTATE__LAST 54 /* Must be largest number! */ - -/** - * Init values for the event-state counters 'free_cnt' and 'send_cnt'. - * - * The counters are 32-bit but are updated as one combined 64-bit atomic var, - * thus the init values are in the middle of the u32-range to avoid wraparounds - * when decrementing below '0'. 
- */ -#define FREE_CNT_INIT ((uint16_t)0x0100) /* = 0 + 'offset' */ -#define SEND_CNT_INIT ((uint16_t)0x0100) /* = 0 + 'offset' */ -/** Initial event generation value */ -#define EVGEN_INIT ((uint16_t)1) -/** Max evgen value before resetting to 'EVGEN_INIT' to avoid wrap */ -#define EVGEN_MAX ((uint16_t)UINT16_MAX - 0x1000) - -/** - * Return 'true' if ESV is enabled - * - * - EM_ESV_ENABLE is set via the 'configure' script: --enable/disable-esv - * - esv.enable' is set via the EM config file (default: conf/em-odp.conf) - */ -static inline bool esv_enabled(void) -{ - return EM_ESV_ENABLE && em_shm->opt.esv.enable; -} - -/** - * Init ESV (if enabled at compile time), read config options - */ -em_status_t esv_init(void); -/** - * Set the initial event state during em_pool_create() when preallocating events - */ -em_event_t evstate_prealloc(const em_event_t event, event_hdr_t *const ev_hdr); -/** - * Set the initial event state during em_alloc() - */ -em_event_t evstate_alloc(const em_event_t event, event_hdr_t *const ev_hdr); -/** - * Set the initial state of multiple events during em_alloc_multi() - */ -void evstate_alloc_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num); -/** - * Check & update event state during em_event_clone() - */ -em_event_t evstate_clone(const em_event_t event, event_hdr_t *const ev_hdr); - -/** - * Set the initial state for an event - * (e.g. an new odp-event converted into an EM-event) - */ -em_event_t evstate_init(const em_event_t event, event_hdr_t *const ev_hdr, - bool is_extev); -/** - * Set the initial state for events - * (e.g. new odp-events converted into EM-events) - */ -void evstate_init_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - bool is_extev); - -/** - * Update the state for external events input into EM. - * Used when esv.prealloc_pools = true and the input event was allocated - * externally to EM (e.g. by ODP) but from an EM event-pool. - */ -em_event_t evstate_update(const em_event_t event, - event_hdr_t *const ev_hdr, bool is_extev); - -/** - * Check & update event state during em_free() or em_event_mark_free() - */ -void evstate_free(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op); -/** - * Check & update event state during em_event_unmark_free() - */ -void evstate_free_revert(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op); - -/** - * Check & update the state of multiple events during em_free_multi() or - * em_event_mark_free_multi() - */ -void evstate_free_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op); -/** - * Check & update event state during em_event_unmark_free_multi() - */ -void evstate_free_revert_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op); -/** - * Check & update event state - event passed from EM to user. - * - * em_dispatch(), em_queue_dequeue(), em_tmo_cancel(), em_tmo_delete() - */ -em_event_t evstate_em2usr(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op); -/** - * Revert EM-to-user event-state update on failed operation. 
- */ -em_event_t evstate_em2usr_revert(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op); -/** - * Check & update the state of multiple events - events passed from EM to user - * - * em_dispatch(), em_queue_dequeue_multi(), em_term() - */ -void evstate_em2usr_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op); -/** - * Revert EM-to-user event-state updates on failed operation. - */ -void evstate_em2usr_revert_multi(em_event_t ev_tbl[/*in/out*/], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op); -/** - * Check & update event state - event passed from the user to EM. - * - * em_send(), em_send_group(), em_tmo_set_abs/rel/periodic(), em_tmo_ack() - */ -void evstate_usr2em(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op); -/** - * Revert user-to-EM event-state update on failed operation. - */ -void evstate_usr2em_revert(em_event_t event, event_hdr_t *const ev_hdr, - const uint16_t api_op); -/** - * Check & update the state of multiple events - events passed from user to EM - * - * em_send_multi(), em_send_group_multi() - */ -void evstate_usr2em_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op); -/** - * Revert user-to-EM event-state updates on failed operation. - */ -void evstate_usr2em_revert_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num, - const uint16_t api_op); -/** - * Check & update event state during em_event_unmark_send() - * - * Wrapper function for evstate_usr2em_revert(..., EVSTATE__UNMARK_SEND) with - * extra error checks. - */ -void evstate_unmark_send(const em_event_t event, event_hdr_t *const ev_hdr); - -/** - * Check & update event state during em_event_unmark_free() - * - * Wrapper function for evstate_free_revert(..., EVSTATE__UNMARK_FREE) with - * extra error checks. - */ -void evstate_unmark_free(const em_event_t event, event_hdr_t *const ev_hdr); - -/** - * Check & update event state for multiple events during - * em_event_unmark_free_multi() - * - * Wrapper function for - * evstate_free_revert_multi(..., EVSTATE__UNMARK_FREE_MULTI) - * with extra error checks. - */ -void evstate_unmark_free_multi(const em_event_t ev_tbl[], - event_hdr_t *const ev_hdr_tbl[], const int num); - -#ifdef __cplusplus -} -#endif - -#endif /* EM_EVENT_CHECKS_H_ */ +/* + * Copyright (c) 2020, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + + /** + * @file + * EM event state verification support + */ + +#ifndef EM_EVENT_CHECKS_H_ +#define EM_EVENT_CHECKS_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define EVSTATE__UNDEF 0 +#define EVSTATE__PREALLOC 1 +#define EVSTATE__ALLOC 2 +#define EVSTATE__ALLOC_MULTI 3 +#define EVSTATE__EVENT_CLONE 4 +#define EVSTATE__EVENT_REF 5 +#define EVSTATE__FREE 6 +#define EVSTATE__FREE_MULTI 7 +#define EVSTATE__EVENT_VECTOR_FREE 8 +#define EVSTATE__INIT 9 +#define EVSTATE__INIT_MULTI 10 +#define EVSTATE__INIT_EXTEV 11 +#define EVSTATE__INIT_EXTEV_MULTI 12 +#define EVSTATE__UPDATE_EXTEV 13 +#define EVSTATE__SEND 14 +#define EVSTATE__SEND__FAIL 15 +#define EVSTATE__SEND_EGRP 16 +#define EVSTATE__SEND_EGRP__FAIL 17 +#define EVSTATE__SEND_MULTI 18 +#define EVSTATE__SEND_MULTI__FAIL 19 +#define EVSTATE__SEND_EGRP_MULTI 20 +#define EVSTATE__SEND_EGRP_MULTI__FAIL 21 +#define EVSTATE__EO_START_SEND_BUFFERED 22 +#define EVSTATE__MARK_SEND 23 +#define EVSTATE__UNMARK_SEND 24 +#define EVSTATE__MARK_FREE 25 +#define EVSTATE__UNMARK_FREE 26 +#define EVSTATE__MARK_FREE_MULTI 27 +#define EVSTATE__UNMARK_FREE_MULTI 28 +#define EVSTATE__DISPATCH 29 +#define EVSTATE__DISPATCH_MULTI 30 +#define EVSTATE__DISPATCH_SCHED__FAIL 31 +#define EVSTATE__DISPATCH_LOCAL__FAIL 32 +#define EVSTATE__DEQUEUE 33 +#define EVSTATE__DEQUEUE_MULTI 34 +#define EVSTATE__TMO_SET_ABS 35 +#define EVSTATE__TMO_SET_ABS__FAIL 36 +#define EVSTATE__TMO_SET_REL 37 +#define EVSTATE__TMO_SET_REL__FAIL 38 +#define EVSTATE__TMO_SET_PERIODIC 39 +#define EVSTATE__TMO_SET_PERIODIC__FAIL 40 +#define EVSTATE__TMO_CANCEL 41 +#define EVSTATE__TMO_ACK 42 +#define EVSTATE__TMO_ACK__NOSKIP 43 +#define EVSTATE__TMO_ACK__FAIL 44 +#define EVSTATE__TMO_DELETE 45 +#define EVSTATE__AG_DELETE 46 +#define EVSTATE__TERM_CORE__QUEUE_LOCAL 47 +#define EVSTATE__TERM 48 +#define EVSTATE__LAST 49 /* Must be largest number! */ + +/** + * Init values for the event-state counters. + * + * The counters are 16-bit but are updated as one combined 64-bit atomic var, + * thus the init values are in the middle of the u16-range to avoid wraparounds + * when decrementing below '0'. 
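+ *
+ * The logical counts are recovered by removing the offset:
+ * logical ref_cnt = REF_CNT_INIT - stored ref_cnt,
+ * logical send_cnt = stored send_cnt - SEND_CNT_INIT,
+ * so an underflow shows up as a negative logical value that the
+ * ESV checks can detect instead of a silent wraparound.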
+ */
+/** Initial event generation value */
+#define EVGEN_INIT ((uint16_t)1)
+/** Max evgen value before resetting to 'EVGEN_INIT' to avoid wrap */
+#define EVGEN_MAX ((uint16_t)UINT16_MAX - 0x1000)
+/** Initial send count value */
+#define SEND_CNT_INIT ((uint16_t)0x8000) /* = 0 + 'offset' */
+/** Initial reference count value */
+#define REF_CNT_INIT ((uint16_t)0x8000) /* = 0 + 'offset' */
+/** Max reference count before resetting to 'REF_CNT_INIT' to avoid wrap */
+#define REF_CNT_MAX ((uint16_t)UINT16_MAX - 0x1000)
+
+/**
+ * Return 'true' if ESV is enabled
+ *
+ * - EM_ESV_ENABLE is set via the 'configure' script: --enable/disable-esv
+ * - 'esv.enable' is set via the EM config file (default: conf/em-odp.conf)
+ */
+static inline bool esv_enabled(void)
+{
+ return EM_ESV_ENABLE && em_shm->opt.esv.enable;
+}
+
+/**
+ * Init ESV (if enabled at compile time), read config options
+ */
+em_status_t esv_init(void);
+/**
+ * Set the initial event state during em_pool_create() when preallocating events
+ */
+em_event_t evstate_prealloc(const em_event_t event, event_hdr_t *const ev_hdr);
+/**
+ * Set the initial event state during em_alloc()
+ */
+em_event_t evstate_alloc(const em_event_t event, event_hdr_t *const ev_hdr);
+/**
+ * Set the initial state of multiple events during em_alloc_multi()
+ */
+void evstate_alloc_multi(em_event_t ev_tbl[/*in/out*/],
+ event_hdr_t *const ev_hdr_tbl[], const int num);
+/**
+ * Check & update event state during em_event_clone()
+ */
+em_event_t evstate_clone(const em_event_t event, event_hdr_t *const ev_hdr);
+
+/**
+ * Update event state during em_event_ref()
+ */
+em_event_t evstate_ref(const em_event_t event, event_hdr_t *const ev_hdr);
+
+/**
+ * Set the initial state for an event
+ * (e.g. a new odp-event converted into an EM-event)
+ */
+em_event_t evstate_init(const em_event_t event, event_hdr_t *const ev_hdr,
+ bool is_extev);
+/**
+ * Set the initial state for events
+ * (e.g. new odp-events converted into EM-events)
+ */
+void evstate_init_multi(em_event_t ev_tbl[/*in/out*/],
+ event_hdr_t *const ev_hdr_tbl[], const int num,
+ bool is_extev);
+
+/**
+ * Update the state for external events input into EM.
+ * Used when esv.prealloc_pools = true and the input event was allocated
+ * externally to EM (e.g. by ODP) but from an EM event-pool.
+ */
+em_event_t evstate_update(const em_event_t event,
+ event_hdr_t *const ev_hdr, bool is_extev);
+
+/**
+ * Check & update event state during em_free() or em_event_mark_free()
+ */
+void evstate_free(em_event_t event, event_hdr_t *const ev_hdr,
+ const uint16_t api_op);
+/**
+ * Check & update event state during em_event_unmark_free()
+ */
+void evstate_free_revert(em_event_t event, event_hdr_t *const ev_hdr,
+ const uint16_t api_op);
+
+/**
+ * Check & update the state of multiple events during em_free_multi() or
+ * em_event_mark_free_multi()
+ */
+void evstate_free_multi(const em_event_t ev_tbl[],
+ event_hdr_t *const ev_hdr_tbl[], const int num,
+ const uint16_t api_op);
+/**
+ * Check & update event state during em_event_unmark_free_multi()
+ */
+void evstate_free_revert_multi(const em_event_t ev_tbl[],
+ event_hdr_t *const ev_hdr_tbl[], const int num,
+ const uint16_t api_op);
+/**
+ * Check & update event state - event passed from EM to user.
+ *
+ * em_dispatch(), em_queue_dequeue(), em_tmo_cancel(), em_tmo_delete()
+ */
+em_event_t evstate_em2usr(em_event_t event, event_hdr_t *const ev_hdr,
+ const uint16_t api_op);
+/**
+ * Revert EM-to-user event-state update on failed operation.
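+ *
+ * For example: if dispatching counted the event as passed to the
+ * user but the dispatch then failed, the revert restores the
+ * counters so that EM, not the user, still owns the event.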
+ */ +em_event_t evstate_em2usr_revert(em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op); +/** + * Check & update the state of multiple events - events passed from EM to user + * + * em_dispatch(), em_queue_dequeue_multi(), em_term() + */ +void evstate_em2usr_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op); +/** + * Revert EM-to-user event-state updates on failed operation. + */ +void evstate_em2usr_revert_multi(em_event_t ev_tbl[/*in/out*/], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op); +/** + * Check & update event state - event passed from the user to EM. + * + * em_send(), em_send_group(), em_tmo_set_abs/rel/periodic(), em_tmo_ack() + */ +void evstate_usr2em(em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op); +/** + * Revert user-to-EM event-state update on failed operation. + */ +void evstate_usr2em_revert(em_event_t event, event_hdr_t *const ev_hdr, + const uint16_t api_op); +/** + * Check & update the state of multiple events - events passed from user to EM + * + * em_send_multi(), em_send_group_multi() + */ +void evstate_usr2em_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op); +/** + * Revert user-to-EM event-state updates on failed operation. + */ +void evstate_usr2em_revert_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num, + const uint16_t api_op); +/** + * Check & update event state during em_event_unmark_send() + * + * Wrapper function for evstate_usr2em_revert(..., EVSTATE__UNMARK_SEND) with + * extra error checks. + */ +void evstate_unmark_send(const em_event_t event, event_hdr_t *const ev_hdr); + +/** + * Check & update event state during em_event_unmark_free() + * + * Wrapper function for evstate_free_revert(..., EVSTATE__UNMARK_FREE) with + * extra error checks. + */ +void evstate_unmark_free(const em_event_t event, event_hdr_t *const ev_hdr); + +/** + * Check & update event state for multiple events during + * em_event_unmark_free_multi() + * + * Wrapper function for + * evstate_free_revert_multi(..., EVSTATE__UNMARK_FREE_MULTI) + * with extra error checks. + */ +void evstate_unmark_free_multi(const em_event_t ev_tbl[], + event_hdr_t *const ev_hdr_tbl[], const int num); + +#ifdef __cplusplus +} +#endif + +#endif /* EM_EVENT_CHECKS_H_ */ diff --git a/src/em_event_types.h b/src/em_event_types.h index f5e62be6..86fb011f 100644 --- a/src/em_event_types.h +++ b/src/em_event_types.h @@ -1,271 +1,325 @@ -/* - * Copyright (c) 2015-2021, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - /** - * @file - * EM internal event types & definitions - * - */ - -#ifndef EM_EVENT_TYPES_H_ -#define EM_EVENT_TYPES_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -COMPILE_TIME_ASSERT(sizeof(em_event_t) == sizeof(odp_event_t), - EM_EVENT_SIZE_MISMATCH); - -/** - * @def PKT_USERPTR_MAGIC_NBR - * - * Magic number used to detect whether the EM event-header has been initialized - * by EM in events based on odp-pkt-buffers. - * - * Set the odp-pkt user-ptr to this magic number to be able to recognize - * pkt-events that EM has created vs. pkts from pkt-input that needs their - * ev-hdrs to be initialized before further EM processing. - * - * if (odp_packet_user_ptr(odp_pkt) != PKT_USERPTR_MAGIC_NBR) { - * // Pkt from outside of EM, need to init ev_hdr - * odp_packet_user_ptr_set(odp_pkt, PKT_USERPTR_MAGIC_NBR); - * init_ev_hdr('ev_hdr in the user-area of the odp-pkt'); - * ... - * } - */ -#define PKT_USERPTR_MAGIC_NBR ((void *)(intptr_t)0xA5A5) - -/** - * Internal representation of the event handle (em_event_t) when using - * Event State Verification (ESV) - * - * An event-generation-count is encoded into the high bits of the event handle - * to catch illegal usage after the event ownership has been transferred. - * Each user-to-EM event state transition increments the .evgen and thus - * obsoletes any further use of the handle by that user. - */ -typedef union { - em_event_t event; - struct { -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - uint64_t evptr : 48; - uint64_t evgen : 16; -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ - uint64_t evgen : 16; - uint64_t evptr : 48; -#endif - }; -} evhdl_t; - -COMPILE_TIME_ASSERT(sizeof(evhdl_t) == sizeof(em_event_t), EVHDL_T_SIZE_ERROR); - -/** - * Event-state counters: 'evgen', 'free_cnt' and 'send_cnt'. - * - * Updated as one single atomic var via 'evstate_cnt_t::u64'. - */ -typedef union ODP_ALIGNED(sizeof(uint64_t)) { - uint64_t u64; /* updated atomically in the event-hdr */ - struct { - uint16_t evgen; - uint16_t rsvd; - union { - struct { - uint16_t free_cnt; - uint16_t send_cnt; - }; - uint32_t free_send_cnt; - }; - }; -} evstate_cnt_t; - -/* Verify size of struct, i.e. accept no padding */ -COMPILE_TIME_ASSERT(sizeof(evstate_cnt_t) == sizeof(uint64_t), - EVSTATE_CNT_T_SIZE_ERROR); - -/** - * Event-state information. - * Not atomically updated (but evstate_cnt_t is updated atomically) - */ -typedef struct { - /** - * Event state, updated on valid state trasitions. - * "Best effort" update, i.e. atomic update of state not - * guaranteed in invalid simultaneous state updates. - * - * Contains the previously known good state and will be - * printed when detecting an invalid state transition. 
- */ - em_eo_t eo; - em_queue_t queue; - /** - * EM API operation ID. - * Identifies the previously called API func that altered state - */ - uint16_t api_op; - /** EM core that called API('api_op') */ - uint16_t core; - /** - * First 'word' of the event payload as seen - * at the time of the previous state update. - */ - uint32_t payload_first; -} ev_hdr_state_t; - -/** - * Event header - * - * SW & I/O originated events. - */ -typedef struct { - /** - * Event State Verification (ESV): event state data - */ - union { - uint8_t u8[32]; - struct { - /** - * Together the evstate_cnt_t counters (evgen, free_cnt - * and send_cnt) can be used to detect invalid states - * and operations on the event, e.g.: - * double-free, double-send, send-after-free, - * free-after-send, usage-after-output, - * usage-after-timer-tmo-set/ack/cancel/delete etc. - */ - evstate_cnt_t state_cnt; - - /** - * Event state, updated on valid state transitions. - * "Best effort" update, i.e. atomic update not - * guaranteed in invalid simultaneous state-updates. - * - * Contains the previously known good state and will be - * printed when detecting an invalid state transition. - */ - ev_hdr_state_t state; - }; - }; - /** - * EO-start send event buffering, event linked-list node - */ - list_node_t start_node; - /** - * Queue element for the associated queue - * @note only used for atomic-group- or local-queues - */ - queue_elem_t *q_elem; - - union { - uint64_t all; - struct { - /** requested size (bytes) */ - uint64_t req_size : 16; - /** + padding, incl. space for align_offset (bytes) */ - uint64_t pad_size : 16; - /** user area id */ - uint64_t id : 16; - /** is the user area id set? */ - uint64_t isset_id : 1; - /** is the uarea initialized? */ - uint64_t isinit : 1; - /** reserved bits */ - uint64_t rsvd : 14; - }; - } user_area; - - /* --- CACHE LINE on systems with a 64B cache line size --- */ - - /** - * Event handle (this event) - */ - em_event_t event ODP_ALIGNED(64); - /** - * Queue handle - * @note only used by EM chaining & EO-start send event buffering - */ - em_queue_t queue; - /** - * Event Group handle - */ - em_event_group_t egrp; - /** - * Event group generation - */ - int32_t egrp_gen; - /** - * Event size - */ - uint32_t event_size; - /** - * Payload alloc alignment offset/push into free area of ev_hdr. - * Only used by events based on ODP buffers that have the ev_hdr in the - * beginning of the buf payload (pkts use 'user-area' for ev_hdr). - * Value is copied from pool_elem->align_offset for easy access. - */ - uint16_t align_offset; - /** - * Event type, contains major and minor parts - */ - em_event_type_t event_type; - - /** - * End of event header data, - * for offsetof(event_hdr_t, end_hdr_data) - */ - uint8_t end_hdr_data[0]; - - /* - * ! EMPTY SPACE ! - * Events based on odp_buffer_t only: - * - space for alignment adjustments as set by - * a) config file option - 'pool.align_offset' or - * b) pool config param - 'em_pool_cfg_t:align_offset{}' - * - space available: - * sizeof(event_hdr_t) - offsetof(event_hdr_t, end_hdr_data) - * - events based on odp_packet_t have their event header in the - * odp pkt user area and alignment is adjusted in the pkt headroom. - * - * Note: If the event user area is enabled then (for bufs) it will start - * after the event header and the align offset is not included - * in the event header but instead starts after the user area. 
- */
-
- void *end[0] ODP_ALIGNED(64); /* pad to next 64B boundary */
-} event_hdr_t;
-
-COMPILE_TIME_ASSERT(sizeof(event_hdr_t) <= 128, EVENT_HDR_SIZE_ERROR);
-COMPILE_TIME_ASSERT(sizeof(event_hdr_t) % 32 == 0, EVENT_HDR_SIZE_ERROR2);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* EM_EVENT_TYPES_H_ */
+/*
+ * Copyright (c) 2015-2021, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ /**
+ * @file
+ * EM internal event types & definitions
+ *
+ */
+
+#ifndef EM_EVENT_TYPES_H_
+#define EM_EVENT_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COMPILE_TIME_ASSERT(sizeof(em_event_t) == sizeof(odp_event_t),
+ EM_EVENT_SIZE_MISMATCH);
+
+/**
+ * @def PKT_USERPTR_MAGIC_NBR
+ *
+ * Magic number used to detect whether the EM event-header has been initialized
+ * by EM in events based on odp-pkt-buffers.
+ *
+ * Set the odp-pkt user-ptr to this magic number to be able to recognize
+ * pkt-events that EM has created vs. pkts from pkt-input that need their
+ * ev-hdrs to be initialized before further EM processing.
+ *
+ * if (odp_packet_user_ptr(odp_pkt) != PKT_USERPTR_MAGIC_NBR) {
+ * // Pkt from outside of EM, need to init ev_hdr
+ * odp_packet_user_ptr_set(odp_pkt, PKT_USERPTR_MAGIC_NBR);
+ * init_ev_hdr('ev_hdr in the user-area of the odp-pkt');
+ * ...
+ * }
+ */
+#define PKT_USERPTR_MAGIC_NBR ((void *)(intptr_t)0xA5A5)
+
+#define USER_FLAG_SET 1
+
+/**
+ * Internal representation of the event handle (em_event_t) when using
+ * Event State Verification (ESV)
+ *
+ * An event-generation-count is encoded into the high bits of the event handle
+ * to catch illegal usage after the event ownership has been transferred.
+ * Each user-to-EM event state transition increments the .evgen and thus
+ * obsoletes any further use of the handle by that user.
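+ *
+ * Illustrative example (little endian): a handle carries evgen=1 in
+ * its top 16 bits; em_send() increments the stored evgen to 2, so any
+ * later use of the stale evgen=1 handle no longer matches the event
+ * state and is caught by the usr2em evgen check.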
+ */
+typedef union {
+ em_event_t event;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ uint64_t evptr : 48;
+ uint64_t evgen : 16;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t evgen : 16;
+ uint64_t evptr : 48;
+#endif
+ };
+} evhdl_t;
+
+COMPILE_TIME_ASSERT(sizeof(evhdl_t) == sizeof(em_event_t), EVHDL_T_SIZE_ERROR);
+
+/**
+ * Stash entry for EM Atomic Groups internal stashes and Local Queue storage
+ * Stash a combo of dst-queue and event as one 64-bit value into the stash.
+ */
+typedef union {
+ uint64_t u64;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ uint64_t evptr : 48;
+ uint64_t qidx : 16;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ uint64_t qidx : 16;
+ uint64_t evptr : 48;
+#endif
+ };
+} stash_entry_t;
+
+COMPILE_TIME_ASSERT(sizeof(stash_entry_t) == sizeof(uint64_t),
+ STASH_ENTRY_T_SIZE_ERROR);
+
+/**
+ * Event-state counters: 'evgen', 'ref_cnt' and 'send_cnt'.
+ *
+ * Updated as one single atomic var via 'evstate_cnt_t::u64'.
+ */
+typedef union ODP_ALIGNED(sizeof(uint64_t)) {
+ uint64_t u64; /* updated atomically in the event-hdr */
+ struct {
+ uint16_t evgen;
+ uint16_t rsvd;
+ uint16_t ref_cnt;
+ uint16_t send_cnt;
+ };
+} evstate_cnt_t;
+
+/* Verify size of struct, i.e. accept no padding */
+COMPILE_TIME_ASSERT(sizeof(evstate_cnt_t) == sizeof(uint64_t),
+ EVSTATE_CNT_T_SIZE_ERROR);
+
+/**
+ * Event-state information.
+ * Not atomically updated (but evstate_cnt_t is updated atomically)
+ */
+typedef struct {
+ /**
+ * Event state, updated on valid state transitions.
+ * "Best effort" update, i.e. atomic update of state not
+ * guaranteed in invalid simultaneous state updates.
+ *
+ * Contains the previously known good state and will be
+ * printed when detecting an invalid state transition.
+ */
+ em_eo_t eo;
+ em_queue_t queue;
+ /**
+ * EM API operation ID.
+ * Identifies the previously called API func that altered state
+ */
+ uint16_t api_op;
+ /** EM core that called API('api_op') */
+ uint16_t core;
+ /**
+ * First 'word' of the event payload as seen
+ * at the time of the previous state update.
+ */
+ uint32_t payload_first;
+} ev_hdr_state_t;
+
+/**
+ * Event header
+ *
+ * SW & I/O originated events.
+ */
+typedef struct event_hdr {
+ /**
+ * Event State Verification (ESV): event state data
+ */
+ union {
+ uint8_t u8[32];
+ struct {
+ /**
+ * Together the evstate_cnt_t counters (evgen, ref_cnt
+ * and send_cnt) can be used to detect invalid states
+ * and operations on the event, e.g.:
+ * double-free, double-send, send-after-free,
+ * free-after-send, usage-after-output,
+ * usage-after-timer-tmo-set/ack/cancel/delete etc.
+ */
+ evstate_cnt_t state_cnt;
+
+ /**
+ * Event state, updated on valid state transitions.
+ * "Best effort" update, i.e. atomic update not
+ * guaranteed in invalid simultaneous state-updates.
+ *
+ * Contains the previously known good state and will be
+ * printed when detecting an invalid state transition.
+ */
+ ev_hdr_state_t state;
+ };
+ };
+
+ /**
+ * Event handle (this event)
+ */
+ em_event_t event;
+
+ /**
+ * Event size
+ */
+ uint32_t event_size;
+
+ /**
+ * Event type, contains major and minor parts
+ */
+ em_event_type_t event_type;
+
+ /**
+ * Event flags
+ */
+ union {
+ uint16_t all;
+ struct {
+ /**
+ * Indicate that this event has (or had) references and
+ * some of the ESV checks must be omitted (evgen).
+ * Will be set for the whole lifetime of the event.
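+ *
+ * For example: after em_event_ref() the same event may legally be
+ * sent or freed once per reference, which makes the evgen sequence
+ * ambiguous, so only the ref/send counts are verified for it.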
+ */ + uint16_t refs_used : 1; + /** reserved bits */ + uint16_t rsvd : 15; + }; + } flags; + + /** + * Payload alloc alignment offset/push into free area of ev_hdr. + * Only used by events based on ODP buffers that have the ev_hdr in the + * beginning of the buf payload (pkts use 'user-area' for ev_hdr). + * Value is copied from pool_elem->align_offset for easy access. + */ + uint16_t align_offset; + + /** + * Event group generation + */ + int32_t egrp_gen; + + /** + * Event Group handle (cannot be used by event references) + */ + em_event_group_t egrp; + + /* --- CACHE LINE on systems with a 64B cache line size --- */ + + union { + uint64_t all; + struct { + /** requested size (bytes) */ + uint64_t req_size : 16; + /** + padding, incl. space for align_offset (bytes) */ + uint64_t pad_size : 16; + /** user area id */ + uint64_t id : 16; + /** is the user area id set? */ + uint64_t isset_id : 1; + /** is the uarea initialized? */ + uint64_t isinit : 1; + /** reserved bits */ + uint64_t rsvd : 14; + }; + } user_area; + + /** + * End of event header data, + * for offsetof(event_hdr_t, end_hdr_data) + */ + uint8_t end_hdr_data[0]; + + /* + * ! EMPTY SPACE ! + * Events based on odp_buffer_t only: + * - space for alignment adjustments as set by + * a) config file option - 'pool.align_offset' or + * b) pool config param - 'em_pool_cfg_t:align_offset{}' + * - space available: + * sizeof(event_hdr_t) - offsetof(event_hdr_t, end_hdr_data) + * - events based on odp_packet_t have their event header in the + * odp pkt user area and alignment is adjusted in the pkt headroom. + * + * Note: If the event user area is enabled then (for bufs) it will start + * after the event header and the align offset is not included + * in the event header but instead starts after the user area. + */ + + void *end[0] ODP_ALIGNED(64); /* pad to next 64B boundary */ +} event_hdr_t; + +COMPILE_TIME_ASSERT(sizeof(event_hdr_t) <= 128, EVENT_HDR_SIZE_ERROR); +COMPILE_TIME_ASSERT(sizeof(event_hdr_t) % 32 == 0, EVENT_HDR_SIZE_ERROR2); + +/** + * Event header used only when pre-allocating the pool during pool creation to + * be able to link all the event headers together into a linked list. + * Make sure not to overwrite the event state information in the header with the + * linked list information. + */ +typedef union event_prealloc_hdr { + event_hdr_t ev_hdr; + + struct { + uint8_t u8[sizeof(event_hdr_t) - sizeof(list_node_t)]; + /** + * Pool pre-allocation: allocate and link each event in the pool into a + * linked list to be able to initialize the event state into a known + * state for ESV. + */ + list_node_t list_node; + }; +} event_prealloc_hdr_t; + +COMPILE_TIME_ASSERT(sizeof(event_prealloc_hdr_t) == sizeof(event_hdr_t), + EVENT_PREALLOC_HDR_SIZE_ERROR); +COMPILE_TIME_ASSERT(offsetof(event_prealloc_hdr_t, list_node) > + offsetof(event_hdr_t, state) + sizeof(ev_hdr_state_t), + EVENT_PREALLOC_HDR_SIZE_ERROR2); +COMPILE_TIME_ASSERT(offsetof(event_prealloc_hdr_t, list_node) > + offsetof(event_hdr_t, event) + sizeof(em_event_t), + EVENT_PREALLOC_HDR_SIZE_ERROR3); + +#ifdef __cplusplus +} +#endif + +#endif /* EM_EVENT_TYPES_H_ */ diff --git a/src/em_hook_types.h b/src/em_hook_types.h index 9b1d4ecc..7dda92c6 100644 --- a/src/em_hook_types.h +++ b/src/em_hook_types.h @@ -1,103 +1,110 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * EM internal API hook types & definitions - * - */ - -#ifndef EM_HOOK_TYPES_H_ -#define EM_HOOK_TYPES_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -/* Max number of API-callback hook arrays */ -#define API_HOOKS_MAX_TBL_SIZE 255 - -/* EM API hook function types */ -#define ALLOC_HOOK 1 -#define FREE_HOOK 2 -#define SEND_HOOK 3 -/* Dispatcher callback function types */ -#define DISPATCH_CALLBACK_ENTER 4 -#define DISPATCH_CALLBACK_EXIT 5 - -typedef void (*void_hook_t)(void); - -typedef union { - em_api_hook_alloc_t alloc; - em_api_hook_free_t free; - em_api_hook_send_t send; - em_dispatch_enter_func_t disp_enter; - em_dispatch_exit_func_t disp_exit; - void_hook_t void_hook; -} hook_fn_t; - -/** - * Table for storing API-callback hook function pointers. - */ -typedef struct { - /* Hook function table */ - hook_fn_t tbl[EM_CALLBACKS_MAX]; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} hook_tbl_t; - -COMPILE_TIME_ASSERT(sizeof(hook_tbl_t) == ENV_CACHE_LINE_SIZE, - HOOK_ALIGNMENT_ERROR); - -/** - * API-callback hook functions (table of tables) - */ -typedef struct { - /** Storage for multiple hook function tables */ - hook_tbl_t hook_tbl_storage[API_HOOKS_MAX_TBL_SIZE]; - /** Callback table edit lock */ - env_spinlock_t lock; - /** Index of the current active callback table */ - int idx; - /* Pad size to a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} hook_storage_t; - -COMPILE_TIME_ASSERT(sizeof(hook_storage_t) % ENV_CACHE_LINE_SIZE == 0, - HOOK_STORAGE_ALIGNMENT_ERROR); - -COMPILE_TIME_ASSERT(sizeof(hook_storage_t) / ENV_CACHE_LINE_SIZE == - API_HOOKS_MAX_TBL_SIZE + 1, HOOK_STORAGE_SIZE_ERROR); - -#ifdef __cplusplus -} -#endif - -#endif /* EM_HOOK_TYPES_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * EM internal API hook types & definitions + * + */ + +#ifndef EM_HOOK_TYPES_H_ +#define EM_HOOK_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* Max number of API-callback hook arrays */ +#define API_HOOKS_MAX_TBL_SIZE 255 + +/* EM API hook function types */ +#define ALLOC_HOOK 1 +#define FREE_HOOK 2 +#define SEND_HOOK 3 +/* Dispatcher callback function types */ +#define DISPATCH_CALLBACK_ENTER 4 +#define DISPATCH_CALLBACK_EXIT 5 +/* Idle hook function types */ +#define TO_IDLE_HOOK 6 +#define TO_ACTIVE_HOOK 7 +#define WHILE_IDLE_HOOK 8 + +typedef void (*void_hook_t)(void); + +typedef union { + em_api_hook_alloc_t alloc; + em_api_hook_free_t free; + em_api_hook_send_t send; + em_dispatch_enter_func_t disp_enter; + em_dispatch_exit_func_t disp_exit; + em_idle_hook_to_idle_t to_idle; + em_idle_hook_to_active_t to_active; + em_idle_hook_while_idle_t while_idle; + void_hook_t void_hook; +} hook_fn_t; + +/** + * Table for storing API-callback hook function pointers. 
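+ *
+ * Hook functions are stored densely from index 0 onwards and a NULL
+ * 'void_hook' entry marks the end of the registered callbacks, so callers
+ * iterate the table until the first NULL, e.g. (illustrative sketch only,
+ * mirroring call_api_hooks_send() in em_hooks.h):
+ *
+ *	for (int i = 0; i < EM_CALLBACKS_MAX; i++) {
+ *		em_api_hook_send_t fn = send_hook_tbl->tbl[i].send;
+ *		if (fn == NULL)
+ *			break;
+ *		fn(events, num, queue, event_group);
+ *	}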
+ */ +typedef struct { + /* Hook function table */ + hook_fn_t tbl[EM_CALLBACKS_MAX]; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} hook_tbl_t; + +COMPILE_TIME_ASSERT(sizeof(hook_tbl_t) == ENV_CACHE_LINE_SIZE, + HOOK_ALIGNMENT_ERROR); + +/** + * API-callback hook functions (table of tables) + */ +typedef struct { + /** Storage for multiple hook function tables */ + hook_tbl_t hook_tbl_storage[API_HOOKS_MAX_TBL_SIZE]; + /** Callback table edit lock */ + env_spinlock_t lock; + /** Index of the current active callback table */ + int idx; + /* Pad size to a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} hook_storage_t; + +COMPILE_TIME_ASSERT(sizeof(hook_storage_t) % ENV_CACHE_LINE_SIZE == 0, + HOOK_STORAGE_ALIGNMENT_ERROR); + +COMPILE_TIME_ASSERT(sizeof(hook_storage_t) / ENV_CACHE_LINE_SIZE == + API_HOOKS_MAX_TBL_SIZE + 1, HOOK_STORAGE_SIZE_ERROR); + +#ifdef __cplusplus +} +#endif + +#endif /* EM_HOOK_TYPES_H_ */ diff --git a/src/em_hooks.c b/src/em_hooks.c index fd427239..4642adb8 100644 --- a/src/em_hooks.c +++ b/src/em_hooks.c @@ -1,271 +1,319 @@ -/* - * Copyright (c) 2019, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -static hook_tbl_t ** -get_hook_tbl(const uint8_t hook_type, hook_storage_t **hook_storage /*out*/); - -/** - * Pack a hook table after removing one item. - * - * Move pointers following the given index back towards the beginning of the - * table so that there are no NULL pointers in the middle. 
- */ -static inline int -pack_hook_tbl(hook_tbl_t hook_tbl[], unsigned int idx); - -em_status_t -hooks_init(const em_api_hooks_t *api_hooks) -{ - em_status_t stat = EM_OK; - - EM_PRINT("EM callbacks init\n"); - - memset(&em_shm->dispatch_enter_cb_storage, 0, sizeof(hook_storage_t)); - memset(&em_shm->dispatch_exit_cb_storage, 0, sizeof(hook_storage_t)); - memset(&em_shm->alloc_hook_storage, 0, sizeof(hook_storage_t)); - memset(&em_shm->free_hook_storage, 0, sizeof(hook_storage_t)); - memset(&em_shm->send_hook_storage, 0, sizeof(hook_storage_t)); - - em_shm->dispatch_enter_cb_tbl = - &em_shm->dispatch_enter_cb_storage.hook_tbl_storage[0]; - em_shm->dispatch_exit_cb_tbl = - &em_shm->dispatch_exit_cb_storage.hook_tbl_storage[0]; - em_shm->alloc_hook_tbl = - &em_shm->alloc_hook_storage.hook_tbl_storage[0]; - em_shm->free_hook_tbl = - &em_shm->free_hook_storage.hook_tbl_storage[0]; - em_shm->send_hook_tbl = - &em_shm->send_hook_storage.hook_tbl_storage[0]; - - em_shm->dispatch_enter_cb_storage.idx = 0; - em_shm->dispatch_exit_cb_storage.idx = 0; - em_shm->alloc_hook_storage.idx = 0; - em_shm->free_hook_storage.idx = 0; - em_shm->send_hook_storage.idx = 0; - - env_spinlock_init(&em_shm->dispatch_enter_cb_storage.lock); - env_spinlock_init(&em_shm->dispatch_exit_cb_storage.lock); - env_spinlock_init(&em_shm->alloc_hook_storage.lock); - env_spinlock_init(&em_shm->free_hook_storage.lock); - env_spinlock_init(&em_shm->send_hook_storage.lock); - - if (EM_API_HOOKS_ENABLE) { - if (api_hooks->alloc_hook) { - stat = em_hooks_register_alloc(api_hooks->alloc_hook); - if (unlikely(stat != EM_OK)) - return stat; - } - if (api_hooks->free_hook) { - stat = em_hooks_register_free(api_hooks->free_hook); - if (unlikely(stat != EM_OK)) - return stat; - } - if (api_hooks->send_hook) { - stat = em_hooks_register_send(api_hooks->send_hook); - if (unlikely(stat != EM_OK)) - return stat; - } - } - - return EM_OK; -} - -em_status_t -hook_register(uint8_t hook_type, hook_fn_t hook_fn) -{ - hook_storage_t *hook_storage; - const hook_tbl_t *hook_tbl; - hook_tbl_t *next_tbl; - hook_tbl_t **active_tbl_ptr; - int idx; - int next_idx; - int i; - - /* Get the em_shm hook table and hook storage to update */ - active_tbl_ptr = get_hook_tbl(hook_type, &hook_storage/*out*/); - if (unlikely(active_tbl_ptr == NULL)) - return EM_ERR_BAD_ID; - - env_spinlock_lock(&hook_storage->lock); - - /* TODO: Check that no thread is still using the new memory area */ - - idx = hook_storage->idx; - next_idx = idx + 1; - if (next_idx >= API_HOOKS_MAX_TBL_SIZE) - next_idx = 0; - hook_tbl = &hook_storage->hook_tbl_storage[idx]; - next_tbl = &hook_storage->hook_tbl_storage[next_idx]; - - /* - * Copy old callback functions and find the index - * of the new function pointer. 
- */ - memcpy(next_tbl->tbl, hook_tbl->tbl, sizeof(next_tbl->tbl)); - - for (i = 0; i < EM_CALLBACKS_MAX; i++) { - if (next_tbl->tbl[i].void_hook == NULL) - break; - } - if (unlikely(i == EM_CALLBACKS_MAX)) { - env_spinlock_unlock(&hook_storage->lock); - return EM_ERR_ALLOC_FAILED; - } - /* Add new callback */ - next_tbl->tbl[i] = hook_fn; - - /* move the active hook tbl to the new tbl */ - *active_tbl_ptr = next_tbl; /* em_shm->..._hook_tbl = next_tbl */ - - hook_storage->idx = next_idx; - - env_spinlock_unlock(&hook_storage->lock); - - return EM_OK; -} - -em_status_t -hook_unregister(uint8_t hook_type, hook_fn_t hook_fn) -{ - hook_storage_t *hook_storage; - const hook_tbl_t *hook_tbl; - hook_tbl_t *next_tbl; - hook_tbl_t **active_tbl_ptr; - int idx; - int next_idx; - int ret; - int i; - - active_tbl_ptr = get_hook_tbl(hook_type, &hook_storage/*out*/); - if (unlikely(active_tbl_ptr == NULL)) - return EM_ERR_BAD_ID; - - env_spinlock_lock(&hook_storage->lock); - - /* TODO: Check that no thread is still using the new memory area */ - - idx = hook_storage->idx; - next_idx = idx + 1; - if (next_idx >= API_HOOKS_MAX_TBL_SIZE) - next_idx = 0; - hook_tbl = &hook_storage->hook_tbl_storage[idx]; - next_tbl = &hook_storage->hook_tbl_storage[next_idx]; - - /* - * Copy old callback functions and try to find matching - * function pointer. - */ - memcpy(next_tbl->tbl, hook_tbl->tbl, sizeof(next_tbl->tbl)); - - for (i = 0; i < EM_CALLBACKS_MAX; i++) - if (next_tbl->tbl[i].void_hook == hook_fn.void_hook) - break; - if (unlikely(i == EM_CALLBACKS_MAX)) { - env_spinlock_unlock(&hook_storage->lock); - return EM_ERR_NOT_FOUND; - } - - /* - * Remove a pointer and move the following array entries backwards - * and set callback array pointer to the beginning of the new array. 
- */ - next_tbl->tbl[i].void_hook = NULL; - ret = pack_hook_tbl(next_tbl, i); - if (unlikely(ret != 0)) { - env_spinlock_unlock(&hook_storage->lock); - return EM_ERR_BAD_POINTER; - } - - /* move the active hook tbl to the new tbl */ - *active_tbl_ptr = next_tbl; /* em_shm->..._hook_tbl = next_tbl */ - - hook_storage->idx = next_idx; - - env_spinlock_unlock(&hook_storage->lock); - - return EM_OK; -} - -static hook_tbl_t ** -get_hook_tbl(const uint8_t hook_type, hook_storage_t **hook_storage /*out*/) -{ - hook_tbl_t **active_tbl_ptr; - - switch (hook_type) { - case ALLOC_HOOK: - *hook_storage = &em_shm->alloc_hook_storage; - active_tbl_ptr = &em_shm->alloc_hook_tbl; - break; - case FREE_HOOK: - *hook_storage = &em_shm->free_hook_storage; - active_tbl_ptr = &em_shm->free_hook_tbl; - break; - case SEND_HOOK: - *hook_storage = &em_shm->send_hook_storage; - active_tbl_ptr = &em_shm->send_hook_tbl; - break; - case DISPATCH_CALLBACK_ENTER: - *hook_storage = &em_shm->dispatch_enter_cb_storage; - active_tbl_ptr = &em_shm->dispatch_enter_cb_tbl; - break; - case DISPATCH_CALLBACK_EXIT: - *hook_storage = &em_shm->dispatch_exit_cb_storage; - active_tbl_ptr = &em_shm->dispatch_exit_cb_tbl; - break; - default: - return NULL; - } - - return active_tbl_ptr; -} - -static inline int -pack_hook_tbl(hook_tbl_t *const hook_tbl, unsigned int idx) -{ - hook_fn_t *const fn_tbl = hook_tbl->tbl; - unsigned int i; - - if (unlikely(idx >= EM_CALLBACKS_MAX || - fn_tbl[idx].void_hook != NULL)) - return -1; - - for (i = idx; i < EM_CALLBACKS_MAX - 1; i++) { - if (fn_tbl[i + 1].void_hook != NULL) { - fn_tbl[i].void_hook = fn_tbl[i + 1].void_hook; - fn_tbl[i + 1].void_hook = NULL; - } else { - break; - } - } - - return 0; -} +/* + * Copyright (c) 2019, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +static hook_tbl_t ** +get_hook_tbl(const uint8_t hook_type, hook_storage_t **hook_storage /*out*/); + +/** + * Pack a hook table after removing one item. 
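+ *
+ * For example (hypothetical table contents), unregistering 'B' from
+ *	tbl = [A, B, C, NULL, ...]
+ * first clears the entry, giving [A, NULL, C, NULL, ...], after which
+ * packing moves 'C' down to yield
+ *	tbl = [A, C, NULL, NULL, ...]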
+ * + * Move pointers following the given index back towards the beginning of the + * table so that there are no NULL pointers in the middle. + */ +static inline int +pack_hook_tbl(hook_tbl_t hook_tbl[], unsigned int idx); + +em_status_t +hooks_init(const em_api_hooks_t *api_hooks, const em_idle_hooks_t *idle_hooks) +{ + em_status_t stat = EM_OK; + + EM_PRINT("EM callbacks init\n"); + + memset(&em_shm->dispatch_enter_cb_storage, 0, sizeof(hook_storage_t)); + memset(&em_shm->dispatch_exit_cb_storage, 0, sizeof(hook_storage_t)); + memset(&em_shm->alloc_hook_storage, 0, sizeof(hook_storage_t)); + memset(&em_shm->free_hook_storage, 0, sizeof(hook_storage_t)); + memset(&em_shm->send_hook_storage, 0, sizeof(hook_storage_t)); + + memset(&em_shm->to_idle_hook_storage, 0, sizeof(hook_storage_t)); + memset(&em_shm->to_active_hook_storage, 0, sizeof(hook_storage_t)); + memset(&em_shm->while_idle_hook_storage, 0, sizeof(hook_storage_t)); + + em_shm->dispatch_enter_cb_tbl = + &em_shm->dispatch_enter_cb_storage.hook_tbl_storage[0]; + em_shm->dispatch_exit_cb_tbl = + &em_shm->dispatch_exit_cb_storage.hook_tbl_storage[0]; + em_shm->alloc_hook_tbl = + &em_shm->alloc_hook_storage.hook_tbl_storage[0]; + em_shm->free_hook_tbl = + &em_shm->free_hook_storage.hook_tbl_storage[0]; + em_shm->send_hook_tbl = + &em_shm->send_hook_storage.hook_tbl_storage[0]; + + em_shm->to_idle_hook_tbl = + &em_shm->to_idle_hook_storage.hook_tbl_storage[0]; + em_shm->to_active_hook_tbl = + &em_shm->to_active_hook_storage.hook_tbl_storage[0]; + em_shm->while_idle_hook_tbl = + &em_shm->while_idle_hook_storage.hook_tbl_storage[0]; + + em_shm->dispatch_enter_cb_storage.idx = 0; + em_shm->dispatch_exit_cb_storage.idx = 0; + em_shm->alloc_hook_storage.idx = 0; + em_shm->free_hook_storage.idx = 0; + em_shm->send_hook_storage.idx = 0; + + em_shm->to_idle_hook_storage.idx = 0; + em_shm->to_active_hook_storage.idx = 0; + em_shm->while_idle_hook_storage.idx = 0; + + env_spinlock_init(&em_shm->dispatch_enter_cb_storage.lock); + env_spinlock_init(&em_shm->dispatch_exit_cb_storage.lock); + env_spinlock_init(&em_shm->alloc_hook_storage.lock); + env_spinlock_init(&em_shm->free_hook_storage.lock); + env_spinlock_init(&em_shm->send_hook_storage.lock); + + env_spinlock_init(&em_shm->to_idle_hook_storage.lock); + env_spinlock_init(&em_shm->to_active_hook_storage.lock); + env_spinlock_init(&em_shm->while_idle_hook_storage.lock); + + if (EM_API_HOOKS_ENABLE) { + if (api_hooks->alloc_hook) { + stat = em_hooks_register_alloc(api_hooks->alloc_hook); + if (unlikely(stat != EM_OK)) + return stat; + } + if (api_hooks->free_hook) { + stat = em_hooks_register_free(api_hooks->free_hook); + if (unlikely(stat != EM_OK)) + return stat; + } + if (api_hooks->send_hook) { + stat = em_hooks_register_send(api_hooks->send_hook); + if (unlikely(stat != EM_OK)) + return stat; + } + } + + if (EM_IDLE_HOOKS_ENABLE) { + if (idle_hooks->to_idle_hook) { + stat = em_hooks_register_to_idle(idle_hooks->to_idle_hook); + if (unlikely(stat != EM_OK)) + return stat; + } + if (idle_hooks->to_active_hook) { + stat = em_hooks_register_to_active(idle_hooks->to_active_hook); + if (unlikely(stat != EM_OK)) + return stat; + } + if (idle_hooks->while_idle_hook) { + stat = em_hooks_register_while_idle(idle_hooks->while_idle_hook); + if (unlikely(stat != EM_OK)) + return stat; + } + } + + return EM_OK; +} + +em_status_t +hook_register(uint8_t hook_type, hook_fn_t hook_fn) +{ + hook_storage_t *hook_storage; + const hook_tbl_t *hook_tbl; + hook_tbl_t *next_tbl; + hook_tbl_t **active_tbl_ptr; + int 
idx; + int next_idx; + int i; + + /* Get the em_shm hook table and hook storage to update */ + active_tbl_ptr = get_hook_tbl(hook_type, &hook_storage/*out*/); + if (unlikely(active_tbl_ptr == NULL)) + return EM_ERR_BAD_ID; + + env_spinlock_lock(&hook_storage->lock); + + /* TODO: Check that no thread is still using the new memory area */ + + idx = hook_storage->idx; + next_idx = idx + 1; + if (next_idx >= API_HOOKS_MAX_TBL_SIZE) + next_idx = 0; + hook_tbl = &hook_storage->hook_tbl_storage[idx]; + next_tbl = &hook_storage->hook_tbl_storage[next_idx]; + + /* + * Copy old callback functions and find the index + * of the new function pointer. + */ + memcpy(next_tbl->tbl, hook_tbl->tbl, sizeof(next_tbl->tbl)); + + for (i = 0; i < EM_CALLBACKS_MAX; i++) { + if (next_tbl->tbl[i].void_hook == NULL) + break; + } + if (unlikely(i == EM_CALLBACKS_MAX)) { + env_spinlock_unlock(&hook_storage->lock); + return EM_ERR_ALLOC_FAILED; + } + /* Add new callback */ + next_tbl->tbl[i] = hook_fn; + + /* move the active hook tbl to the new tbl */ + *active_tbl_ptr = next_tbl; /* em_shm->..._hook_tbl = next_tbl */ + + hook_storage->idx = next_idx; + + env_spinlock_unlock(&hook_storage->lock); + + return EM_OK; +} + +em_status_t +hook_unregister(uint8_t hook_type, hook_fn_t hook_fn) +{ + hook_storage_t *hook_storage; + const hook_tbl_t *hook_tbl; + hook_tbl_t *next_tbl; + hook_tbl_t **active_tbl_ptr; + int idx; + int next_idx; + int ret; + int i; + + active_tbl_ptr = get_hook_tbl(hook_type, &hook_storage/*out*/); + if (unlikely(active_tbl_ptr == NULL)) + return EM_ERR_BAD_ID; + + env_spinlock_lock(&hook_storage->lock); + + /* TODO: Check that no thread is still using the new memory area */ + + idx = hook_storage->idx; + next_idx = idx + 1; + if (next_idx >= API_HOOKS_MAX_TBL_SIZE) + next_idx = 0; + hook_tbl = &hook_storage->hook_tbl_storage[idx]; + next_tbl = &hook_storage->hook_tbl_storage[next_idx]; + + /* + * Copy old callback functions and try to find matching + * function pointer. + */ + memcpy(next_tbl->tbl, hook_tbl->tbl, sizeof(next_tbl->tbl)); + + for (i = 0; i < EM_CALLBACKS_MAX; i++) + if (next_tbl->tbl[i].void_hook == hook_fn.void_hook) + break; + if (unlikely(i == EM_CALLBACKS_MAX)) { + env_spinlock_unlock(&hook_storage->lock); + return EM_ERR_NOT_FOUND; + } + + /* + * Remove a pointer and move the following array entries backwards + * and set callback array pointer to the beginning of the new array. 
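+	 *
+	 * Note: as in hook_register(), the modification is made to a copy
+	 * ('next_tbl') of the active table and is only published by swapping
+	 * the active table pointer below, so concurrent readers observe
+	 * either the old or the new table, never a half-edited one.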
+ */ + next_tbl->tbl[i].void_hook = NULL; + ret = pack_hook_tbl(next_tbl, i); + if (unlikely(ret != 0)) { + env_spinlock_unlock(&hook_storage->lock); + return EM_ERR_BAD_POINTER; + } + + /* move the active hook tbl to the new tbl */ + *active_tbl_ptr = next_tbl; /* em_shm->..._hook_tbl = next_tbl */ + + hook_storage->idx = next_idx; + + env_spinlock_unlock(&hook_storage->lock); + + return EM_OK; +} + +static hook_tbl_t ** +get_hook_tbl(const uint8_t hook_type, hook_storage_t **hook_storage /*out*/) +{ + hook_tbl_t **active_tbl_ptr; + + switch (hook_type) { + case ALLOC_HOOK: + *hook_storage = &em_shm->alloc_hook_storage; + active_tbl_ptr = &em_shm->alloc_hook_tbl; + break; + case FREE_HOOK: + *hook_storage = &em_shm->free_hook_storage; + active_tbl_ptr = &em_shm->free_hook_tbl; + break; + case SEND_HOOK: + *hook_storage = &em_shm->send_hook_storage; + active_tbl_ptr = &em_shm->send_hook_tbl; + break; + case DISPATCH_CALLBACK_ENTER: + *hook_storage = &em_shm->dispatch_enter_cb_storage; + active_tbl_ptr = &em_shm->dispatch_enter_cb_tbl; + break; + case DISPATCH_CALLBACK_EXIT: + *hook_storage = &em_shm->dispatch_exit_cb_storage; + active_tbl_ptr = &em_shm->dispatch_exit_cb_tbl; + break; + case TO_IDLE_HOOK: + *hook_storage = &em_shm->to_idle_hook_storage; + active_tbl_ptr = &em_shm->to_idle_hook_tbl; + break; + case TO_ACTIVE_HOOK: + *hook_storage = &em_shm->to_active_hook_storage; + active_tbl_ptr = &em_shm->to_active_hook_tbl; + break; + case WHILE_IDLE_HOOK: + *hook_storage = &em_shm->while_idle_hook_storage; + active_tbl_ptr = &em_shm->while_idle_hook_tbl; + break; + default: + return NULL; + } + + return active_tbl_ptr; +} + +static inline int +pack_hook_tbl(hook_tbl_t *const hook_tbl, unsigned int idx) +{ + hook_fn_t *const fn_tbl = hook_tbl->tbl; + + if (unlikely(idx >= EM_CALLBACKS_MAX || + fn_tbl[idx].void_hook != NULL)) + return -1; + + for (unsigned int i = idx; i < EM_CALLBACKS_MAX - 1; i++) { + if (fn_tbl[i + 1].void_hook != NULL) { + fn_tbl[i].void_hook = fn_tbl[i + 1].void_hook; + fn_tbl[i + 1].void_hook = NULL; + } else { + break; + } + } + + return 0; +} diff --git a/src/em_hooks.h b/src/em_hooks.h index b07a95b6..1d307acc 100644 --- a/src/em_hooks.h +++ b/src/em_hooks.h @@ -1,115 +1,153 @@ -/* - * Copyright (c) 2019, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EM_HOOKS_H_ -#define EM_HOOKS_H_ - -/** - * @file - * EM internal API callback hook functions - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * EM API-callback hooks init function (called once at startup). - */ -em_status_t -hooks_init(const em_api_hooks_t *api_hooks); - -/** - * Helper function for registering callback hook functions. - * - * @return EM_OK if there was room left to register a new callback - */ -em_status_t -hook_register(uint8_t type, hook_fn_t hook_fn); - -/** - * Helper function for unregistering dispatcher callback functions. - * - * @return EM_OK if there was room left to register a new callback - */ -em_status_t -hook_unregister(uint8_t type, hook_fn_t hook_fn); - -static inline void -call_api_hooks_alloc(const em_event_t events[], const int num_act, - const int num_req, size_t size, em_event_type_t type, - em_pool_t pool) -{ - const hook_tbl_t *alloc_hook_tbl = em_shm->alloc_hook_tbl; - em_api_hook_alloc_t alloc_hook_fn; - - for (int i = 0; i < EM_CALLBACKS_MAX; i++) { - alloc_hook_fn = alloc_hook_tbl->tbl[i].alloc; - if (alloc_hook_fn == NULL) - return; - alloc_hook_fn(events, num_act, num_req, size, type, pool); - } -} - -static inline void -call_api_hooks_free(const em_event_t events[], const int num) -{ - const hook_tbl_t *free_hook_tbl = em_shm->free_hook_tbl; - em_api_hook_free_t free_hook_fn; - - for (int i = 0; i < EM_CALLBACKS_MAX; i++) { - free_hook_fn = free_hook_tbl->tbl[i].free; - if (free_hook_fn == NULL) - return; - free_hook_fn(events, num); - } -} - -static inline void -call_api_hooks_send(const em_event_t events[], const int num, - em_queue_t queue, em_event_group_t event_group) -{ - const hook_tbl_t *send_hook_tbl = em_shm->send_hook_tbl; - em_api_hook_send_t send_hook_fn; - int i; - - for (i = 0; i < EM_CALLBACKS_MAX; i++) { - send_hook_fn = send_hook_tbl->tbl[i].send; - if (send_hook_fn == NULL) - return; - send_hook_fn(events, num, queue, event_group); - } -} - -#ifdef __cplusplus -} -#endif - -#endif /* EM_HOOKS_H_ */ +/* + * Copyright (c) 2019, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EM_HOOKS_H_
+#define EM_HOOKS_H_
+
+/**
+ * @file
+ * EM internal API callback hook functions
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * EM API-callback and idle hooks init function (called once at startup).
+ */
+em_status_t
+hooks_init(const em_api_hooks_t *api_hooks, const em_idle_hooks_t *idle_hooks);
+
+/**
+ * Helper function for registering callback hook functions.
+ *
+ * @return EM_OK if there was room left to register a new callback
+ */
+em_status_t
+hook_register(uint8_t type, hook_fn_t hook_fn);
+
+/**
+ * Helper function for unregistering callback hook functions.
+ *
+ * @return EM_OK if the given callback was found and unregistered
+ */
+em_status_t
+hook_unregister(uint8_t type, hook_fn_t hook_fn);
+
+static inline void
+call_api_hooks_alloc(const em_event_t events[], const int num_act,
+		     const int num_req, uint32_t size, em_event_type_t type,
+		     em_pool_t pool)
+{
+	const hook_tbl_t *alloc_hook_tbl = em_shm->alloc_hook_tbl;
+	em_api_hook_alloc_t alloc_hook_fn;
+
+	for (int i = 0; i < EM_CALLBACKS_MAX; i++) {
+		alloc_hook_fn = alloc_hook_tbl->tbl[i].alloc;
+		if (alloc_hook_fn == NULL)
+			return;
+		alloc_hook_fn(events, num_act, num_req, size, type, pool);
+	}
+}
+
+static inline void
+call_api_hooks_free(const em_event_t events[], const int num)
+{
+	const hook_tbl_t *free_hook_tbl = em_shm->free_hook_tbl;
+	em_api_hook_free_t free_hook_fn;
+
+	for (int i = 0; i < EM_CALLBACKS_MAX; i++) {
+		free_hook_fn = free_hook_tbl->tbl[i].free;
+		if (free_hook_fn == NULL)
+			return;
+		free_hook_fn(events, num);
+	}
+}
+
+static inline void
+call_api_hooks_send(const em_event_t events[], const int num,
+		    em_queue_t queue, em_event_group_t event_group)
+{
+	const hook_tbl_t *send_hook_tbl = em_shm->send_hook_tbl;
+	em_api_hook_send_t send_hook_fn;
+
+	for (int i = 0; i < EM_CALLBACKS_MAX; i++) {
+		send_hook_fn = send_hook_tbl->tbl[i].send;
+		if (send_hook_fn == NULL)
+			return;
+		send_hook_fn(events, num, queue, event_group);
+	}
+}
+
+static inline void call_idle_hooks_to_idle(uint64_t to_idle_delay_ns)
+{
+	const hook_tbl_t *to_idle_hook_tbl = em_shm->to_idle_hook_tbl;
+	em_idle_hook_to_idle_t to_idle_hook_fn;
+
+	for (int i = 0; i < EM_CALLBACKS_MAX; i++) {
+		to_idle_hook_fn = to_idle_hook_tbl->tbl[i].to_idle;
+		if (to_idle_hook_fn == NULL)
+			return;
+		to_idle_hook_fn(to_idle_delay_ns);
+	}
+}
+
+static inline void call_idle_hooks_to_active(void)
+{
+	const hook_tbl_t *to_active_hook_tbl = em_shm->to_active_hook_tbl;
+	em_idle_hook_to_active_t to_active_hook_fn;
+
+	for (int i = 0; i < EM_CALLBACKS_MAX; i++) {
+		to_active_hook_fn = to_active_hook_tbl->tbl[i].to_active;
+		if (to_active_hook_fn == NULL)
+			return;
+		to_active_hook_fn();
+	}
+}
+
+static inline void
call_idle_hooks_while_idle(void) +{ + const hook_tbl_t *while_idle_hook_tbl = em_shm->while_idle_hook_tbl; + em_idle_hook_while_idle_t while_idle_hook_fn; + + for (int i = 0; i < EM_CALLBACKS_MAX; i++) { + while_idle_hook_fn = while_idle_hook_tbl->tbl[i].while_idle; + if (while_idle_hook_fn == NULL) + return; + while_idle_hook_fn(); + } +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_HOOKS_H_ */ diff --git a/src/em_include.h b/src/em_include.h index 260f37a8..edf0a70d 100644 --- a/src/em_include.h +++ b/src/em_include.h @@ -1,121 +1,124 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EM_INCLUDE_H_ -#define EM_INCLUDE_H_ - -/** - * @file - * EM internal include file - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE /* for strnlen() */ -#endif -#include -#include -#include -#include - -#include -#include -#include - -/* ODP API */ -#include -#include - -/** - * @def EM_CHECK_INIT_CALLED - * - * Magic number used detect whether an initialization function has been called - * prior to a create-function. - * - * An error will be reported and the creation will fail if the call to _init() - * has been omitted. 
- */
-#define EM_CHECK_INIT_CALLED ((uint32_t)0xACDCBEEF)
-
-#include "misc/list.h"
-#include "misc/objpool.h"
-
-#include "em_init.h"
-
-#include "em_atomic.h"
-#include "em_daemon_eo_types.h"
-#include "em_chaining_types.h"
-#include "em_core_types.h"
-#include "em_error_types.h"
-#include "em_pool_types.h"
-#include "em_eo_types.h"
-#include "em_event_group_types.h"
-#include "em_queue_types.h"
-#include "em_event_types.h"
-#include "em_queue_group_types.h"
-#include "em_atomic_group_types.h"
-#include "em_internal_event_types.h"
-#include "em_dispatcher_types.h"
-#include "em_sync_api_types.h"
-#include "em_hook_types.h"
-#include "em_libconfig_types.h"
-#include "add-ons/event_timer/em_timer_types.h"
-#include "em_cli_types.h"
-
-#include "em_mem.h"
-
-#include "em_core.h"
-#include "em_error.h"
-#include "em_eo.h"
-#include "em_internal_event.h"
-#include "em_info.h"
-#include "em_pool.h"
-#include "em_event_state.h"
-#include "em_event.h"
-#include "em_queue.h"
-#include "em_queue_group.h"
-#include "em_event_group.h"
-#include "em_daemon_eo.h"
-#include "em_atomic_group.h"
-#include "em_dispatcher.h"
-#include "em_libconfig.h"
-#include "em_hooks.h"
-#include "em_chaining.h"
-#include "em_cli.h"
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* EM_INCLUDE_H_ */
+/*
+ * Copyright (c) 2015, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef EM_INCLUDE_H_
+#define EM_INCLUDE_H_
+
+/**
+ * @file
+ * EM internal include file
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* for strnlen() */
+#endif
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/* ODP API */
+#include
+#include
+
+/**
+ * @def EM_CHECK_INIT_CALLED
+ *
+ * Magic number used to detect whether an initialization function has been
+ * called prior to a create-function.
+ *
+ * An error will be reported and the creation will fail if the call to _init()
+ * has been omitted.
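+ *
+ * Intended usage pattern, sketched with a hypothetical module 'foo'
+ * (the names and the error/escope arguments below are illustrative only):
+ *
+ *	foo_init():   foo_tbl->init_check = EM_CHECK_INIT_CALLED;
+ *	foo_create(): RETURN_ERROR_IF(foo_tbl->init_check != EM_CHECK_INIT_CALLED,
+ *				      <error>, <escope>, "foo_init() not called");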
+ */ +#define EM_CHECK_INIT_CALLED ((uint32_t)0xACDCBEEF) + +#include "misc/list.h" +#include "misc/objpool.h" + +#include "em_init.h" + +#include "em_atomic.h" +#include "em_daemon_eo_types.h" +#include "em_chaining_types.h" +#include "em_core_types.h" +#include "em_error_types.h" +#include "em_pool_types.h" +#include "em_eo_types.h" +#include "em_event_group_types.h" +#include "em_queue_types.h" +#include "em_event_types.h" +#include "em_queue_group_types.h" +#include "em_atomic_group_types.h" +#include "em_internal_event_types.h" +#include "em_dispatcher_types.h" +#include "em_sync_api_types.h" +#include "em_hook_types.h" +#include "em_libconfig_types.h" +#include "add-ons/event_timer/em_timer_types.h" +#include "em_cli_types.h" + +#include "em_mem.h" + +#include "em_event_inline.h" +#include "em_queue_inline.h" + +#include "em_core.h" +#include "em_error.h" +#include "em_eo.h" +#include "em_internal_event.h" +#include "em_info.h" +#include "em_pool.h" +#include "em_event_state.h" +#include "em_event.h" +#include "em_queue.h" +#include "em_queue_group.h" +#include "em_event_group.h" +#include "em_daemon_eo.h" +#include "em_atomic_group.h" +#include "em_hooks.h" +#include "em_dispatcher.h" +#include "em_libconfig.h" +#include "em_chaining.h" +#include "em_cli.h" + +#ifdef __cplusplus +} +#endif + +#endif /* EM_INCLUDE_H_ */ diff --git a/src/em_info.c b/src/em_info.c index 8c047f8d..88945082 100644 --- a/src/em_info.c +++ b/src/em_info.c @@ -1,358 +1,356 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* Copyright (c) 2018, Linaro Limited - * Copyright (c) 2020-2021, Nokia - * All rights reserved. 
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -/* sysinfo printouts from the ODP example: odp_sysinfo.c */ -/* SW ISA detection from the /odp_sysinfo_parse.c files */ - -#include "em_include.h" - -static const char *cpu_arch_name(odp_cpu_arch_t cpu_arch) -{ - switch (cpu_arch) { - case ODP_CPU_ARCH_ARM: - return "ARM"; - case ODP_CPU_ARCH_MIPS: - return "MIPS"; - case ODP_CPU_ARCH_PPC: - return "PPC"; - case ODP_CPU_ARCH_RISCV: - return "RISC-V"; - case ODP_CPU_ARCH_X86: - return "x86"; - default: - return "Unknown"; - } -} - -static const char *arm_isa_name(odp_cpu_arch_arm_t isa) -{ - switch (isa) { - case ODP_CPU_ARCH_ARMV6: - return "ARMv6"; - case ODP_CPU_ARCH_ARMV7: - return "ARMv7-A"; - case ODP_CPU_ARCH_ARMV8_0: - return "ARMv8.0-A"; - case ODP_CPU_ARCH_ARMV8_1: - return "ARMv8.1-A"; - case ODP_CPU_ARCH_ARMV8_2: - return "ARMv8.2-A"; - case ODP_CPU_ARCH_ARMV8_3: - return "ARMv8.3-A"; - case ODP_CPU_ARCH_ARMV8_4: - return "ARMv8.4-A"; - case ODP_CPU_ARCH_ARMV8_5: - return "ARMv8.5-A"; - case ODP_CPU_ARCH_ARMV8_6: - return "ARMv8.6-A"; - case ODP_CPU_ARCH_ARMV8_7: - return "ARMv8.7-A"; - case ODP_CPU_ARCH_ARMV9_0: - return "ARMv9.0-A"; - case ODP_CPU_ARCH_ARMV9_1: - return "ARMv9.1-A"; - case ODP_CPU_ARCH_ARMV9_2: - return "ARMv9.2-A"; - default: - return "Unknown"; - } -} - -static const char *x86_isa_name(odp_cpu_arch_x86_t isa) -{ - switch (isa) { - case ODP_CPU_ARCH_X86_I686: - return "x86_i686"; - case ODP_CPU_ARCH_X86_64: - return "x86_64"; - default: - return "Unknown"; - } -} - -static const char *cpu_arch_isa_name(odp_cpu_arch_t cpu_arch, - odp_cpu_arch_isa_t cpu_arch_isa) -{ - switch (cpu_arch) { - case ODP_CPU_ARCH_ARM: - return arm_isa_name(cpu_arch_isa.arm); - case ODP_CPU_ARCH_MIPS: - return "Unknown"; - case ODP_CPU_ARCH_PPC: - return "Unknown"; - case ODP_CPU_ARCH_RISCV: - return "Unknown"; - case ODP_CPU_ARCH_X86: - return x86_isa_name(cpu_arch_isa.x86); - default: - return "Unknown"; - } -} - -/* - * Detect the ARM ISA used when compiling the em-odp library. - * (based on the /odp_sysinfo_parse.c files) - */ -static odp_cpu_arch_arm_t detect_sw_isa_arm(void) -{ - odp_cpu_arch_arm_t isa_arm = ODP_CPU_ARCH_ARM_UNKNOWN; - -#if defined(__ARM_ARCH) - - if (__ARM_ARCH == 6) { - isa_arm = ODP_CPU_ARCH_ARMV6; - } else if (__ARM_ARCH == 7) { - isa_arm = ODP_CPU_ARCH_ARMV7; - } else if (__ARM_ARCH == 8) { - #ifdef __ARM_FEATURE_QRDMX - /* v8.1 or higher */ - isa_arm = ODP_CPU_ARCH_ARMV8_1; - #else - isa_arm = ODP_CPU_ARCH_ARMV8_0; - #endif - } - - if (__ARM_ARCH >= 800) { - /* ACLE 2018 defines that from v8.1 onwards the value includes - * the minor version number: __ARM_ARCH = X * 100 + Y - * E.g. 
for Armv8.1 __ARM_ARCH = 801 - */ - int major = __ARM_ARCH / 100; - int minor = __ARM_ARCH - (major * 100); - - if (major == 8) { - switch (minor) { - case 0: - isa_arm = ODP_CPU_ARCH_ARMV8_0; - break; - case 1: - isa_arm = ODP_CPU_ARCH_ARMV8_1; - break; - case 2: - isa_arm = ODP_CPU_ARCH_ARMV8_2; - break; - case 3: - isa_arm = ODP_CPU_ARCH_ARMV8_3; - break; - case 4: - isa_arm = ODP_CPU_ARCH_ARMV8_4; - break; - case 5: - isa_arm = ODP_CPU_ARCH_ARMV8_5; - break; - case 6: - isa_arm = ODP_CPU_ARCH_ARMV8_6; - break; - case 7: - isa_arm = ODP_CPU_ARCH_ARMV8_7; - break; - default: - isa_arm = ODP_CPU_ARCH_ARM_UNKNOWN; - break; - } - } else if (major == 9) { - switch (minor) { - case 0: - isa_arm = ODP_CPU_ARCH_ARMV9_0; - break; - case 1: - isa_arm = ODP_CPU_ARCH_ARMV9_1; - break; - case 2: - isa_arm = ODP_CPU_ARCH_ARMV9_2; - break; - default: - isa_arm = ODP_CPU_ARCH_ARM_UNKNOWN; - break; - } - } - } -#endif - - return isa_arm; -} - -/* - * Detect the x86 ISA used when compiling the em-odp library. - * (based on the /odp_sysinfo_parse.c files) - */ -static odp_cpu_arch_x86_t detect_sw_isa_x86(void) -{ - odp_cpu_arch_x86_t isa_x86 = ODP_CPU_ARCH_X86_UNKNOWN; - -#if defined __x86_64 || defined __x86_64__ - isa_x86 = ODP_CPU_ARCH_X86_64; -#elif defined __i686 || defined __i686__ - isa_x86 = ODP_CPU_ARCH_X86_I686; -#endif - return isa_x86; -} - -static odp_cpu_arch_mips_t detect_sw_isa_mips(void) -{ - return ODP_CPU_ARCH_MIPS_UNKNOWN; -} - -static odp_cpu_arch_ppc_t detect_sw_isa_ppc(void) -{ - return ODP_CPU_ARCH_PPC_UNKNOWN; -} - -static odp_cpu_arch_riscv_t detect_sw_isa_riscv(void) -{ - return ODP_CPU_ARCH_RISCV_UNKNOWN; -} - -/* - * Detect the SW CPU ISA used when compiling the em-odp library. - * (based on the /odp_sysinfo_parse.c files) - */ -static odp_cpu_arch_isa_t detect_sw_isa(odp_cpu_arch_t cpu_arch) -{ - odp_cpu_arch_isa_t sw_isa; - - switch (cpu_arch) { - case ODP_CPU_ARCH_ARM: - sw_isa.arm = detect_sw_isa_arm(); - break; - case ODP_CPU_ARCH_MIPS: - sw_isa.mips = detect_sw_isa_mips(); - break; - case ODP_CPU_ARCH_PPC: - sw_isa.ppc = detect_sw_isa_ppc(); - break; - case ODP_CPU_ARCH_RISCV: - sw_isa.riscv = detect_sw_isa_riscv(); - break; - case ODP_CPU_ARCH_X86: - sw_isa.x86 = detect_sw_isa_x86(); - break; - default: - sw_isa.arm = ODP_CPU_ARCH_ARM_UNKNOWN; - break; - } - - return sw_isa; -} - -void print_core_map_info(void) -{ - int logic_core; - - EM_PRINT("Core mapping: EM-core <-> phys-core <-> ODP-thread\n"); - - for (logic_core = 0; logic_core < em_core_count(); logic_core++) { - EM_PRINT(" %2i %2i %2i\n", - logic_core, - em_core_id_get_physical(logic_core), - logic_to_thr_core_id(logic_core)); - } - - EM_PRINT("\n"); -} - -void print_cpu_arch_info(void) -{ - odp_system_info_t sysinfo; - int err; - - err = odp_system_info(&sysinfo); - if (err) { - EM_PRINT("%s(): odp_system_info() call failed:%d\n", - __func__, err); - return; - } - - /* detect & print EM SW ISA info here also */ - odp_cpu_arch_isa_t isa_em = detect_sw_isa(sysinfo.cpu_arch); - - const char *cpu_arch = cpu_arch_name(sysinfo.cpu_arch); - const char *hw_isa = cpu_arch_isa_name(sysinfo.cpu_arch, - sysinfo.cpu_isa_hw); - const char *sw_isa_odp = cpu_arch_isa_name(sysinfo.cpu_arch, - sysinfo.cpu_isa_sw); - const char *sw_isa_em = cpu_arch_isa_name(sysinfo.cpu_arch, isa_em); - - EM_PRINT("CPU model: %s\n" - "CPU arch: %s\n" - "CPU ISA version: %s\n" - " SW ISA version (ODP): %s\n" - " SW ISA version (EM): %s\n", - odp_cpu_model_str(), cpu_arch, - hw_isa, sw_isa_odp, sw_isa_em); -} - -static void print_odp_info(void) 
-{ - EM_PRINT("ODP API version: %s\n" - "ODP impl name: %s\n" - "ODP impl details: %s\n", - odp_version_api_str(), - odp_version_impl_name(), - odp_version_impl_str()); -} - -/** - * Print information about EM & the environment - */ -void print_em_info(void) -{ - EM_PRINT("\n" - "===========================================================\n" - "EM Info on target: %s\n" - "===========================================================\n" - "EM API version: %s\n" - "EM version: %s, " -#ifdef EM_64_BIT - "64 bit " -#else - "32 bit " -#endif - "(EM_CHECK_LEVEL:%d, EM_ESV_ENABLE:%d)\n" - "EM build info: %s\n", - EM_TARGET_STR, EM_VERSION_API_STR, EM_VERSION_STR, - EM_CHECK_LEVEL, EM_ESV_ENABLE, - EM_BUILD_INFO_STR); - - print_odp_info(); - print_cpu_arch_info(); - print_core_map_info(); - print_queue_capa(); - queue_group_info_print_all(); - em_pool_info_print_all(); - print_event_info(); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Copyright (c) 2018, Linaro Limited + * Copyright (c) 2020-2021, Nokia + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/* sysinfo printouts from the ODP example: odp_sysinfo.c */ +/* SW ISA detection from the /odp_sysinfo_parse.c files */ + +#include "em_include.h" + +static const char *cpu_arch_name(odp_cpu_arch_t cpu_arch) +{ + switch (cpu_arch) { + case ODP_CPU_ARCH_ARM: + return "ARM"; + case ODP_CPU_ARCH_MIPS: + return "MIPS"; + case ODP_CPU_ARCH_PPC: + return "PPC"; + case ODP_CPU_ARCH_RISCV: + return "RISC-V"; + case ODP_CPU_ARCH_X86: + return "x86"; + default: + return "Unknown"; + } +} + +static const char *arm_isa_name(odp_cpu_arch_arm_t isa) +{ + switch (isa) { + case ODP_CPU_ARCH_ARMV6: + return "ARMv6"; + case ODP_CPU_ARCH_ARMV7: + return "ARMv7-A"; + case ODP_CPU_ARCH_ARMV8_0: + return "ARMv8.0-A"; + case ODP_CPU_ARCH_ARMV8_1: + return "ARMv8.1-A"; + case ODP_CPU_ARCH_ARMV8_2: + return "ARMv8.2-A"; + case ODP_CPU_ARCH_ARMV8_3: + return "ARMv8.3-A"; + case ODP_CPU_ARCH_ARMV8_4: + return "ARMv8.4-A"; + case ODP_CPU_ARCH_ARMV8_5: + return "ARMv8.5-A"; + case ODP_CPU_ARCH_ARMV8_6: + return "ARMv8.6-A"; + case ODP_CPU_ARCH_ARMV8_7: + return "ARMv8.7-A"; + case ODP_CPU_ARCH_ARMV9_0: + return "ARMv9.0-A"; + case ODP_CPU_ARCH_ARMV9_1: + return "ARMv9.1-A"; + case ODP_CPU_ARCH_ARMV9_2: + return "ARMv9.2-A"; + default: + return "Unknown"; + } +} + +static const char *x86_isa_name(odp_cpu_arch_x86_t isa) +{ + switch (isa) { + case ODP_CPU_ARCH_X86_I686: + return "x86_i686"; + case ODP_CPU_ARCH_X86_64: + return "x86_64"; + default: + return "Unknown"; + } +} + +static const char *cpu_arch_isa_name(odp_cpu_arch_t cpu_arch, + odp_cpu_arch_isa_t cpu_arch_isa) +{ + switch (cpu_arch) { + case ODP_CPU_ARCH_ARM: + return arm_isa_name(cpu_arch_isa.arm); + case ODP_CPU_ARCH_MIPS: + return "Unknown"; + case ODP_CPU_ARCH_PPC: + return "Unknown"; + case ODP_CPU_ARCH_RISCV: + return "Unknown"; + case ODP_CPU_ARCH_X86: + return x86_isa_name(cpu_arch_isa.x86); + default: + return "Unknown"; + } +} + +/* + * Detect the ARM ISA used when compiling the em-odp library. + * (based on the /odp_sysinfo_parse.c files) + */ +static odp_cpu_arch_arm_t detect_sw_isa_arm(void) +{ + odp_cpu_arch_arm_t isa_arm = ODP_CPU_ARCH_ARM_UNKNOWN; + +#if defined(__ARM_ARCH) + + if (__ARM_ARCH == 6) { + isa_arm = ODP_CPU_ARCH_ARMV6; + } else if (__ARM_ARCH == 7) { + isa_arm = ODP_CPU_ARCH_ARMV7; + } else if (__ARM_ARCH == 8) { + #ifdef __ARM_FEATURE_QRDMX + /* v8.1 or higher */ + isa_arm = ODP_CPU_ARCH_ARMV8_1; + #else + isa_arm = ODP_CPU_ARCH_ARMV8_0; + #endif + } + + if (__ARM_ARCH >= 800) { + /* ACLE 2018 defines that from v8.1 onwards the value includes + * the minor version number: __ARM_ARCH = X * 100 + Y + * E.g. 
for Armv8.1 __ARM_ARCH = 801 + */ + int major = __ARM_ARCH / 100; + int minor = __ARM_ARCH - (major * 100); + + if (major == 8) { + switch (minor) { + case 0: + isa_arm = ODP_CPU_ARCH_ARMV8_0; + break; + case 1: + isa_arm = ODP_CPU_ARCH_ARMV8_1; + break; + case 2: + isa_arm = ODP_CPU_ARCH_ARMV8_2; + break; + case 3: + isa_arm = ODP_CPU_ARCH_ARMV8_3; + break; + case 4: + isa_arm = ODP_CPU_ARCH_ARMV8_4; + break; + case 5: + isa_arm = ODP_CPU_ARCH_ARMV8_5; + break; + case 6: + isa_arm = ODP_CPU_ARCH_ARMV8_6; + break; + case 7: + isa_arm = ODP_CPU_ARCH_ARMV8_7; + break; + default: + isa_arm = ODP_CPU_ARCH_ARM_UNKNOWN; + break; + } + } else if (major == 9) { + switch (minor) { + case 0: + isa_arm = ODP_CPU_ARCH_ARMV9_0; + break; + case 1: + isa_arm = ODP_CPU_ARCH_ARMV9_1; + break; + case 2: + isa_arm = ODP_CPU_ARCH_ARMV9_2; + break; + default: + isa_arm = ODP_CPU_ARCH_ARM_UNKNOWN; + break; + } + } + } +#endif + + return isa_arm; +} + +/* + * Detect the x86 ISA used when compiling the em-odp library. + * (based on the /odp_sysinfo_parse.c files) + */ +static odp_cpu_arch_x86_t detect_sw_isa_x86(void) +{ + odp_cpu_arch_x86_t isa_x86 = ODP_CPU_ARCH_X86_UNKNOWN; + +#if defined __x86_64 || defined __x86_64__ + isa_x86 = ODP_CPU_ARCH_X86_64; +#elif defined __i686 || defined __i686__ + isa_x86 = ODP_CPU_ARCH_X86_I686; +#endif + return isa_x86; +} + +static odp_cpu_arch_mips_t detect_sw_isa_mips(void) +{ + return ODP_CPU_ARCH_MIPS_UNKNOWN; +} + +static odp_cpu_arch_ppc_t detect_sw_isa_ppc(void) +{ + return ODP_CPU_ARCH_PPC_UNKNOWN; +} + +static odp_cpu_arch_riscv_t detect_sw_isa_riscv(void) +{ + return ODP_CPU_ARCH_RISCV_UNKNOWN; +} + +/* + * Detect the SW CPU ISA used when compiling the em-odp library. + * (based on the /odp_sysinfo_parse.c files) + */ +static odp_cpu_arch_isa_t detect_sw_isa(odp_cpu_arch_t cpu_arch) +{ + odp_cpu_arch_isa_t sw_isa; + + switch (cpu_arch) { + case ODP_CPU_ARCH_ARM: + sw_isa.arm = detect_sw_isa_arm(); + break; + case ODP_CPU_ARCH_MIPS: + sw_isa.mips = detect_sw_isa_mips(); + break; + case ODP_CPU_ARCH_PPC: + sw_isa.ppc = detect_sw_isa_ppc(); + break; + case ODP_CPU_ARCH_RISCV: + sw_isa.riscv = detect_sw_isa_riscv(); + break; + case ODP_CPU_ARCH_X86: + sw_isa.x86 = detect_sw_isa_x86(); + break; + default: + sw_isa.arm = ODP_CPU_ARCH_ARM_UNKNOWN; + break; + } + + return sw_isa; +} + +void print_core_map_info(void) +{ + EM_PRINT("Core mapping: EM-core <-> phys-core <-> ODP-thread\n"); + + for (int logic_core = 0; logic_core < em_core_count(); logic_core++) { + EM_PRINT(" %2i %2i %2i\n", + logic_core, + em_core_id_get_physical(logic_core), + logic_to_thr_core_id(logic_core)); + } + + EM_PRINT("\n"); +} + +void print_cpu_arch_info(void) +{ + odp_system_info_t sysinfo; + int err; + + err = odp_system_info(&sysinfo); + if (err) { + EM_PRINT("%s(): odp_system_info() call failed:%d\n", + __func__, err); + return; + } + + /* detect & print EM SW ISA info here also */ + odp_cpu_arch_isa_t isa_em = detect_sw_isa(sysinfo.cpu_arch); + + const char *cpu_arch = cpu_arch_name(sysinfo.cpu_arch); + const char *hw_isa = cpu_arch_isa_name(sysinfo.cpu_arch, + sysinfo.cpu_isa_hw); + const char *sw_isa_odp = cpu_arch_isa_name(sysinfo.cpu_arch, + sysinfo.cpu_isa_sw); + const char *sw_isa_em = cpu_arch_isa_name(sysinfo.cpu_arch, isa_em); + + EM_PRINT("CPU model: %s\n" + "CPU arch: %s\n" + "CPU ISA version: %s\n" + " SW ISA version (ODP): %s\n" + " SW ISA version (EM): %s\n", + odp_cpu_model_str(), cpu_arch, + hw_isa, sw_isa_odp, sw_isa_em); +} + +static void print_odp_info(void) +{ + 
EM_PRINT("ODP API version: %s\n" + "ODP impl name: %s\n" + "ODP impl details: %s\n", + odp_version_api_str(), + odp_version_impl_name(), + odp_version_impl_str()); +} + +/** + * Print information about EM & the environment + */ +void print_em_info(void) +{ + EM_PRINT("\n" + "===========================================================\n" + "EM Info on target: %s\n" + "===========================================================\n" + "EM API version: %s\n" + "EM version: %s, " +#ifdef EM_64_BIT + "64 bit " +#else + "32 bit " +#endif + "(EM_CHECK_LEVEL:%d, EM_ESV_ENABLE:%d)\n" + "EM build info: %s\n", + EM_TARGET_STR, EM_VERSION_API_STR, EM_VERSION_STR, + EM_CHECK_LEVEL, EM_ESV_ENABLE, + EM_BUILD_INFO_STR); + + print_odp_info(); + print_cpu_arch_info(); + print_core_map_info(); + print_queue_capa(); + queue_group_info_print_all(); + em_pool_info_print_all(); + print_event_info(); +} diff --git a/src/em_init.c b/src/em_init.c index 64163273..f884cb75 100644 --- a/src/em_init.c +++ b/src/em_init.c @@ -1,151 +1,149 @@ -/* Copyright (c) 2020 Nokia Solutions and Networks - * All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include "em_include.h" - -em_status_t -poll_drain_mask_check(const em_core_mask_t *logic_mask, - const em_core_mask_t *poll_drain_mask) -{ - /* check if mask is zero (all cores, OK) */ - if (em_core_mask_iszero(poll_drain_mask)) - return EM_OK; - - int i; - - /* check mask validity */ - for (i = 0; i < EM_MAX_CORES; i++) { - if (em_core_mask_isset(i, poll_drain_mask) && - !em_core_mask_isset(i, logic_mask)) - return EM_ERR_OPERATION_FAILED; - } - return EM_OK; -} - -em_status_t -input_poll_init(const em_core_mask_t *logic_mask, const em_conf_t *conf) -{ - return poll_drain_mask_check(logic_mask, - &conf->input.input_poll_mask); -} - -em_status_t -output_drain_init(const em_core_mask_t *logic_mask, const em_conf_t *conf) -{ - return poll_drain_mask_check(logic_mask, - &conf->output.output_drain_mask); -} - -em_status_t -poll_drain_mask_set_local(bool *const result /*out*/, int core_id, - const em_core_mask_t *mask) -{ - if (em_core_mask_iszero(mask) || em_core_mask_isset(core_id, mask)) - *result = true; - else - *result = false; - return EM_OK; -} - -em_status_t -input_poll_init_local(bool *const result /*out*/, int core_id, - const em_conf_t *conf) -{ - if (conf->input.input_poll_fn == NULL) { - *result = false; - return EM_OK; - } - return poll_drain_mask_set_local(result, core_id, - &conf->input.input_poll_mask); -} - -em_status_t -output_drain_init_local(bool *const result /*out*/, int core_id, - const em_conf_t *conf) -{ - if (conf->output.output_drain_fn == NULL) { - *result = false; - return EM_OK; - } - return poll_drain_mask_set_local(result, core_id, - &conf->output.output_drain_mask); -} - -void core_log_fn_set(em_log_func_t func) -{ - em_locm_t *const locm = &em_locm; - - locm->log_fn = func; -} - -em_status_t init_ext_thread(void) -{ - em_locm_t *const locm = &em_locm; - odp_shm_t shm; - em_shm_t *shm_addr; - em_status_t stat = EM_OK; - - /* Make sure that em_shm is available in this external thread */ - shm = odp_shm_lookup("em_shm"); - RETURN_ERROR_IF(shm == ODP_SHM_INVALID, - EM_ERR_NOT_FOUND, EM_ESCOPE_INIT_CORE, - "Shared memory lookup failed!"); - - shm_addr = odp_shm_addr(shm); - RETURN_ERROR_IF(shm_addr == NULL, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, - "Shared memory ptr NULL"); - - if (shm_addr->conf.process_per_core && em_shm == NULL) - em_shm = shm_addr; - - RETURN_ERROR_IF(shm_addr != em_shm, EM_ERR_BAD_POINTER, 
EM_ESCOPE_INIT_CORE, - "Shared memory init fails: em_shm:%p != shm_addr:%p", - em_shm, shm_addr); - - stat = emcli_init_local(); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_INIT_CORE, - "Ext emcli_init_local() fails: %" PRI_STAT "", stat); - - /* - * Mark that this is an external thread, i.e. not an EM-core and thus - * will not participate in EM event dispatching. - */ - locm->is_external_thr = true; - - return EM_OK; -} - -em_status_t sync_api_init_local(void) -{ - em_locm_t *const locm = &em_locm; - int core = locm->core_id; - em_queue_t unsched_queue; - queue_elem_t *q_elem; - - unsched_queue = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + core); - if (unlikely(unsched_queue == EM_QUEUE_UNDEF)) - return EM_ERR_NOT_FOUND; - q_elem = queue_elem_get(unsched_queue); - if (unlikely(!q_elem)) - return EM_ERR_BAD_POINTER; - locm->sync_api.ctrl_poll.core_unsched_queue = unsched_queue; - locm->sync_api.ctrl_poll.core_unsched_qelem = q_elem; - locm->sync_api.ctrl_poll.core_odp_plain_queue = q_elem->odp_queue; - - unsched_queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE); - if (unlikely(unsched_queue == EM_QUEUE_UNDEF)) - return EM_ERR_NOT_FOUND; - q_elem = queue_elem_get(unsched_queue); - if (unlikely(!q_elem)) - return EM_ERR_BAD_POINTER; - locm->sync_api.ctrl_poll.shared_unsched_queue = unsched_queue; - locm->sync_api.ctrl_poll.shared_unsched_qelem = q_elem; - locm->sync_api.ctrl_poll.shared_odp_plain_queue = q_elem->odp_queue; - - locm->sync_api.in_progress = false; - - return EM_OK; -} +/* Copyright (c) 2020 Nokia Solutions and Networks + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "em_include.h" + +em_status_t +poll_drain_mask_check(const em_core_mask_t *logic_mask, + const em_core_mask_t *poll_drain_mask) +{ + /* check if mask is zero (all cores, OK) */ + if (em_core_mask_iszero(poll_drain_mask)) + return EM_OK; + + /* check mask validity */ + for (int i = 0; i < EM_MAX_CORES; i++) { + if (em_core_mask_isset(i, poll_drain_mask) && + !em_core_mask_isset(i, logic_mask)) + return EM_ERR_OPERATION_FAILED; + } + return EM_OK; +} + +em_status_t +input_poll_init(const em_core_mask_t *logic_mask, const em_conf_t *conf) +{ + return poll_drain_mask_check(logic_mask, + &conf->input.input_poll_mask); +} + +em_status_t +output_drain_init(const em_core_mask_t *logic_mask, const em_conf_t *conf) +{ + return poll_drain_mask_check(logic_mask, + &conf->output.output_drain_mask); +} + +em_status_t +poll_drain_mask_set_local(bool *const result /*out*/, int core_id, + const em_core_mask_t *mask) +{ + if (em_core_mask_iszero(mask) || em_core_mask_isset(core_id, mask)) + *result = true; + else + *result = false; + return EM_OK; +} + +em_status_t +input_poll_init_local(bool *const result /*out*/, int core_id, + const em_conf_t *conf) +{ + if (conf->input.input_poll_fn == NULL) { + *result = false; + return EM_OK; + } + return poll_drain_mask_set_local(result, core_id, + &conf->input.input_poll_mask); +} + +em_status_t +output_drain_init_local(bool *const result /*out*/, int core_id, + const em_conf_t *conf) +{ + if (conf->output.output_drain_fn == NULL) { + *result = false; + return EM_OK; + } + return poll_drain_mask_set_local(result, core_id, + &conf->output.output_drain_mask); +} + +void core_log_fn_set(em_log_func_t func) +{ + em_locm_t *const locm = &em_locm; + + locm->log_fn = func; +} + +em_status_t init_ext_thread(void) +{ + em_locm_t *const locm = &em_locm; + odp_shm_t shm; + em_shm_t *shm_addr; + em_status_t stat = EM_OK; + + /* Make sure that em_shm is 
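As the rewritten poll_drain_mask_check() above shows, an all-zero poll/drain mask is shorthand for "all EM cores" and always passes; a non-zero mask must be a subset of the EM core (logic) mask. A hedged usage sketch (core IDs illustrative):

static em_status_t check_poll_mask_example(void)
{
	em_core_mask_t logic_mask, poll_mask;

	em_core_mask_zero(&logic_mask);
	em_core_mask_set(0, &logic_mask);
	em_core_mask_set(1, &logic_mask);	/* EM runs on cores 0-1 */

	em_core_mask_zero(&poll_mask);
	em_core_mask_set(1, &poll_mask);	/* poll input only on core 1 */

	/* EM_OK here: {1} is a subset of {0, 1}. Setting e.g. core 2 in
	 * poll_mask would yield EM_ERR_OPERATION_FAILED instead. */
	return poll_drain_mask_check(&logic_mask, &poll_mask);
}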
available in this external thread */ + shm = odp_shm_lookup("em_shm"); + RETURN_ERROR_IF(shm == ODP_SHM_INVALID, + EM_ERR_NOT_FOUND, EM_ESCOPE_INIT_CORE, + "Shared memory lookup failed!"); + + shm_addr = odp_shm_addr(shm); + RETURN_ERROR_IF(shm_addr == NULL, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, + "Shared memory ptr NULL"); + + if (shm_addr->conf.process_per_core && em_shm == NULL) + em_shm = shm_addr; + + RETURN_ERROR_IF(shm_addr != em_shm, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, + "Shared memory init fails: em_shm:%p != shm_addr:%p", + em_shm, shm_addr); + + stat = emcli_init_local(); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_INIT_CORE, + "Ext emcli_init_local() fails: %" PRI_STAT "", stat); + + /* + * Mark that this is an external thread, i.e. not an EM-core and thus + * will not participate in EM event dispatching. + */ + locm->is_external_thr = true; + + return EM_OK; +} + +em_status_t sync_api_init_local(void) +{ + em_locm_t *const locm = &em_locm; + int core = locm->core_id; + em_queue_t unsched_queue; + queue_elem_t *q_elem; + + unsched_queue = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + core); + if (unlikely(unsched_queue == EM_QUEUE_UNDEF)) + return EM_ERR_NOT_FOUND; + q_elem = queue_elem_get(unsched_queue); + if (unlikely(!q_elem)) + return EM_ERR_BAD_POINTER; + locm->sync_api.ctrl_poll.core_unsched_queue = unsched_queue; + locm->sync_api.ctrl_poll.core_unsched_qelem = q_elem; + locm->sync_api.ctrl_poll.core_odp_plain_queue = q_elem->odp_queue; + + unsched_queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE); + if (unlikely(unsched_queue == EM_QUEUE_UNDEF)) + return EM_ERR_NOT_FOUND; + q_elem = queue_elem_get(unsched_queue); + if (unlikely(!q_elem)) + return EM_ERR_BAD_POINTER; + locm->sync_api.ctrl_poll.shared_unsched_queue = unsched_queue; + locm->sync_api.ctrl_poll.shared_unsched_qelem = q_elem; + locm->sync_api.ctrl_poll.shared_odp_plain_queue = q_elem->odp_queue; + + locm->sync_api.in_progress = false; + + return EM_OK; +} diff --git a/src/em_init.h b/src/em_init.h index 62ae5f35..0fba4bf1 100644 --- a/src/em_init.h +++ b/src/em_init.h @@ -1,161 +1,175 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
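init_ext_thread() above lets a thread that is not an EM-core attach to EM shared memory and use EM APIs. A hypothetical sketch of such an external thread (the ODP per-thread init is assumed to be handled by the caller and is elided):

static void *ext_thread_fn(void *arg)
{
	(void)arg;
	/* ODP local init for this thread assumed done beforehand */

	if (init_ext_thread() != EM_OK)
		return NULL;	/* EM shared memory not available */

	/* This thread may now call EM APIs, but it is marked
	 * 'is_external_thr' and never dispatches EM events. */
	return NULL;
}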
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef EM_INIT_H_ -#define EM_INIT_H_ - -/** - * @file - * EM internal initialization types & definitions - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Initialization status info - */ -typedef struct { - /** init lock */ - env_spinlock_t lock; - /** Is em_init() completed? */ - int em_init_done; - /** The number of EM cores that have run em_init_core() */ - int em_init_core_cnt; -} init_t; - -/** - * EM config options read from the config file. - * - * See the config/em-odp.conf file for description of the options. - */ -typedef struct { - struct { - int statistics_enable; /* true/false */ - unsigned int align_offset; /* bytes */ - unsigned int pkt_headroom; /* bytes */ - size_t user_area_size; /* bytes */ - } pool; - - struct { - bool create_core_queue_groups; - } queue_group; - - struct { - unsigned int min_events_default; /* default min nbr of events */ - struct { - int map_mode; - int custom_map[EM_QUEUE_PRIO_NUM]; - } priority; - } queue; - - struct { - unsigned int num_order_queues; - } event_chaining; - - struct { - int enable; - int store_state; - int store_first_u32; - int prealloc_pools; - } esv; - - struct { - int enable; - const char *ip_addr; - int port; - } cli; - - struct { - unsigned int poll_ctrl_interval; - uint64_t poll_ctrl_interval_ns; - /** convert option 'poll_ctrl_interval_ns' to odp_time_t */ - odp_time_t poll_ctrl_interval_time; - - unsigned int poll_drain_interval; - uint64_t poll_drain_interval_ns; - odp_time_t poll_drain_interval_time; - } dispatch; -} opt_t; - -em_status_t -poll_drain_mask_check(const em_core_mask_t *logic_mask, - const em_core_mask_t *poll_drain_mask); - -em_status_t -input_poll_init(const em_core_mask_t *logic_mask, const em_conf_t *conf); - -em_status_t -output_drain_init(const em_core_mask_t *logic_mask, const em_conf_t *conf); - -em_status_t -poll_drain_mask_set_local(bool *const result /*out*/, int core_id, - const em_core_mask_t *mask); - -em_status_t -input_poll_init_local(bool *const result /*out*/, int core_id, - const em_conf_t *conf); - -em_status_t -output_drain_init_local(bool *const result /*out*/, int core_id, - const em_conf_t *conf); - -/** - * Set EM core local log function. - * - * Called by EM-core (= process, thread or bare metal core) when a - * different log function than EM internal log is needed. - * - */ -void -core_log_fn_set(em_log_func_t func); - -/** - * Initialize a thread external to EM. - * - * This function makes sure that EM shared memory has been setup properly before - * an EM external thread is created. - * - * Must be called once by non EM core which wants to access EM shared memory or - * use EM APIs. - * - * @return EM_OK if successful. - */ -em_status_t init_ext_thread(void); - -em_status_t sync_api_init_local(void); - -#ifdef __cplusplus -} -#endif - -#endif /* EM_INIT_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EM_INIT_H_ +#define EM_INIT_H_ + +/** + * @file + * EM internal initialization types & definitions + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Initialization status info + */ +typedef struct { + /** init lock */ + env_spinlock_t lock; + /** Is em_init() completed? */ + int em_init_done; + /** The number of EM cores that have run em_init_core() */ + int em_init_core_cnt; +} init_t; + +/** + * Pool configuration + */ +typedef struct { + em_pool_t pool; + char name[EM_POOL_NAME_LEN]; + em_pool_cfg_t cfg; +} startup_pool_conf_t; + +/** + * EM config options read from the config file. + * + * See the config/em-odp.conf file for description of the options. 
+ */ +typedef struct { + struct { + int statistics_enable; /* true/false */ + unsigned int align_offset; /* bytes */ + unsigned int pkt_headroom; /* bytes */ + size_t user_area_size; /* bytes */ + } pool; + + struct { + bool create_core_queue_groups; + } queue_group; + + struct { + unsigned int min_events_default; /* default min nbr of events */ + struct { + int map_mode; + int custom_map[EM_QUEUE_PRIO_NUM]; + } priority; + } queue; + + struct { + bool order_keep; + unsigned int num_order_queues; + } event_chaining; + + struct { + int enable; + int store_state; + int store_first_u32; + int prealloc_pools; + } esv; + + struct { + int enable; + const char *ip_addr; + int port; + } cli; + + struct { + unsigned int poll_ctrl_interval; + uint64_t poll_ctrl_interval_ns; + /** convert option 'poll_ctrl_interval_ns' to odp_time_t */ + odp_time_t poll_ctrl_interval_time; + + unsigned int poll_drain_interval; + uint64_t poll_drain_interval_ns; + odp_time_t poll_drain_interval_time; + } dispatch; + struct { + uint32_t num; + startup_pool_conf_t conf[EM_CONFIG_POOLS]; + } startup_pools; +} opt_t; + +em_status_t +poll_drain_mask_check(const em_core_mask_t *logic_mask, + const em_core_mask_t *poll_drain_mask); + +em_status_t +input_poll_init(const em_core_mask_t *logic_mask, const em_conf_t *conf); + +em_status_t +output_drain_init(const em_core_mask_t *logic_mask, const em_conf_t *conf); + +em_status_t +poll_drain_mask_set_local(bool *const result /*out*/, int core_id, + const em_core_mask_t *mask); + +em_status_t +input_poll_init_local(bool *const result /*out*/, int core_id, + const em_conf_t *conf); + +em_status_t +output_drain_init_local(bool *const result /*out*/, int core_id, + const em_conf_t *conf); + +/** + * Set EM core local log function. + * + * Called by EM-core (= process, thread or bare metal core) when a + * different log function than EM internal log is needed. + * + */ +void +core_log_fn_set(em_log_func_t func); + +/** + * Initialize a thread external to EM. + * + * This function makes sure that EM shared memory has been setup properly before + * an EM external thread is created. + * + * Must be called once by non EM core which wants to access EM shared memory or + * use EM APIs. + * + * @return EM_OK if successful. + */ +em_status_t init_ext_thread(void); + +em_status_t sync_api_init_local(void); + +#ifdef __cplusplus +} +#endif + +#endif /* EM_INIT_H_ */ diff --git a/src/em_internal_event.c b/src/em_internal_event.c index c0335f6d..d6fb8aee 100644 --- a/src/em_internal_event.c +++ b/src/em_internal_event.c @@ -1,531 +1,528 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2014-2016, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
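The new startup_pools options above (startup_pool_conf_t and opt_t.startup_pools) carry pool configurations parsed from the config file. A hypothetical sketch of how such parsed options could be turned into pools at init time (create_startup_pools() is illustrative, not the actual em-odp routine):

static void create_startup_pools(const opt_t *opt)
{
	for (uint32_t i = 0; i < opt->startup_pools.num; i++) {
		const startup_pool_conf_t *conf =
			&opt->startup_pools.conf[i];
		/* Request the configured pool handle and name */
		em_pool_t pool = em_pool_create(conf->name, conf->pool,
						&conf->cfg);
		if (unlikely(pool == EM_POOL_UNDEF))
			return;	/* error handling elided */
	}
}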
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * EM Internal Control - */ - -#include "em_include.h" - -static void -i_event__internal_done(const internal_event_t *i_ev); - -/** - * Sends an internal control event to each core set in 'mask'. - * - * When all cores set in 'mask' has processed the event an additional - * internal 'done' msg is sent to synchronize - this done-event can then - * trigger any notifications for the user that the operation was completed. - * Processing of the 'done' event will also call the 'f_done_callback' - * function if given. - * - * @return 0 on success, otherwise return the number of ctrl events that could - * not be sent due to error - */ -int -send_core_ctrl_events(const em_core_mask_t *const mask, em_event_t ctrl_event, - void (*f_done_callback)(void *arg_ptr), - void *f_done_arg_ptr, - int num_notif, const em_notif_t notif_tbl[], - bool sync_operation) -{ - em_status_t err; - em_event_group_t event_group = EM_EVENT_GROUP_UNDEF; - const internal_event_t *i_event = em_event_pointer(ctrl_event); - const int core_count = em_core_count(); /* All running EM cores */ - const int mask_count = em_core_mask_count(mask); /* Subset of cores*/ - int alloc_count = 0; - int sent_count = 0; - int unsent_count = mask_count; - int first_qidx; - int i; - em_event_t events[mask_count]; - - if (unlikely(num_notif > EM_EVENT_GROUP_MAX_NOTIF)) { - INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_INTERNAL_NOTIF, - "Too large notif table (%i)", num_notif); - return unsent_count; - } - - /* - * Set up internal notification when all cores are done. - */ - event_group = internal_done_w_notif_req(mask_count /*=evgrp count*/, - f_done_callback, f_done_arg_ptr, - num_notif, notif_tbl, - sync_operation); - if (unlikely(event_group == EM_EVENT_GROUP_UNDEF)) { - INTERNAL_ERROR(EM_ERR_NOT_FREE, EM_ESCOPE_INTERNAL_NOTIF, - "Internal 'done' notif setup failed"); - return unsent_count; - } - - /* - * Allocate ctrl events to be sent to the concerned cores. - * Reuse the input ctrl_event later so alloc one less. - * Copy content from input ctrl_event into all allocated events. 
- */ - for (i = 0; i < mask_count - 1; i++) { - events[i] = em_alloc(sizeof(internal_event_t), - EM_EVENT_TYPE_SW, - EM_POOL_DEFAULT); - if (unlikely(events[i] == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, - EM_ESCOPE_INTERNAL_NOTIF, - "Internal event alloc failed"); - goto err_free_resources; - } - alloc_count++; - - internal_event_t *i_event_tmp = em_event_pointer(events[i]); - /* Copy input event content */ - *i_event_tmp = *i_event; - } - /* Reuse the input event */ - events[i] = ctrl_event; - /* don't increment alloc_count++, caller frees input event on error */ - - /* - * Send ctrl events to the concerned cores - */ - first_qidx = queue_id2idx(FIRST_INTERNAL_UNSCHED_QUEUE); - - for (i = 0; i < core_count; i++) { - if (em_core_mask_isset(i, mask)) { - /* - * Send copy to each core-specific queue, - * track completion using an event group. - */ - err = em_send_group(events[sent_count], - queue_idx2hdl(first_qidx + i), - event_group); - if (unlikely(err != EM_OK)) { - INTERNAL_ERROR(err, EM_ESCOPE_INTERNAL_NOTIF, - "Event group send failed"); - goto err_free_resources; - } - sent_count++; - unsent_count--; - } - } - - return 0; /* Success, all ctrl events sent */ - - /* Error handling, free resources */ -err_free_resources: - for (i = sent_count; i < alloc_count; i++) - em_free(events[i]); - evgrp_abort_delete(event_group); - return unsent_count; -} - -/** - * Receive function for handling internal ctrl events, called by the daemon EO - */ -void internal_event_receive(void *eo_ctx, em_event_t event, em_event_type_t type, - em_queue_t queue, void *q_ctx) -{ - /* currently unused args */ - (void)eo_ctx; - (void)type; - (void)q_ctx; - - internal_event_t *i_event = em_event_pointer(event); - - if (unlikely(!i_event)) { - if (event != EM_EVENT_UNDEF) - em_free(event); /* unrecognized odp event type? 
*/ - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_INTERNAL_EVENT_RECV_FUNC, - "Q:%" PRI_QUEUE ": Invalid event, evptr NULL", queue); - return; - } - - switch (i_event->id) { - /* - * Internal Done event - */ - case EM_INTERNAL_DONE: - i_event__internal_done(i_event); - break; - - /* - * Internal event related to Queue Group modification: add a core - */ - case QUEUE_GROUP_ADD_REQ: - i_event__qgrp_add_core_req(i_event); - break; - - /* - * Internal event related to Queue Group modification: remove a core - */ - case QUEUE_GROUP_REM_REQ: - i_event__qgrp_rem_core_req(i_event); - break; - /* - * Internal events related to EO local start&stop functionality - */ - case EO_START_LOCAL_REQ: - case EO_START_SYNC_LOCAL_REQ: - case EO_STOP_LOCAL_REQ: - case EO_STOP_SYNC_LOCAL_REQ: - case EO_REM_QUEUE_LOCAL_REQ: - case EO_REM_QUEUE_SYNC_LOCAL_REQ: - case EO_REM_QUEUE_ALL_LOCAL_REQ: - case EO_REM_QUEUE_ALL_SYNC_LOCAL_REQ: - i_event__eo_local_func_call_req(i_event); - break; - - default: - INTERNAL_ERROR(EM_ERR_BAD_ID, - EM_ESCOPE_INTERNAL_EVENT_RECV_FUNC, - "Internal ev-id:0x%" PRIx64 " Q:%" PRI_QUEUE "", - i_event->id, queue); - break; - } - - i_event->id = 0; - em_free(event); -} - -/** - * Handle the internal 'done' event - */ -static void -i_event__internal_done(const internal_event_t *i_ev) -{ - int num_notif; - em_status_t ret; - - /* Release the event group, we are done with it */ - ret = em_event_group_delete(i_ev->done.event_group); - - if (unlikely(ret != EM_OK)) - INTERNAL_ERROR(ret, EM_ESCOPE_EVENT_INTERNAL_DONE, - "Event group %" PRI_EGRP " delete failed (ret=%u)", - i_ev->done.event_group, ret); - - /* Call the callback function, performs custom actions at 'done' */ - if (i_ev->done.f_done_callback != NULL) - i_ev->done.f_done_callback(i_ev->done.f_done_arg_ptr); - - /* - * Send notification events if requested by the caller. - */ - num_notif = i_ev->done.num_notif; - - if (num_notif > 0) { - ret = send_notifs(num_notif, i_ev->done.notif_tbl); - if (unlikely(ret != EM_OK)) - INTERNAL_ERROR(ret, EM_ESCOPE_EVENT_INTERNAL_DONE, - "em_send() of notifs(%d) failed", - num_notif); - } -} - -/** - * Helper func: Allocate & set up the internal 'done' event with - * function callbacks and notification events. Creates the needed event group - * and applies the event group count. A successful setup returns the event group - * ready for use with em_send_group(). - * - * @return An event group successfully 'applied' with count and notifications. - * @retval EM_EVENT_GROUP_UNDEF on error - * - * @see evgrp_abort_delete() below for deleting the event group returned by this - * function. 
- */ -em_event_group_t -internal_done_w_notif_req(int event_group_count, - void (*f_done_callback)(void *arg_ptr), - void *f_done_arg_ptr, - int num_notif, const em_notif_t notif_tbl[], - bool sync_operation) -{ - em_event_group_t event_group; - em_event_t event; - internal_event_t *i_event; - em_notif_t i_notif; - em_status_t err; - int i; - - event = em_alloc(sizeof(internal_event_t), EM_EVENT_TYPE_SW, - EM_POOL_DEFAULT); - if (unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, - EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ, - "Internal event 'DONE' alloc failed!"); - return EM_EVENT_GROUP_UNDEF; - } - - event_group = em_event_group_create(); - if (unlikely(event_group == EM_EVENT_GROUP_UNDEF)) { - em_free(event); - INTERNAL_ERROR(EM_ERR_NOT_FREE, - EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ, - "Event group create failed!"); - return EM_EVENT_GROUP_UNDEF; - } - - i_event = em_event_pointer(event); - i_event->id = EM_INTERNAL_DONE; - i_event->done.event_group = event_group; - i_event->done.f_done_callback = f_done_callback; - i_event->done.f_done_arg_ptr = f_done_arg_ptr; - i_event->done.num_notif = num_notif; - - for (i = 0; i < num_notif; i++) { - i_event->done.notif_tbl[i].event = notif_tbl[i].event; - i_event->done.notif_tbl[i].queue = notif_tbl[i].queue; - i_event->done.notif_tbl[i].egroup = notif_tbl[i].egroup; - } - - i_notif.event = event; - if (sync_operation) { - i_notif.queue = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + - em_core_id()); - } else { - i_notif.queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE); - } - i_notif.egroup = EM_EVENT_GROUP_UNDEF; - - /* - * Request sending of EM_INTERNAL_DONE when 'event_group_count' events - * in 'event_group' have been seen. The 'Done' event will trigger the - * notifications to be sent. - */ - err = em_event_group_apply(event_group, event_group_count, - 1, &i_notif); - if (unlikely(err != EM_OK)) { - INTERNAL_ERROR(err, EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ, - "Event group apply failed"); - em_free(event); - (void)em_event_group_delete(event_group); - return EM_EVENT_GROUP_UNDEF; - } - - return event_group; -} - -/** - * @brief internal_done_w_notif_req() 'companion' to abort and delete the - * event group created by the mentioned function. 
- */ -void evgrp_abort_delete(em_event_group_t event_group) -{ - em_notif_t free_notif_tbl[EM_EVENT_GROUP_MAX_NOTIF]; - - int num = em_event_group_get_notif(event_group, - EM_EVENT_GROUP_MAX_NOTIF, - free_notif_tbl); - em_status_t err = em_event_group_abort(event_group); - - if (err == EM_OK && num > 0) { - for (int i = 0; i < num; i++) - em_free(free_notif_tbl[i].event); - } - (void)em_event_group_delete(event_group); -} - -/** - * Helper func to send notifications events - */ -em_status_t -send_notifs(const int num_notif, const em_notif_t notif_tbl[]) -{ - int i; - em_status_t err; - em_status_t ret = EM_OK; - - for (i = 0; i < num_notif; i++) { - const em_event_t event = notif_tbl[i].event; - const em_queue_t queue = notif_tbl[i].queue; - const em_event_group_t egrp = notif_tbl[i].egroup; - - /* 'egroup' may be uninit in old appl code, check */ - if (invalid_egrp(egrp)) - err = em_send(event, queue); - else - err = em_send_group(event, queue, egrp); - - if (unlikely(err != EM_OK)) { - em_free(event); - if (ret == EM_OK) - ret = err; /* return the first error */ - } - } - - return ret; -} - -em_status_t -check_notif(const em_notif_t *const notif) -{ - if (unlikely(notif == NULL || notif->event == EM_EVENT_UNDEF)) - return EM_ERR_BAD_POINTER; - - const bool is_external = queue_external(notif->queue); - - if (!is_external) { - const queue_elem_t *q_elem = queue_elem_get(notif->queue); - - if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) - return EM_ERR_NOT_FOUND; - } - - if (notif->egroup != EM_EVENT_GROUP_UNDEF) { - const event_group_elem_t *egrp_elem = - event_group_elem_get(notif->egroup); - - if (unlikely(egrp_elem == NULL || - !event_group_allocated(egrp_elem))) - return EM_ERR_BAD_ID; - } - - return EM_OK; -} - -em_status_t -check_notif_tbl(const int num_notif, const em_notif_t notif_tbl[]) -{ - em_status_t err; - int i; - - if (unlikely((unsigned int)num_notif > EM_EVENT_GROUP_MAX_NOTIF)) - return EM_ERR_TOO_LARGE; - - if (unlikely(num_notif > 0 && notif_tbl == NULL)) - return EM_ERR_BAD_POINTER; - - for (i = 0; i < num_notif; i++) { - err = check_notif(¬if_tbl[i]); - if (unlikely(err != EM_OK)) - return err; - } - - return EM_OK; -} - -/** - * @brief Helper for poll_unsched_ctrl_queue() - */ -static inline void -handle_ctrl_events(em_queue_t unsched_queue, - const em_event_t ev_tbl[], const int num) -{ - em_locm_t *const locm = &em_locm; - event_hdr_t *evhdr_tbl[num]; - - event_to_hdr_multi(ev_tbl, evhdr_tbl/*out*/, num); - - for (int i = 0; i < num; i++) { - /* - * Simulate a dispatch-round for the core-local ctrl event. - * Dispatch an unscheduled event as scheduled, be careful! - * Don't call dispatch enter/exit callbacks here. - */ - em_event_t event = ev_tbl[i]; - const event_hdr_t *ev_hdr = evhdr_tbl[i]; - em_event_type_t event_type = ev_hdr->event_type; - - /* Check and set core local event group */ - event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1); - - internal_event_receive(NULL, event, event_type, - unsched_queue, NULL); - - /* - * Event belongs to an event_group, update the count and - * if requested send notifications - */ - if (locm->current.egrp != EM_EVENT_GROUP_UNDEF) { - /* - * Atomically decrease the event group count. - * If the new count is zero, send notification events. 
- */ - event_group_count_decrement(1); - } - locm->current.egrp = EM_EVENT_GROUP_UNDEF; - } -} - -void poll_unsched_ctrl_queue(void) -{ - em_locm_t *const locm = &em_locm; - - queue_elem_t *core_unsch_qelem = locm->sync_api.ctrl_poll.core_unsched_qelem; - em_queue_t core_unsched_queue = locm->sync_api.ctrl_poll.core_unsched_queue; - - queue_elem_t *shared_unsch_qelem = locm->sync_api.ctrl_poll.shared_unsched_qelem; - em_queue_t shared_unsched_queue = locm->sync_api.ctrl_poll.shared_unsched_queue; - - em_locm_current_t current; - - const int deq_max = 16; - em_event_t core_ev_tbl[deq_max]; - em_event_t shared_ev_tbl[deq_max]; - int core_num; - int shared_num; - int round = 0; - - do { - core_num = queue_dequeue_multi(core_unsch_qelem, - core_ev_tbl/*out*/, deq_max); - shared_num = queue_dequeue_multi(shared_unsch_qelem, - shared_ev_tbl/*out*/, deq_max); - if (core_num <= 0 && shared_num <= 0) - break; /* no ctrl events, exit loop */ - - /* Save local current state the first time only */ - if (round == 0) { - current = locm->current; /* save */ - locm->current.rcv_multi_cnt = 1; - locm->current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE; - } - - if (core_num > 0) { - locm->current.q_elem = core_unsch_qelem; - locm->current.sched_q_elem = core_unsch_qelem; - handle_ctrl_events(core_unsched_queue, core_ev_tbl, core_num); - } - if (shared_num > 0) { - locm->current.q_elem = shared_unsch_qelem; - locm->current.sched_q_elem = shared_unsch_qelem; - handle_ctrl_events(shared_unsched_queue, shared_ev_tbl, shared_num); - } - - round++; - } while (true); - - if (round > 0) - locm->current = current; /* restore */ -} +/* + * Copyright (c) 2012, Nokia Siemens Networks + * Copyright (c) 2014-2016, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * EM Internal Control + */ + +#include "em_include.h" + +static void +i_event__internal_done(const internal_event_t *i_ev); + +/** + * Sends an internal control event to each core set in 'mask'. 
+ * + * When all cores set in 'mask' has processed the event an additional + * internal 'done' msg is sent to synchronize - this done-event can then + * trigger any notifications for the user that the operation was completed. + * Processing of the 'done' event will also call the 'f_done_callback' + * function if given. + * + * @return 0 on success, otherwise return the number of ctrl events that could + * not be sent due to error + */ +int +send_core_ctrl_events(const em_core_mask_t *const mask, em_event_t ctrl_event, + void (*f_done_callback)(void *arg_ptr), + void *f_done_arg_ptr, + int num_notif, const em_notif_t notif_tbl[], + bool sync_operation) +{ + em_status_t err; + em_event_group_t event_group = EM_EVENT_GROUP_UNDEF; + const internal_event_t *i_event = em_event_pointer(ctrl_event); + const int core_count = em_core_count(); /* All running EM cores */ + const int mask_count = em_core_mask_count(mask); /* Subset of cores*/ + int alloc_count = 0; + int sent_count = 0; + int unsent_count = mask_count; + int first_qidx; + int i; + em_event_t events[mask_count]; + + if (unlikely(num_notif > EM_EVENT_GROUP_MAX_NOTIF)) { + INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_INTERNAL_NOTIF, + "Too large notif table (%i)", num_notif); + return unsent_count; + } + + /* + * Set up internal notification when all cores are done. + */ + event_group = internal_done_w_notif_req(mask_count /*=evgrp count*/, + f_done_callback, f_done_arg_ptr, + num_notif, notif_tbl, + sync_operation); + if (unlikely(event_group == EM_EVENT_GROUP_UNDEF)) { + INTERNAL_ERROR(EM_ERR_NOT_FREE, EM_ESCOPE_INTERNAL_NOTIF, + "Internal 'done' notif setup failed"); + return unsent_count; + } + + /* + * Allocate ctrl events to be sent to the concerned cores. + * Reuse the input ctrl_event later so alloc one less. + * Copy content from input ctrl_event into all allocated events. + */ + for (i = 0; i < mask_count - 1; i++) { + events[i] = em_alloc(sizeof(internal_event_t), + EM_EVENT_TYPE_SW, + EM_POOL_DEFAULT); + if (unlikely(events[i] == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, + EM_ESCOPE_INTERNAL_NOTIF, + "Internal event alloc failed"); + goto err_free_resources; + } + alloc_count++; + + internal_event_t *i_event_tmp = em_event_pointer(events[i]); + /* Copy input event content */ + *i_event_tmp = *i_event; + } + /* Reuse the input event */ + events[i] = ctrl_event; + /* don't increment alloc_count++, caller frees input event on error */ + + /* + * Send ctrl events to the concerned cores + */ + first_qidx = queue_id2idx(FIRST_INTERNAL_UNSCHED_QUEUE); + + for (i = 0; i < core_count; i++) { + if (em_core_mask_isset(i, mask)) { + /* + * Send copy to each core-specific queue, + * track completion using an event group. 
+ */ + err = em_send_group(events[sent_count], + queue_idx2hdl(first_qidx + i), + event_group); + if (unlikely(err != EM_OK)) { + INTERNAL_ERROR(err, EM_ESCOPE_INTERNAL_NOTIF, + "Event group send failed"); + goto err_free_resources; + } + sent_count++; + unsent_count--; + } + } + + return 0; /* Success, all ctrl events sent */ + + /* Error handling, free resources */ +err_free_resources: + for (i = sent_count; i < alloc_count; i++) + em_free(events[i]); + evgrp_abort_delete(event_group); + return unsent_count; +} + +/** + * Receive function for handling internal ctrl events, called by the daemon EO + */ +void internal_event_receive(void *eo_ctx, em_event_t event, em_event_type_t type, + em_queue_t queue, void *q_ctx) +{ + /* currently unused args */ + (void)eo_ctx; + (void)type; + (void)q_ctx; + + internal_event_t *i_event = em_event_pointer(event); + + if (unlikely(!i_event)) { + if (event != EM_EVENT_UNDEF) + em_free(event); /* unrecognized odp event type? */ + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_INTERNAL_EVENT_RECV_FUNC, + "Q:%" PRI_QUEUE ": Invalid event, evptr NULL", queue); + return; + } + + switch (i_event->id) { + /* + * Internal Done event + */ + case EM_INTERNAL_DONE: + i_event__internal_done(i_event); + break; + + /* + * Internal event related to Queue Group modification: add a core + */ + case QUEUE_GROUP_ADD_REQ: + i_event__qgrp_add_core_req(i_event); + break; + + /* + * Internal event related to Queue Group modification: remove a core + */ + case QUEUE_GROUP_REM_REQ: + i_event__qgrp_rem_core_req(i_event); + break; + /* + * Internal events related to EO local start&stop functionality + */ + case EO_START_LOCAL_REQ: + case EO_START_SYNC_LOCAL_REQ: + case EO_STOP_LOCAL_REQ: + case EO_STOP_SYNC_LOCAL_REQ: + case EO_REM_QUEUE_LOCAL_REQ: + case EO_REM_QUEUE_SYNC_LOCAL_REQ: + case EO_REM_QUEUE_ALL_LOCAL_REQ: + case EO_REM_QUEUE_ALL_SYNC_LOCAL_REQ: + i_event__eo_local_func_call_req(i_event); + break; + + default: + INTERNAL_ERROR(EM_ERR_BAD_ID, + EM_ESCOPE_INTERNAL_EVENT_RECV_FUNC, + "Internal ev-id:0x%" PRIx64 " Q:%" PRI_QUEUE "", + i_event->id, queue); + break; + } + + i_event->id = 0; + em_free(event); +} + +/** + * Handle the internal 'done' event + */ +static void +i_event__internal_done(const internal_event_t *i_ev) +{ + int num_notif; + em_status_t ret; + + /* Release the event group, we are done with it */ + ret = em_event_group_delete(i_ev->done.event_group); + + if (unlikely(ret != EM_OK)) + INTERNAL_ERROR(ret, EM_ESCOPE_EVENT_INTERNAL_DONE, + "Event group %" PRI_EGRP " delete failed (ret=%u)", + i_ev->done.event_group, ret); + + /* Call the callback function, performs custom actions at 'done' */ + if (i_ev->done.f_done_callback != NULL) + i_ev->done.f_done_callback(i_ev->done.f_done_arg_ptr); + + /* + * Send notification events if requested by the caller. + */ + num_notif = i_ev->done.num_notif; + + if (num_notif > 0) { + ret = send_notifs(num_notif, i_ev->done.notif_tbl); + if (unlikely(ret != EM_OK)) + INTERNAL_ERROR(ret, EM_ESCOPE_EVENT_INTERNAL_DONE, + "em_send() of notifs(%d) failed", + num_notif); + } +} + +/** + * Helper func: Allocate & set up the internal 'done' event with + * function callbacks and notification events. Creates the needed event group + * and applies the event group count. A successful setup returns the event group + * ready for use with em_send_group(). + * + * @return An event group successfully 'applied' with count and notifications. 
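A hypothetical caller-side sketch of send_core_ctrl_events() above: allocate one internal ctrl event, pick the target cores and send. A non-zero return means that many ctrl events were never sent and, per the comment in the function, the caller still owns and must free the input event:

static void send_qgrp_add_req_example(void)
{
	em_core_mask_t mask;

	em_core_mask_zero(&mask);
	em_core_mask_set(0, &mask);
	em_core_mask_set(1, &mask);	/* target EM-cores 0 and 1 */

	em_event_t ev = em_alloc(sizeof(internal_event_t),
				 EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
	if (ev == EM_EVENT_UNDEF)
		return;

	internal_event_t *i_ev = em_event_pointer(ev);

	i_ev->id = QUEUE_GROUP_ADD_REQ; /* one of the ids handled above */
	/* ...request-specific fields elided... */

	int unsent = send_core_ctrl_events(&mask, ev, NULL, NULL,
					   0, NULL, false);
	if (unsent)
		em_free(ev);	/* caller frees the input event on error */
}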
+ * @retval EM_EVENT_GROUP_UNDEF on error + * + * @see evgrp_abort_delete() below for deleting the event group returned by this + * function. + */ +em_event_group_t +internal_done_w_notif_req(int event_group_count, + void (*f_done_callback)(void *arg_ptr), + void *f_done_arg_ptr, + int num_notif, const em_notif_t notif_tbl[], + bool sync_operation) +{ + em_event_group_t event_group; + em_event_t event; + internal_event_t *i_event; + em_notif_t i_notif; + em_status_t err; + + event = em_alloc(sizeof(internal_event_t), EM_EVENT_TYPE_SW, + EM_POOL_DEFAULT); + if (unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, + EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ, + "Internal event 'DONE' alloc failed!"); + return EM_EVENT_GROUP_UNDEF; + } + + event_group = em_event_group_create(); + if (unlikely(event_group == EM_EVENT_GROUP_UNDEF)) { + em_free(event); + INTERNAL_ERROR(EM_ERR_NOT_FREE, + EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ, + "Event group create failed!"); + return EM_EVENT_GROUP_UNDEF; + } + + i_event = em_event_pointer(event); + i_event->id = EM_INTERNAL_DONE; + i_event->done.event_group = event_group; + i_event->done.f_done_callback = f_done_callback; + i_event->done.f_done_arg_ptr = f_done_arg_ptr; + i_event->done.num_notif = num_notif; + + for (int i = 0; i < num_notif; i++) { + i_event->done.notif_tbl[i].event = notif_tbl[i].event; + i_event->done.notif_tbl[i].queue = notif_tbl[i].queue; + i_event->done.notif_tbl[i].egroup = notif_tbl[i].egroup; + } + + i_notif.event = event; + if (sync_operation) { + i_notif.queue = queue_id2hdl(FIRST_INTERNAL_UNSCHED_QUEUE + + em_core_id()); + } else { + i_notif.queue = queue_id2hdl(SHARED_INTERNAL_UNSCHED_QUEUE); + } + i_notif.egroup = EM_EVENT_GROUP_UNDEF; + + /* + * Request sending of EM_INTERNAL_DONE when 'event_group_count' events + * in 'event_group' have been seen. The 'Done' event will trigger the + * notifications to be sent. + */ + err = em_event_group_apply(event_group, event_group_count, + 1, &i_notif); + if (unlikely(err != EM_OK)) { + INTERNAL_ERROR(err, EM_ESCOPE_INTERNAL_DONE_W_NOTIF_REQ, + "Event group apply failed"); + em_free(event); + (void)em_event_group_delete(event_group); + return EM_EVENT_GROUP_UNDEF; + } + + return event_group; +} + +/** + * @brief internal_done_w_notif_req() 'companion' to abort and delete the + * event group created by the mentioned function. 
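The pair internal_done_w_notif_req()/evgrp_abort_delete() above forms a small pattern: apply an event group for N tracked events, send those events with em_send_group(), and on any send error abort the group so the pending 'done' notification event is freed. A hedged sketch ('ev1', 'ev2' and 'queue' are hypothetical arguments):

static void send_two_tracked(em_event_t ev1, em_event_t ev2, em_queue_t queue)
{
	em_event_group_t egrp =
		internal_done_w_notif_req(2 /* tracked events */, NULL, NULL,
					  0, NULL, false);
	if (egrp == EM_EVENT_GROUP_UNDEF)
		return;	/* setup failed */

	if (em_send_group(ev1, queue, egrp) != EM_OK ||
	    em_send_group(ev2, queue, egrp) != EM_OK) {
		/* abort: frees the pending notif events, deletes the group */
		evgrp_abort_delete(egrp);
		return;
	}
	/* When both events have been processed, EM_INTERNAL_DONE is sent
	 * and triggers any user notifications. */
}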
+ */ +void evgrp_abort_delete(em_event_group_t event_group) +{ + em_notif_t free_notif_tbl[EM_EVENT_GROUP_MAX_NOTIF]; + + int num = em_event_group_get_notif(event_group, + EM_EVENT_GROUP_MAX_NOTIF, + free_notif_tbl); + em_status_t err = em_event_group_abort(event_group); + + if (err == EM_OK && num > 0) { + for (int i = 0; i < num; i++) + em_free(free_notif_tbl[i].event); + } + (void)em_event_group_delete(event_group); +} + +/** + * Helper func to send notifications events + */ +em_status_t +send_notifs(const int num_notif, const em_notif_t notif_tbl[]) +{ + em_status_t err; + em_status_t ret = EM_OK; + + for (int i = 0; i < num_notif; i++) { + const em_event_t event = notif_tbl[i].event; + const em_queue_t queue = notif_tbl[i].queue; + const em_event_group_t egrp = notif_tbl[i].egroup; + + /* 'egroup' may be uninit in old appl code, check */ + if (invalid_egrp(egrp)) + err = em_send(event, queue); + else + err = em_send_group(event, queue, egrp); + + if (unlikely(err != EM_OK)) { + em_free(event); + if (ret == EM_OK) + ret = err; /* return the first error */ + } + } + + return ret; +} + +em_status_t +check_notif(const em_notif_t *const notif) +{ + if (unlikely(notif == NULL || notif->event == EM_EVENT_UNDEF)) + return EM_ERR_BAD_POINTER; + + const bool is_external = queue_external(notif->queue); + + if (!is_external) { + const queue_elem_t *q_elem = queue_elem_get(notif->queue); + + if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) + return EM_ERR_NOT_FOUND; + } + + if (notif->egroup != EM_EVENT_GROUP_UNDEF) { + const event_group_elem_t *egrp_elem = + event_group_elem_get(notif->egroup); + + if (unlikely(egrp_elem == NULL || + !event_group_allocated(egrp_elem))) + return EM_ERR_BAD_ID; + } + + return EM_OK; +} + +em_status_t +check_notif_tbl(const int num_notif, const em_notif_t notif_tbl[]) +{ + em_status_t err; + + if (unlikely((unsigned int)num_notif > EM_EVENT_GROUP_MAX_NOTIF)) + return EM_ERR_TOO_LARGE; + + if (unlikely(num_notif > 0 && notif_tbl == NULL)) + return EM_ERR_BAD_POINTER; + + for (int i = 0; i < num_notif; i++) { + err = check_notif(¬if_tbl[i]); + if (unlikely(err != EM_OK)) + return err; + } + + return EM_OK; +} + +/** + * @brief Helper for poll_unsched_ctrl_queue() + */ +static inline void +handle_ctrl_events(em_queue_t unsched_queue, + const em_event_t ev_tbl[], const int num) +{ + em_locm_t *const locm = &em_locm; + event_hdr_t *evhdr_tbl[num]; + + event_to_hdr_multi(ev_tbl, evhdr_tbl/*out*/, num); + + for (int i = 0; i < num; i++) { + /* + * Simulate a dispatch-round for the core-local ctrl event. + * Dispatch an unscheduled event as scheduled, be careful! + * Don't call dispatch enter/exit callbacks here. + */ + em_event_t event = ev_tbl[i]; + const event_hdr_t *ev_hdr = evhdr_tbl[i]; + em_event_type_t event_type = ev_hdr->event_type; + + /* Check and set core local event group */ + event_group_set_local(ev_hdr->egrp, ev_hdr->egrp_gen, 1); + + internal_event_receive(NULL, event, event_type, + unsched_queue, NULL); + + /* + * Event belongs to an event_group, update the count and + * if requested send notifications + */ + if (locm->current.egrp != EM_EVENT_GROUP_UNDEF) { + /* + * Atomically decrease the event group count. + * If the new count is zero, send notification events. 
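check_notif()/check_notif_tbl() above validate a notification table before use: each entry needs a valid event, an allocated (or external) queue, and either EM_EVENT_GROUP_UNDEF or an allocated event group. A hypothetical table that would pass ('my_queue' is illustrative):

static void notify_one_example(em_queue_t my_queue)
{
	em_notif_t notif_tbl[1];

	notif_tbl[0].event = em_alloc(64, EM_EVENT_TYPE_SW, EM_POOL_DEFAULT);
	notif_tbl[0].queue = my_queue;	/* allocated or external queue */
	notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF;	/* no nested group */

	if (notif_tbl[0].event != EM_EVENT_UNDEF &&
	    check_notif_tbl(1, notif_tbl) == EM_OK)
		(void)send_notifs(1, notif_tbl);
}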
+ */ + event_group_count_decrement(1); + } + locm->current.egrp = EM_EVENT_GROUP_UNDEF; + } +} + +void poll_unsched_ctrl_queue(void) +{ + em_locm_t *const locm = &em_locm; + + queue_elem_t *core_unsch_qelem = locm->sync_api.ctrl_poll.core_unsched_qelem; + em_queue_t core_unsched_queue = locm->sync_api.ctrl_poll.core_unsched_queue; + + queue_elem_t *shared_unsch_qelem = locm->sync_api.ctrl_poll.shared_unsched_qelem; + em_queue_t shared_unsched_queue = locm->sync_api.ctrl_poll.shared_unsched_queue; + + em_locm_current_t current; + + const int deq_max = 16; + em_event_t core_ev_tbl[deq_max]; + em_event_t shared_ev_tbl[deq_max]; + int core_num; + int shared_num; + int round = 0; + + do { + core_num = queue_dequeue_multi(core_unsch_qelem, + core_ev_tbl/*out*/, deq_max); + shared_num = queue_dequeue_multi(shared_unsch_qelem, + shared_ev_tbl/*out*/, deq_max); + if (core_num <= 0 && shared_num <= 0) + break; /* no ctrl events, exit loop */ + + /* Save local current state the first time only */ + if (round == 0) { + current = locm->current; /* save */ + locm->current.rcv_multi_cnt = 1; + locm->current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE; + } + + if (core_num > 0) { + locm->current.q_elem = core_unsch_qelem; + locm->current.sched_q_elem = core_unsch_qelem; + handle_ctrl_events(core_unsched_queue, core_ev_tbl, core_num); + } + if (shared_num > 0) { + locm->current.q_elem = shared_unsch_qelem; + locm->current.sched_q_elem = shared_unsch_qelem; + handle_ctrl_events(shared_unsched_queue, shared_ev_tbl, shared_num); + } + + round++; + } while (true); + + if (round > 0) + locm->current = current; /* restore */ +} diff --git a/src/em_libconfig.c b/src/em_libconfig.c index 008ba5ec..311aecb1 100644 --- a/src/em_libconfig.c +++ b/src/em_libconfig.c @@ -1,303 +1,577 @@ -/* - * Copyright (c) 2018, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Copyright (c) 2018, Linaro Limited - * All rights reserved. 
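poll_unsched_ctrl_queue() above is what keeps EM's synchronous APIs from stalling: a core that blocks waiting on other cores must keep draining its own and the shared unscheduled ctrl queues so that requests targeting it still make progress. A hypothetical wait-loop sketch ('sync_done' is an illustrative completion flag set elsewhere, not an em-odp symbol):

extern volatile bool sync_done;	/* hypothetical completion flag */

static void sync_api_wait_example(void)
{
	em_locm_t *const locm = &em_locm;

	locm->sync_api.in_progress = true;
	/* ...send the sync request events to the other cores... */

	while (!sync_done)	/* hypothetical completion condition */
		poll_unsched_ctrl_queue(); /* service ctrl events meanwhile */

	locm->sync_api.in_progress = false;
}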
- * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include "em_include.h" -#include "include/em_libconfig_config.h" - -int em_libconfig_init_global(libconfig_t *libconfig) -{ - const char *filename; - const char *vers; - const char *vers_rt; - const char *impl; - const char *impl_rt; - config_t *config = &libconfig->cfg_default; - config_t *config_rt = &libconfig->cfg_runtime; - const char *impl_field = "em_implementation"; - const char *vers_field = "config_file_version"; - - config_init(config); - config_init(config_rt); - libconfig->has_cfg_runtime = 0; - - if (!config_read_string(config, config_builtin)) { - EM_PRINT("Failed to read default config: %s(%d): %s\n", - config_error_file(config), config_error_line(config), - config_error_text(config)); - goto fail; - } - - filename = getenv("EM_CONFIG_FILE"); - if (filename == NULL) - return 0; - - EM_PRINT("EM CONFIG FILE: %s\n", filename); - - if (!config_read_file(config_rt, filename)) { - EM_PRINT(" ERROR: failed to read config file: %s(%d): %s\n\n", - config_error_file(config_rt), - config_error_line(config_rt), - config_error_text(config_rt)); - goto fail; - } - - /* Check runtime configuration's implementation name and version */ - if (!config_lookup_string(config, impl_field, &impl) || - !config_lookup_string(config_rt, impl_field, &impl_rt)) { - EM_PRINT(" ERROR: missing mandatory field: %s\n\n", - impl_field); - goto fail; - } - if (!config_lookup_string(config, vers_field, &vers) || - !config_lookup_string(config_rt, vers_field, &vers_rt)) { - EM_PRINT(" ERROR: missing mandatory field: %s\n\n", - vers_field); - goto fail; - } - if (strcmp(impl, impl_rt)) { - EM_PRINT(" ERROR: EM implementation name mismatch:\n" - " Expected: \"%s\"\n" - " Found: \"%s\"\n\n", impl, impl_rt); - goto fail; - } - if (strcmp(vers, vers_rt)) { - EM_PRINT(" ERROR: config file version number mismatch:\n" - " Expected: \"%s\"\n" - " Found: \"%s\"\n\n", vers, vers_rt); - goto fail; - } - - libconfig->has_cfg_runtime = 1; - return 0; -fail: - EM_PRINT("Config file failure\n"); - config_destroy(config); - config_destroy(config_rt); - return -1; -} - -int em_libconfig_term_global(libconfig_t *libconfig) -{ - config_destroy(&libconfig->cfg_default); - config_destroy(&libconfig->cfg_runtime); - - return 0; -} - -int em_libconfig_lookup_int(const libconfig_t *libconfig, const char *path, - int *value /*out*/) -{ - int ret_def = CONFIG_FALSE; - int ret_rt = CONFIG_FALSE; - - ret_def = config_lookup_int(&libconfig->cfg_default, path, value); - - /* Runtime option overrides default value */ - ret_rt = config_lookup_int(&libconfig->cfg_runtime, path, value); - - return (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) ? 1 : 0; -} - -int em_libconfig_lookup_int64(const libconfig_t *libconfig, const char *path, - int64_t *value /*out*/) -{ - int ret_def = CONFIG_FALSE; - int ret_rt = CONFIG_FALSE; - long long value_ll = 0; - - ret_def = config_lookup_int64(&libconfig->cfg_default, path, &value_ll); - - /* Runtime option overrides default value */ - ret_rt = config_lookup_int64(&libconfig->cfg_runtime, path, &value_ll); - - if (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) { - *value = (int64_t)value_ll; - return 1; /* success! 
*/ - } - - return 0; /* fail */ -} - -int em_libconfig_lookup_bool(const libconfig_t *libconfig, const char *path, - bool *value /*out*/) -{ - int ret_def = CONFIG_FALSE; - int ret_rt = CONFIG_FALSE; - int cfg_value = 0; - int ret_val = 0; - - ret_def = config_lookup_bool(&libconfig->cfg_default, path, &cfg_value); - - /* Runtime option overrides default value */ - ret_rt = config_lookup_bool(&libconfig->cfg_runtime, path, &cfg_value); - - if (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) { - *value = cfg_value ? true : false; - ret_val = 1; - } - - return ret_val; -} - -int em_libconfig_lookup_string(const libconfig_t *libconfig, const char *path, - const char **value /*out*/) -{ - int ret_def = CONFIG_FALSE; - int ret_rt = CONFIG_FALSE; - - ret_def = config_lookup_string(&libconfig->cfg_default, path, value); - - /* Runtime option overrides default value */ - ret_rt = config_lookup_string(&libconfig->cfg_runtime, path, value); - - return (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) ? 1 : 0; -} - -int em_libconfig_lookup_array(const libconfig_t *libconfig, const char *path, - int value[/*out*/], int max_num) -{ - const config_t *config; - const config_setting_t *setting; - int num; - int num_out = 0; - - for (int j = 0; j < 2; j++) { - if (j == 0) - config = &libconfig->cfg_default; - else - config = &libconfig->cfg_runtime; - - setting = config_lookup(config, path); - - /* Runtime config may not define the array, whereas - * the default config has it always defined. When the array - * is defined, it must be correctly formatted. - */ - if (setting == NULL) - continue; - - if (config_setting_is_array(setting) == CONFIG_FALSE) - return 0; - - num = config_setting_length(setting); - - if (num <= 0 || num > max_num) - return 0; - - for (int i = 0; i < num; i++) - value[i] = config_setting_get_int_elem(setting, i); - - num_out = num; - } - - /* Number of elements copied */ - return num_out; -} - -static int lookup_int(const config_t *cfg, - const char *base_path, - const char *local_path, - const char *name, - int *value /*out*/) -{ - char path[256]; - - if (local_path) { - snprintf(path, sizeof(path), "%s.%s.%s", base_path, - local_path, name); - if (config_lookup_int(cfg, path, value) == CONFIG_TRUE) - return 1; - } - - snprintf(path, sizeof(path), "%s.%s", base_path, name); - if (config_lookup_int(cfg, path, value) == CONFIG_TRUE) - return 1; - - return 0; -} - -int em_libconfig_lookup_ext_int(const libconfig_t *libconfig, - const char *base_path, const char *local_path, - const char *name, int *value /*out*/) -{ - if (lookup_int(&libconfig->cfg_runtime, - base_path, local_path, name, value)) - return 1; - - if (lookup_int(&libconfig->cfg_default, - base_path, local_path, name, value)) - return 1; - - return 0; -} - -int em_libconfig_print(const libconfig_t *libconfig) -{ - int c; - /* Temp file for config_write() output. Suppress Coverity warning about tmpfile() usage. 
*/ - /* coverity[secure_temp] */ - FILE *file = tmpfile(); - - if (file == NULL) - return -1; - - if (fprintf(file, - "\nEM_CONFIG_FILE default values:\n" - "-------------------------------\n\n") < 0) - goto fail; - - config_write(&libconfig->cfg_default, file); - - if (libconfig->has_cfg_runtime) { - if (fprintf(file, - "\nEM_CONFIG_FILE override values:\n" - "--------------------------------\n\n") < 0) - goto fail; - - config_write(&libconfig->cfg_runtime, file); - } - - /* Print temp file to the log */ - rewind(file); - while ((c = fgetc(file)) != EOF) - EM_PRINT("%c", (char)c); - - fclose(file); - return 0; - -fail: - fclose(file); - return -1; -} +/* + * Copyright (c) 2018, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Copyright (c) 2018, Linaro Limited + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "em_include.h" +#include "include/em_libconfig_config.h" + +#define SETTING_NAME_LEN 64 +#define SETTING_PATH_LEN 256 + +int em_libconfig_init_global(libconfig_t *libconfig) +{ + const char *filename; + const char *vers; + const char *vers_rt; + const char *impl; + const char *impl_rt; + config_t *config = &libconfig->cfg_default; + config_t *config_rt = &libconfig->cfg_runtime; + const char *impl_field = "em_implementation"; + const char *vers_field = "config_file_version"; + + config_init(config); + config_init(config_rt); + libconfig->has_cfg_runtime = 0; + + if (!config_read_string(config, config_builtin)) { + EM_PRINT("Failed to read default config: %s(%d): %s\n", + config_error_file(config), config_error_line(config), + config_error_text(config)); + goto fail; + } + + filename = getenv("EM_CONFIG_FILE"); + if (filename == NULL) + return 0; + + EM_PRINT("EM CONFIG FILE: %s\n", filename); + + if (!config_read_file(config_rt, filename)) { + EM_PRINT(" ERROR: failed to read config file: %s(%d): %s\n\n", + config_error_file(config_rt), + config_error_line(config_rt), + config_error_text(config_rt)); + goto fail; + } + + /* Check runtime configuration's implementation name and version */ + if (!config_lookup_string(config, impl_field, &impl) || + !config_lookup_string(config_rt, impl_field, &impl_rt)) { + EM_PRINT(" ERROR: missing mandatory field: %s\n\n", + impl_field); + goto fail; + } + if (!config_lookup_string(config, vers_field, &vers) || + !config_lookup_string(config_rt, vers_field, &vers_rt)) { + EM_PRINT(" ERROR: missing mandatory field: %s\n\n", + vers_field); + goto fail; + } + if (strcmp(impl, impl_rt)) { + EM_PRINT(" ERROR: EM implementation name mismatch:\n" + " Expected: \"%s\"\n" + " Found: \"%s\"\n\n", impl, impl_rt); + goto fail; + } + if (strcmp(vers, vers_rt)) { + EM_PRINT(" ERROR: config file version number mismatch:\n" + " Expected: \"%s\"\n" + " Found: \"%s\"\n\n", vers, vers_rt); + goto fail; + } + + libconfig->has_cfg_runtime = 1; + return 0; +fail: + EM_PRINT("Config file failure\n"); + config_destroy(config); + config_destroy(config_rt); + return -1; +} + +int em_libconfig_term_global(libconfig_t *libconfig) +{ + config_destroy(&libconfig->cfg_default); + config_destroy(&libconfig->cfg_runtime); + + return 0; +} + +int em_libconfig_lookup_int(const libconfig_t *libconfig, const char *path, + int *value /*out*/) +{ + int ret_def = CONFIG_FALSE; + int ret_rt = CONFIG_FALSE; + + ret_def = config_lookup_int(&libconfig->cfg_default, path, value); + + /* Runtime option overrides default value */ + ret_rt = config_lookup_int(&libconfig->cfg_runtime, path, value); + + return (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) ? 1 : 0; +} + +int em_libconfig_lookup_int64(const libconfig_t *libconfig, const char *path, + int64_t *value /*out*/) +{ + int ret_def = CONFIG_FALSE; + int ret_rt = CONFIG_FALSE; + long long value_ll = 0; + + ret_def = config_lookup_int64(&libconfig->cfg_default, path, &value_ll); + + /* Runtime option overrides default value */ + ret_rt = config_lookup_int64(&libconfig->cfg_runtime, path, &value_ll); + + if (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) { + *value = (int64_t)value_ll; + return 1; /* success! 
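All the em_libconfig_lookup_*() helpers above implement the same precedence: the built-in default config is read first and the runtime file given via EM_CONFIG_FILE, if present, overrides it. A hedged usage sketch (the option path is illustrative; em_shm->libconfig is assumed to be the library's libconfig_t instance):

static int read_align_offset_example(void)
{
	int align_offset = 0;

	if (em_libconfig_lookup_int(&em_shm->libconfig, "pool.align_offset",
				    &align_offset) != 1)
		return -1;	/* not found in either config */

	/* Runtime value when EM_CONFIG_FILE defines it,
	 * otherwise the built-in default */
	return align_offset;
}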
*/ + } + + return 0; /* fail */ +} + +int em_libconfig_lookup_bool(const libconfig_t *libconfig, const char *path, + bool *value /*out*/) +{ + int ret_def = CONFIG_FALSE; + int ret_rt = CONFIG_FALSE; + int cfg_value = 0; + int ret_val = 0; + + ret_def = config_lookup_bool(&libconfig->cfg_default, path, &cfg_value); + + /* Runtime option overrides default value */ + ret_rt = config_lookup_bool(&libconfig->cfg_runtime, path, &cfg_value); + + if (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) { + *value = cfg_value ? true : false; + ret_val = 1; + } + + return ret_val; +} + +int em_libconfig_lookup_string(const libconfig_t *libconfig, const char *path, + const char **value /*out*/) +{ + int ret_def = CONFIG_FALSE; + int ret_rt = CONFIG_FALSE; + + ret_def = config_lookup_string(&libconfig->cfg_default, path, value); + + /* Runtime option overrides default value */ + ret_rt = config_lookup_string(&libconfig->cfg_runtime, path, value); + + return (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) ? 1 : 0; +} + +int em_libconfig_lookup_array(const libconfig_t *libconfig, const char *path, + int value[/*out*/], int max_num) +{ + const config_t *config; + const config_setting_t *setting; + int num; + int num_out = 0; + + for (int j = 0; j < 2; j++) { + if (j == 0) + config = &libconfig->cfg_default; + else + config = &libconfig->cfg_runtime; + + setting = config_lookup(config, path); + + /* Runtime config may not define the array, whereas + * the default config has it always defined. When the array + * is defined, it must be correctly formatted. + */ + if (setting == NULL) + continue; + + if (config_setting_is_array(setting) == CONFIG_FALSE) + return 0; + + num = config_setting_length(setting); + + if (num <= 0 || num > max_num) + return 0; + + for (int i = 0; i < num; i++) + value[i] = config_setting_get_int_elem(setting, i); + + num_out = num; + } + + /* Number of elements copied */ + return num_out; +} + +void em_libconfig_lookup(const libconfig_t *libconfig, const char *path, + libconfig_setting_t **setting_default/*out*/, + libconfig_setting_t **setting_runtime/*out*/) +{ + *setting_default = config_lookup(&libconfig->cfg_default, path); + *setting_runtime = config_lookup(&libconfig->cfg_runtime, path); +} + +int em_libconfig_setting_lookup_int(const libconfig_setting_t *setting, + const char *name, int *value/*out*/) +{ + return config_setting_lookup_int(setting, name, value); +} + +const libconfig_list_t * +em_libconfig_setting_get_list(const libconfig_setting_t *setting, const char *name) +{ + const libconfig_list_t *list_setting; + + list_setting = config_setting_get_member(setting, name); + + if (list_setting && config_setting_is_list(list_setting)) + return list_setting; + + return NULL; +} + +int em_libconfig_list_length(const libconfig_list_t *list) +{ + return config_setting_length(list); +} + +static uint32_t path_get_depth(const char *path, char delim) +{ + const char *p = path; + uint32_t depth = 1; /*Depth is 1 when path contains no delimiter*/ + + while (*p) { + if (*p == delim) + depth++; + p++; + } + + return depth; +} + +/* Get second last setting and the last setting name specified in path from the + * list element at index. More specifically, for path 'a.b.c.d', this function + * gets second last setting 'c' from list element at index and the last setting + * name 'd'. 
+ */
+static int setting_get_child(const config_setting_t *parent, const char *path,
+			     const char *delim, const uint32_t depth,
+			     char *name/*out*/, config_setting_t **child/*out*/)
+{
+	char *saveptr; /*Used internally by strtok_r()*/
+	const char *member_name;
+	char path_cp[SETTING_PATH_LEN];
+
+	/* strtok requires non const char pointer */
+	strncpy(path_cp, path, SETTING_PATH_LEN - 1);
+	path_cp[SETTING_PATH_LEN - 1] = '\0';
+
+	/* Get second last setting */
+	member_name = strtok_r(path_cp, delim, &saveptr);
+	for (uint32_t i = 0; i < depth - 1; i++) {
+		*child = config_setting_get_member(parent, member_name);
+
+		if (!(*child))
+			return -1;
+
+		parent = *child;
+		member_name = strtok_r(NULL, delim, &saveptr);
+	}
+
+	/* Get last setting name */
+	strncpy(name, member_name, SETTING_NAME_LEN - 1);
+	name[SETTING_NAME_LEN - 1] = '\0';
+	return 0;
+}
+
+/* Get second last setting and the last setting name specified in path from the
+ * list element at index. More specifically, for path 'a.b.c.d', this function
+ * gets second last setting 'c' from list element at index and the last setting
+ * name 'd'.
+ *
+ * name[out]	Pointer where last setting name will be stored
+ * setting[out]	Pointer where second last setting will be stored
+ */
+static int list_get_setting(const libconfig_list_t *list, int index,
+			    const char *path, char *name/*out*/,
+			    config_setting_t **setting/*out*/)
+{
+	uint32_t depth;
+	config_setting_t *element;
+	char delim[] = ".";
+
+	element = config_setting_get_elem(list, index);
+	if (!element) {
+		EM_LOG(EM_LOG_ERR, "List element %d does not exist\n", index);
+		return -1;
+	}
+
+	depth = path_get_depth(path, delim[0]);
+	if (depth < 2) {/*Only one level of setting in path, e.g., 'a'*/
+		*setting = element;
+		strncpy(name, path, SETTING_NAME_LEN - 1);
+		name[SETTING_NAME_LEN - 1] = '\0';
+		return 0;
+	}
+
+	/*Get second last setting and the last setting name*/
+	return setting_get_child(element, path, delim, depth, name, setting);
+}
+
+libconfig_group_t *em_libconfig_list_lookup_group(const libconfig_list_t *list,
+						  int index, const char *path)
+{
+	char name[SETTING_NAME_LEN];
+	config_setting_t *setting;
+	libconfig_group_t *group;
+
+	if (list_get_setting(list, index, path, name, &setting) < 0)
+		return NULL;
+
+	group = config_setting_get_member(setting, name);
+	if (group && config_setting_is_group(group))
+		return group;
+
+	return NULL;
+}
+
+int em_libconfig_list_lookup_int(const libconfig_list_t *list, int index,
+				 const char *path, int *value/*out*/)
+{
+	char name[SETTING_NAME_LEN];
+	config_setting_t *setting;
+	const config_setting_t *member;
+
+	if (list_get_setting(list, index, path, name, &setting) < 0)
+		return -1; /*Parent setting not found*/
+
+	member = config_setting_get_member(setting, name);
+	if (!member) /*Setting not found*/
+		return -1;
+
+	return config_setting_lookup_int(setting, name, value);
+}
+
+int em_libconfig_list_lookup_bool(const libconfig_list_t *list, int index,
+				  const char *path, bool *value/*out*/)
+{
+	int cfg_value;
+	char name[SETTING_NAME_LEN];
+	config_setting_t *setting;
+	const config_setting_t *member;
+
+	if (list_get_setting(list, index, path, name, &setting) < 0)
+		return -1; /*Parent setting not found*/
+
+	member = config_setting_get_member(setting, name);
+	if (!member) /*Setting not found*/
+		return -1;
+
+	if (!config_setting_lookup_bool(setting, name, &cfg_value))
+		return 0;
+
+	*value = cfg_value ? true : false;
+	return 1;
+}
+
+int em_libconfig_list_lookup_string(const libconfig_list_t *list, int index,
+				    const char *path, const char **value/*out*/)
+{
+	char name[SETTING_NAME_LEN];
+	config_setting_t *setting;
+	const config_setting_t *member;
+
+	if (list_get_setting(list, index, path, name, &setting) < 0)
+		return -1; /*Parent setting not found*/
+
+	member = config_setting_get_member(setting, name);
+	if (!member) /*Setting not found*/
+		return -1;
+
+	return config_setting_lookup_string(setting, name, value);
+}
+
+/* Get second last setting and the last setting name specified in path from
+ * the given group. More specifically, for path 'a.b.c.d', this function
+ * gets second last setting 'c' from group and the last setting name 'd'.
+ *
+ * name[out]	Pointer where last setting name will be stored
+ * setting[out]	Pointer where second last setting will be stored
+ */
+static int group_get_setting(libconfig_group_t *group, const char *path,
+			     char *name/*out*/, config_setting_t **setting/*out*/)
+{
+	uint32_t depth;
+	char delim[] = ".";
+
+	depth = path_get_depth(path, delim[0]);
+	if (depth < 2) {/*No child setting*/
+		*setting = group;
+		strncpy(name, path, SETTING_NAME_LEN - 1);
+		name[SETTING_NAME_LEN - 1] = '\0';
+		return 0;
+	}
+
+	/*Get child setting*/
+	return setting_get_child(group, path, delim, depth, name, setting);
+}
+
+libconfig_group_t
+*em_libconfig_group_lookup_group(libconfig_group_t *group, const char *path)
+{
+	char name[SETTING_NAME_LEN];
+	config_setting_t *setting;
+	libconfig_group_t *group_out;
+
+	if (group_get_setting(group, path, name, &setting) < 0)
+		return NULL;
+
+	group_out = config_setting_get_member(setting, name);
+	if (group_out && config_setting_is_group(group_out))
+		return group_out;
+
+	return NULL;
+}
+
+libconfig_list_t
+*em_libconfig_group_lookup_list(libconfig_group_t *group, const char *path)
+{
+	libconfig_list_t *list;
+	config_setting_t *setting;
+	char name[SETTING_NAME_LEN];
+
+	if (group_get_setting(group, path, name, &setting) < 0)
+		return NULL;
+
+	list = config_setting_get_member(setting, name);
+	if (list && config_setting_is_list(list))
+		return list;
+
+	return NULL;
+}
+
+int em_libconfig_group_lookup_int(const libconfig_group_t *group,
+				  const char *name, int *value/*out*/)
+{
+	return config_setting_lookup_int(group, name, value);
+}
+
+int em_libconfig_group_lookup_bool(const libconfig_group_t *group,
+				   const char *name, bool *value/*out*/)
+{
+	int cfg_value;
+
+	if (!config_setting_lookup_bool(group, name, &cfg_value))
+		return 0;
+
+	*value = cfg_value ?
true : false; + return 1; +} + +int em_libconfig_group_lookup_string(const libconfig_group_t *group, + const char *name, const char **value/*out*/) +{ + return config_setting_lookup_string(group, name, value); +} + +static int lookup_int(const config_t *cfg, + const char *base_path, + const char *local_path, + const char *name, + int *value /*out*/) +{ + char path[256]; + + if (local_path) { + snprintf(path, sizeof(path), "%s.%s.%s", base_path, + local_path, name); + if (config_lookup_int(cfg, path, value) == CONFIG_TRUE) + return 1; + } + + snprintf(path, sizeof(path), "%s.%s", base_path, name); + if (config_lookup_int(cfg, path, value) == CONFIG_TRUE) + return 1; + + return 0; +} + +int em_libconfig_lookup_ext_int(const libconfig_t *libconfig, + const char *base_path, const char *local_path, + const char *name, int *value /*out*/) +{ + if (lookup_int(&libconfig->cfg_runtime, + base_path, local_path, name, value)) + return 1; + + if (lookup_int(&libconfig->cfg_default, + base_path, local_path, name, value)) + return 1; + + return 0; +} + +int em_libconfig_print(const libconfig_t *libconfig) +{ + int c; + /* Temp file for config_write() output. Suppress Coverity warning about tmpfile() usage. */ + /* coverity[secure_temp] */ + FILE *file = tmpfile(); + + if (file == NULL) + return -1; + + if (fprintf(file, + "\nEM_CONFIG_FILE default values:\n" + "-------------------------------\n\n") < 0) + goto fail; + + config_write(&libconfig->cfg_default, file); + + if (libconfig->has_cfg_runtime) { + if (fprintf(file, + "\nEM_CONFIG_FILE override values:\n" + "--------------------------------\n\n") < 0) + goto fail; + + config_write(&libconfig->cfg_runtime, file); + } + + /* Print temp file to the log */ + rewind(file); + while ((c = fgetc(file)) != EOF) + EM_PRINT("%c", (char)c); + + fclose(file); + return 0; + +fail: + fclose(file); + return -1; +} diff --git a/src/em_libconfig.h b/src/em_libconfig.h index 3275982c..f9251cff 100644 --- a/src/em_libconfig.h +++ b/src/em_libconfig.h @@ -1,136 +1,277 @@ -/* - * Copyright (c) 2018, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Copyright (c) 2018, Linaro Limited - * All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef EM_LIBCONFIG_H_ -#define EM_LIBCONFIG_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * Reads the default config and runtime config (if given) to shm and checks - * its mandatory fields. - * - * @param libconfig Pointer to shared libconfig data - * @return int 0 on success, -1 on error - */ -int em_libconfig_init_global(libconfig_t *libconfig); - -/** - * Destroys the configs. - * - * @param libconfig Pointer to shared libconfig data - * @return int 0 - */ -int em_libconfig_term_global(libconfig_t *libconfig); - -/** - * Reads integer from runtime config if given, otherwise default config. - * - * @param libconfig Pointer to shared libconfig data - * @param path Path to value - * @param[out] value Pointer where read value will be stored - * @return int 1 on success, 0 otherwise - */ -int em_libconfig_lookup_int(const libconfig_t *libconfig, const char *path, - int *value /*out*/); - -int em_libconfig_lookup_int64(const libconfig_t *libconfig, const char *path, - int64_t *value /*out*/); - -/** - * Reads a boolean from runtime config if given, otherwise default config. - * - * @param libconfig Pointer to shared libconfig data - * @param path Path to value - * @param[out] value Pointer where read value will be stored - * @return int 1 on success, 0 otherwise - */ -int em_libconfig_lookup_bool(const libconfig_t *libconfig, - const char *path, bool *value /*out*/); - -/** - * Reads a string from runtime config if given, otherwise default config. - * - * @param libconfig Pointer to shared libconfig data - * @param path Path to value - * @param[out] value Pointer where read value will be stored - * @return int 1 on success, 0 otherwise - */ -int em_libconfig_lookup_string(const libconfig_t *libconfig, const char *path, - const char **value /*out*/); - -/** - * Reads an arrays of integers from runtime config if given, otherwise from - * default config. - * - * @param libconfig Pointer to shared libconfig data - * @param path Path to value - * @param[out] value Pointer where read array will be stored - * @param max_num Max number of elements in the array - * @return int Number of read elements - */ -int em_libconfig_lookup_array(const libconfig_t *libconfig, const char *path, - int value[/*out*/], int max_num); - -/** - * Reads integer from runtime config if given, otherwise default config. Path - * to config variable is assumed to be base_path.local_path.name. 
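/*
 * Illustrative usage sketch, not part of this patch: the lookup helpers
 * documented here resolve a value from the runtime config (EM_CONFIG_FILE),
 * falling back to the built-in default config. For the _ext_ variant, the
 * path "base_path.local_path.name" is tried before "base_path.name" within
 * each config. "pool.align_offset" is a real option parsed in em_pool.c;
 * the "queue" paths and surrounding variables are hypothetical:
 */
int val = 0;

/* plain lookup: a runtime value overrides the compiled-in default */
if (!em_libconfig_lookup_int(&em_shm->libconfig, "pool.align_offset", &val))
	return -1; /* option missing from both configs */

/* extended lookup: a local override "queue.ctrl.<name>" wins over the
 * base "queue.<name>" setting (paths here are hypothetical) */
if (!em_libconfig_lookup_ext_int(&em_shm->libconfig, "queue", "ctrl",
				 "min_events_default", &val))
	val = 0; /* not set anywhere */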
- * - * @param libconfig Pointer to shared libconfig data - * @param base_path Basepath to value - * @param local_path Localpath to value - * @param name Value name - * @param[out] value Pointer where read value will be stored - * @return int 1 on success, 0 otherwise - */ -int em_libconfig_lookup_ext_int(const libconfig_t *libconfig, - const char *base_path, const char *local_path, - const char *name, int *value /*out*/); - -/** - * Prints default config and runtime config (if given). - * - * @param libconfig Pointer to shared libconfig data - * @return int 1 on success, 0 otherwise - */ -int em_libconfig_print(const libconfig_t *libconfig); - -#ifdef __cplusplus -} -#endif - -#endif +/* + * Copyright (c) 2018, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Copyright (c) 2018, Linaro Limited + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef EM_LIBCONFIG_H_ +#define EM_LIBCONFIG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * Reads the default config and runtime config (if given) to shm and checks + * its mandatory fields. + * + * @param libconfig Pointer to shared libconfig data + * @return int 0 on success, -1 on error + */ +int em_libconfig_init_global(libconfig_t *libconfig); + +/** + * Destroys the configs. + * + * @param libconfig Pointer to shared libconfig data + * @return int 0 + */ +int em_libconfig_term_global(libconfig_t *libconfig); + +/** + * Reads integer from runtime config if given, otherwise default config. + * + * @param libconfig Pointer to shared libconfig data + * @param path Path to value + * @param[out] value Pointer where read value will be stored + * @return int 1 on success, 0 otherwise + */ +int em_libconfig_lookup_int(const libconfig_t *libconfig, const char *path, + int *value /*out*/); + +int em_libconfig_lookup_int64(const libconfig_t *libconfig, const char *path, + int64_t *value /*out*/); + +/** + * Reads a boolean from runtime config if given, otherwise default config. 
+ *
+ * @param libconfig Pointer to shared libconfig data
+ * @param path      Path to value
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 otherwise
+ */
+int em_libconfig_lookup_bool(const libconfig_t *libconfig,
+			     const char *path, bool *value /*out*/);
+
+/**
+ * Reads a string from runtime config if given, otherwise default config.
+ *
+ * @param libconfig Pointer to shared libconfig data
+ * @param path      Path to value
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 otherwise
+ */
+int em_libconfig_lookup_string(const libconfig_t *libconfig, const char *path,
+			       const char **value /*out*/);
+
+/**
+ * Reads an array of integers from runtime config if given, otherwise from
+ * default config.
+ *
+ * @param libconfig Pointer to shared libconfig data
+ * @param path      Path to value
+ * @param[out] value Pointer where read array will be stored
+ * @param max_num   Max number of elements in the array
+ * @return int Number of read elements
+ */
+int em_libconfig_lookup_array(const libconfig_t *libconfig, const char *path,
+			      int value[/*out*/], int max_num);
+
+/**
+ * Reads integer from runtime config if given, otherwise default config. Path
+ * to config variable is assumed to be base_path.local_path.name.
+ *
+ * @param libconfig  Pointer to shared libconfig data
+ * @param base_path  Basepath to value
+ * @param local_path Localpath to value
+ * @param name       Value name
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 otherwise
+ */
+int em_libconfig_lookup_ext_int(const libconfig_t *libconfig,
+				const char *base_path, const char *local_path,
+				const char *name, int *value /*out*/);
+
+/**
+ * Read setting specified by 'path' from both default and runtime config.
+ *
+ * @param libconfig Pointer to shared libconfig data
+ * @param path      Path to setting
+ * @param[out] setting_default Pointer where setting from default conf file will be stored
+ * @param[out] setting_runtime Pointer where setting from runtime conf file will be stored
+ */
+void em_libconfig_lookup(const libconfig_t *libconfig, const char *path,
+			 libconfig_setting_t **setting_default/*out*/,
+			 libconfig_setting_t **setting_runtime/*out*/);
+
+/**
+ * Read an integer named 'name' from a setting.
+ *
+ * @param setting Pointer to the setting where integer is read
+ * @param name    Value name
+ * @param[out] value Pointer where read integer value will be stored
+ * @return int 1 on success, 0 otherwise
+ */
+int em_libconfig_setting_lookup_int(const libconfig_setting_t *setting,
+				    const char *name, int *value/*out*/);
+
+/**
+ * Fetch a list named 'name' from a setting.
+ *
+ * @param setting Pointer to the setting where list is fetched
+ * @param name    List name
+ * @return Requested list on success, NULL otherwise
+ */
+const libconfig_list_t
+*em_libconfig_setting_get_list(const libconfig_setting_t *setting, const char *name);
+
+/**
+ * Return the number of elements in a list.
+ *
+ * @param list Pointer to list
+ * @return int The number of elements in a list
+ */
+int em_libconfig_list_length(const libconfig_list_t *list);
+
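/*
 * Illustrative usage sketch, not part of this patch: the setting/list/group
 * accessors declared below are meant to be chained, e.g. to walk a list of
 * pool configurations. The option names ("startup_pools", "conf",
 * "pool_cfg.num_subpools") and use_value() are hypothetical here:
 */
libconfig_setting_t *set_def, *set_rt, *setting;
const libconfig_list_t *list;
int num, val;

em_libconfig_lookup(&em_shm->libconfig, "startup_pools", &set_def, &set_rt);
setting = set_rt ? set_rt : set_def; /* runtime config overrides default */
if (setting) {
	list = em_libconfig_setting_get_list(setting, "conf");
	if (list) {
		num = em_libconfig_list_length(list);
		for (int i = 0; i < num; i++) {
			/* 1 on success, 0 on wrong type, -1 if not found */
			if (em_libconfig_list_lookup_int(list, i,
							 "pool_cfg.num_subpools",
							 &val) == 1)
				use_value(val); /* hypothetical */
		}
	}
}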
+/**
+ * Get a group setting from a list.
+ *
+ * @param list  Pointer to list where group is fetched
+ * @param index Index to list element
+ * @param path  Path to the group setting
+ * @return Requested group on success, NULL otherwise
+ */
+libconfig_group_t *em_libconfig_list_lookup_group(const libconfig_list_t *list,
+						  int index, const char *path);
+
+/**
+ * Read an integer from a list.
+ *
+ * @param list  Pointer to list where integer is read
+ * @param index Index to list element
+ * @param path  Path to integer value
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 wrong type, -1 not found
+ */
+int em_libconfig_list_lookup_int(const libconfig_list_t *list, int index,
+				 const char *path, int *value/*out*/);
+
+/**
+ * Read a bool from a list.
+ *
+ * @param list  Pointer to list
+ * @param index Index to list element
+ * @param path  Path to boolean value
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 wrong type, -1 not found
+ */
+int em_libconfig_list_lookup_bool(const libconfig_list_t *list, int index,
+				  const char *path, bool *value/*out*/);
+
+/**
+ * Read string from a list.
+ *
+ * @param list  Pointer to list
+ * @param index Index to list element
+ * @param path  Path to string value
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 wrong type, -1 not found
+ */
+int em_libconfig_list_lookup_string(const libconfig_list_t *list, int index,
+				    const char *path, const char **value/*out*/);
+
+/**
+ * Get a group setting from a group.
+ *
+ * @param group Pointer to group
+ * @param path  Path to the group to be fetched
+ * @return Requested group on success, NULL otherwise
+ */
+libconfig_group_t
+*em_libconfig_group_lookup_group(libconfig_group_t *group, const char *path);
+
+/**
+ * Fetch a list from a group.
+ *
+ * @param group Pointer to group
+ * @param path  Path to the list to be fetched
+ * @return Requested list on success, NULL otherwise
+ */
+libconfig_list_t
+*em_libconfig_group_lookup_list(libconfig_group_t *group, const char *path);
+
+/**
+ * Read an integer from a group.
+ *
+ * @param group Pointer to group
+ * @param name  Name of integer value
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 otherwise
+ */
+int em_libconfig_group_lookup_int(const libconfig_group_t *group,
+				  const char *name, int *value/*out*/);
+
+/**
+ * Read a bool from a group.
+ *
+ * @param group Pointer to group
+ * @param name  Name of boolean value
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 otherwise
+ */
+int em_libconfig_group_lookup_bool(const libconfig_group_t *group,
+				   const char *name, bool *value/*out*/);
+
+/**
+ * Read string from a group.
+ *
+ * @param group Pointer to group
+ * @param name  Name of the string to be fetched
+ * @param[out] value Pointer where read value will be stored
+ * @return int 1 on success, 0 otherwise
+ */
+int em_libconfig_group_lookup_string(const libconfig_group_t *group,
+				     const char *name, const char **value/*out*/);
+
+/**
+ * Prints default config and runtime config (if given).
+ * + * @param libconfig Pointer to shared libconfig data + * @return int 1 on success, 0 otherwise + */ +int em_libconfig_print(const libconfig_t *libconfig); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/em_libconfig_types.h b/src/em_libconfig_types.h index a3ea91fb..8d356b10 100644 --- a/src/em_libconfig_types.h +++ b/src/em_libconfig_types.h @@ -1,56 +1,63 @@ -/* - * Copyright (c) 2019, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * Copyright (c) 2018, Linaro Limited - * All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef EM_LIBCONFIG_TYPES_H_ -#define EM_LIBCONFIG_TYPES_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -typedef struct libconfig { - /** Runtime configuration given or not */ - bool has_cfg_runtime; - /** Default libconfig */ - config_t cfg_default; - /** Given runtime libconfig */ - config_t cfg_runtime; -} libconfig_t; - -#ifdef __cplusplus -} -#endif - -#endif +/* + * Copyright (c) 2019, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Copyright (c) 2018, Linaro Limited + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef EM_LIBCONFIG_TYPES_H_ +#define EM_LIBCONFIG_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/** Configuration setting */ +typedef config_setting_t libconfig_setting_t; +/** List setting */ +typedef config_setting_t libconfig_list_t; +/** Group setting */ +typedef config_setting_t libconfig_group_t; + +typedef struct libconfig { + /** Runtime configuration given or not */ + bool has_cfg_runtime; + /** Default libconfig */ + config_t cfg_default; + /** Given runtime libconfig */ + config_t cfg_runtime; +} libconfig_t; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/em_mem.h b/src/em_mem.h index 20ef2118..c2e7b57c 100644 --- a/src/em_mem.h +++ b/src/em_mem.h @@ -1,240 +1,260 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * EM Shared & Local Memory data - * - */ - -#ifndef EM_MEM_H_ -#define EM_MEM_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * EM shared memory data - * - * Struct contains data that is shared between all EM-cores, - * i.e. shared between all EM-processes or EM-threads depending on the setup. 
- */ -typedef struct { - /** Handle for this shared memory */ - odp_shm_t this_shm; - /** EM internal log function, overridable via em_conf, var args */ - em_log_func_t log_fn; - /** EM internal log function, overridable via em_conf, va_list */ - em_vlog_func_t vlog_fn; - /** EM configuration as given to em_init() */ - em_conf_t conf ENV_CACHE_LINE_ALIGNED; - /** Initialization state data */ - init_t init ENV_CACHE_LINE_ALIGNED; - /** EM config file options */ - opt_t opt ENV_CACHE_LINE_ALIGNED; - /** Mapping between physical core id <-> EM core id */ - core_map_t core_map ENV_CACHE_LINE_ALIGNED; - /** Table of buffer/packet/event pools used by EM */ - mpool_tbl_t mpool_tbl ENV_CACHE_LINE_ALIGNED; - /** Pool of free event/mempools */ - mpool_pool_t mpool_pool ENV_CACHE_LINE_ALIGNED; - /** EO table */ - eo_tbl_t eo_tbl ENV_CACHE_LINE_ALIGNED; - /** EO pool of free/unused EOs */ - eo_pool_t eo_pool ENV_CACHE_LINE_ALIGNED; - /** Event Chaining resources */ - event_chaining_t event_chaining ENV_CACHE_LINE_ALIGNED; - /** Queue table */ - queue_tbl_t queue_tbl ENV_CACHE_LINE_ALIGNED; - /** Queue pool of free/unused dynamic queues */ - queue_pool_t queue_pool ENV_CACHE_LINE_ALIGNED; - /** Queue pool of free/unused static queues */ - queue_pool_t queue_pool_static ENV_CACHE_LINE_ALIGNED; - /** Queue group table */ - queue_group_tbl_t queue_group_tbl ENV_CACHE_LINE_ALIGNED; - /** Queue group pool of free/unused queue groups */ - queue_group_pool_t queue_group_pool ENV_CACHE_LINE_ALIGNED; - /** Atomic group table */ - atomic_group_tbl_t atomic_group_tbl ENV_CACHE_LINE_ALIGNED; - /** Dynamic atomic group pool */ - atomic_group_pool_t atomic_group_pool ENV_CACHE_LINE_ALIGNED; - /** Event group table */ - event_group_tbl_t event_group_tbl ENV_CACHE_LINE_ALIGNED; - /** Event group pool of free/unused queue groups */ - event_group_pool_t event_group_pool ENV_CACHE_LINE_ALIGNED; - /** Error handler structure */ - error_handler_t error_handler ENV_CACHE_LINE_ALIGNED; - - /** Dispatcher enter callback functions currently in use */ - hook_tbl_t *dispatch_enter_cb_tbl ENV_CACHE_LINE_ALIGNED; - /** Dispatcher exit callback functions currently in use */ - hook_tbl_t *dispatch_exit_cb_tbl; - /** Alloc-hook functions currently in use */ - hook_tbl_t *alloc_hook_tbl; - /** Free-hook functions currently in use */ - hook_tbl_t *free_hook_tbl; - /** Send-hook functions currently in use */ - hook_tbl_t *send_hook_tbl; - - /** Dispatch enter callback storage, many sets of callback-tables */ - hook_storage_t dispatch_enter_cb_storage ENV_CACHE_LINE_ALIGNED; - /** Dispatch exit callback storage, many sets of callback-tables */ - hook_storage_t dispatch_exit_cb_storage ENV_CACHE_LINE_ALIGNED; - /** Alloc-hook function storage, many sets of hook-tables */ - hook_storage_t alloc_hook_storage ENV_CACHE_LINE_ALIGNED; - /** Free-hook function storage, many sets of hook-tables */ - hook_storage_t free_hook_storage ENV_CACHE_LINE_ALIGNED; - /** Send-hook function storage, many sets of hook-tables */ - hook_storage_t send_hook_storage ENV_CACHE_LINE_ALIGNED; - - /** Current number of allocated EOs */ - env_atomic32_t eo_count ENV_CACHE_LINE_ALIGNED; - /** Timer resources */ - timer_storage_t timers ENV_CACHE_LINE_ALIGNED; - /** Daemon eo */ - daemon_eo_t daemon ENV_CACHE_LINE_ALIGNED; - /** Current number of allocated queues */ - env_atomic32_t queue_count; - /** Current number of allocated queue groups */ - env_atomic32_t queue_group_count; - /** Current number of allocated event groups */ - env_atomic32_t event_group_count; - 
/** Current number of allocated atomic groups */ - env_atomic32_t atomic_group_count; - /** Current number of allocated event pools */ - env_atomic32_t pool_count; - /** libconfig setting, default (compiled) and runtime (from file) */ - libconfig_t libconfig; - /** priority mapping */ - struct { - /** mapping table */ - int map[EM_QUEUE_PRIO_NUM]; - int num_runtime; - } queue_prio; - - /** Guarantee that size is a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} em_shm_t; - -COMPILE_TIME_ASSERT(sizeof(em_shm_t) % ENV_CACHE_LINE_SIZE == 0, - EM_SHM_SIZE_ERROR); - -/** - * EM core/local current state - * - * Contains information about the current EO, queue, event group etc. when - * running in an EO context (e.g. in an EO-receive function), - * undef/NULL otherwise. - */ -typedef struct { - /** Current queue element during a receive call */ - queue_elem_t *q_elem; - /** Current scheduled queue element that set the sched context*/ - queue_elem_t *sched_q_elem; - /** Current event group element */ - event_group_elem_t *egrp_elem; - /** Current event group */ - em_event_group_t egrp; - /** Current event group generation count*/ - int32_t egrp_gen; - /** EO-receive function burst count */ - int rcv_multi_cnt; - /** Current scheduling context type */ - em_sched_context_type_t sched_context_type; -} em_locm_current_t; - -/** - * EM core local data - */ -typedef struct { - /** EM core/local current state */ - em_locm_current_t current; - - /** EM core id for this core */ - int core_id; - /** The number of events from the scheduler to dispatch */ - int event_burst_cnt; - /** em_atomic_processing_end() called during event dispatch */ - int atomic_group_released; - - /** Number of dispatch rounds since previous polling of ctrl queues */ - unsigned int dispatch_cnt; - /** Time when polling of ctrl queues where last done */ - odp_time_t dispatch_last_run; - - /** Local queues, i.e. storage for events to local queues */ - local_queues_t local_queues; - - /** Track output-queues used during this dispatch round (burst) */ - output_queue_track_t output_queue_track; - - /** EO start-function ongoing, buffer all events and send after start */ - eo_elem_t *start_eo_elem; - /** The number of errors on a core */ - uint64_t error_count; - - /** Is input_poll_fn executed on this core */ - bool do_input_poll; - /** Is output_drain_fn executed on this core */ - bool do_output_drain; - - /** Number of dispatch rounds since previous call of poll/drain functions */ - unsigned int poll_drain_dispatch_cnt; - /** Time when poll and drain functions were last called */ - odp_time_t poll_drain_dispatch_last_run; - - /** EM-core local log function */ - em_log_func_t log_fn; - - /** Is thread external to EM (doesn't participate in event dispatching) */ - bool is_external_thr; - - /** Synchronous API */ - sync_api_t sync_api; - - /** Guarantee that size is a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} em_locm_t; - -COMPILE_TIME_ASSERT((sizeof(em_locm_t) % ENV_CACHE_LINE_SIZE) == 0, - EM_LOCM_SIZE_ERROR); - -/** EM shared memory pointer */ -extern em_shm_t *em_shm; -/** EM core local memory */ -extern ENV_LOCAL em_locm_t em_locm; - -#ifdef __cplusplus -} -#endif - -#endif /* EM_MEM_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ *
+ * EM Shared & Local Memory data
+ *
+ */
+
+#ifndef EM_MEM_H_
+#define EM_MEM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <event_machine/helper/event_machine_debug.h>
+
+/**
+ * EM shared memory data
+ *
+ * Struct contains data that is shared between all EM-cores,
+ * i.e. shared between all EM-processes or EM-threads depending on the setup.
+ */
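/*
 * Note on layout (illustrative, not part of this patch): each frequently
 * written member of the struct below is placed on its own cache line with
 * ENV_CACHE_LINE_ALIGNED and the total size is asserted, so that writes by
 * one EM-core do not false-share with another core's data. A generic C11
 * sketch of the same pattern, with hypothetical names:
 *
 *	#include <stdalign.h>
 *	#include <stdatomic.h>
 *
 *	#define CACHE_LINE 64
 *
 *	typedef struct {
 *		alignas(CACHE_LINE) atomic_uint cnt_a; // written by producers
 *		alignas(CACHE_LINE) atomic_uint cnt_b; // written by consumers
 *	} shared_counters_t;
 *
 *	_Static_assert(sizeof(shared_counters_t) % CACHE_LINE == 0,
 *		       "struct size not a multiple of the cache line size");
 */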
+typedef struct {
+	/** Handle for this shared memory */
+	odp_shm_t this_shm;
+	/** EM internal log function, overridable via em_conf, var args */
+	em_log_func_t log_fn;
+	/** EM internal log function, overridable via em_conf, va_list */
+	em_vlog_func_t vlog_fn;
+	/** EM configuration as given to em_init() */
+	em_conf_t conf ENV_CACHE_LINE_ALIGNED;
+	/** Initialization state data */
+	init_t init ENV_CACHE_LINE_ALIGNED;
+	/** EM config file options */
+	opt_t opt ENV_CACHE_LINE_ALIGNED;
+	/** Mapping between physical core id <-> EM core id */
+	core_map_t core_map ENV_CACHE_LINE_ALIGNED;
+	/** Table of buffer/packet/event pools used by EM */
+	mpool_tbl_t mpool_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Pool of free event/mempools */
+	mpool_pool_t mpool_pool ENV_CACHE_LINE_ALIGNED;
+	/** EO table */
+	eo_tbl_t eo_tbl ENV_CACHE_LINE_ALIGNED;
+	/** EO pool of free/unused EOs */
+	eo_pool_t eo_pool ENV_CACHE_LINE_ALIGNED;
+	/** Event Chaining resources */
+	event_chaining_t event_chaining ENV_CACHE_LINE_ALIGNED;
+	/** Queue table */
+	queue_tbl_t queue_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Queue pool of free/unused dynamic queues */
+	queue_pool_t queue_pool ENV_CACHE_LINE_ALIGNED;
+	/** Queue pool of free/unused static queues */
+	queue_pool_t queue_pool_static ENV_CACHE_LINE_ALIGNED;
+	/** Queue group table */
+	queue_group_tbl_t queue_group_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Queue group pool of free/unused queue groups */
+	queue_group_pool_t queue_group_pool ENV_CACHE_LINE_ALIGNED;
+	/** Atomic group table */
+	atomic_group_tbl_t atomic_group_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Dynamic atomic group pool */
+	atomic_group_pool_t atomic_group_pool ENV_CACHE_LINE_ALIGNED;
+	/** Event group table */
+	event_group_tbl_t event_group_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Event group pool of free/unused event groups */
+	event_group_pool_t event_group_pool ENV_CACHE_LINE_ALIGNED;
+	/** Error handler structure */
+	error_handler_t error_handler ENV_CACHE_LINE_ALIGNED;
+
+	/** Dispatcher enter callback functions currently in use */
+	hook_tbl_t *dispatch_enter_cb_tbl ENV_CACHE_LINE_ALIGNED;
+	/** Dispatcher exit callback functions currently in use */
+	hook_tbl_t *dispatch_exit_cb_tbl;
+	/** Alloc-hook functions currently in use */
+	hook_tbl_t *alloc_hook_tbl;
+	/** Free-hook functions currently in use */
+	hook_tbl_t *free_hook_tbl;
+	/** Send-hook functions currently in use */
+	hook_tbl_t *send_hook_tbl;
+	/** To_idle hook functions currently in use */
+	hook_tbl_t *to_idle_hook_tbl;
+	/** To_active hook functions currently in use */
+	hook_tbl_t *to_active_hook_tbl;
+	/** While_idle hook functions currently in use */
+	hook_tbl_t *while_idle_hook_tbl;
+
+	/** Dispatch enter callback storage, many sets of callback-tables */
+	hook_storage_t dispatch_enter_cb_storage ENV_CACHE_LINE_ALIGNED;
+	/** Dispatch exit callback storage, many sets of callback-tables */
+	hook_storage_t dispatch_exit_cb_storage ENV_CACHE_LINE_ALIGNED;
+	/** Alloc-hook function storage, many sets of hook-tables */
+	hook_storage_t alloc_hook_storage ENV_CACHE_LINE_ALIGNED;
+	/** Free-hook function storage, many sets of hook-tables */
+	hook_storage_t free_hook_storage ENV_CACHE_LINE_ALIGNED;
+	/** Send-hook function storage, many sets of hook-tables */
+	hook_storage_t send_hook_storage ENV_CACHE_LINE_ALIGNED;
+	/** To_idle hook functions storage, many sets of hook-tables */
+	hook_storage_t to_idle_hook_storage;
+	/** To_active hook functions storage, many sets of hook-tables */
+	hook_storage_t to_active_hook_storage;
+	/** While_idle hook functions storage, many sets of hook-tables */
+	hook_storage_t while_idle_hook_storage;
+
+	/** Current number of allocated EOs */
+	env_atomic32_t eo_count ENV_CACHE_LINE_ALIGNED;
+	/** Timer resources */
+	timer_storage_t timers ENV_CACHE_LINE_ALIGNED;
+	/** Daemon eo */
+	daemon_eo_t daemon ENV_CACHE_LINE_ALIGNED;
+	/** Current number of allocated queues */
+	env_atomic32_t queue_count;
+	/** Current number of allocated queue groups */
+	env_atomic32_t queue_group_count;
+	/** Current number of allocated event groups */
+	env_atomic32_t event_group_count;
+	/** Current number of allocated atomic groups */
+	env_atomic32_t atomic_group_count;
+	/** Current number of allocated event pools */
+	env_atomic32_t pool_count;
+	/** libconfig setting, default (compiled) and runtime (from file) */
+	libconfig_t libconfig;
+	/** priority mapping */
+	struct {
+		/** mapping table */
+		int map[EM_QUEUE_PRIO_NUM];
+		int num_runtime;
+	} queue_prio;
+
+	/** Guarantee that size is a multiple of cache line size */
+	void *end[0] ENV_CACHE_LINE_ALIGNED;
+} em_shm_t;
+
+COMPILE_TIME_ASSERT(sizeof(em_shm_t) % ENV_CACHE_LINE_SIZE == 0,
+		    EM_SHM_SIZE_ERROR);
+
+/**
+ * EM core/local current state
+ *
+ * Contains information about the current EO, queue, event group etc. when
+ * running in an EO context (e.g. in an EO-receive function),
+ * undef/NULL otherwise.
+ */
+typedef struct {
+	/** Current queue element during a receive call */
+	queue_elem_t *q_elem;
+	/** Current scheduled queue element that set the sched context*/
+	queue_elem_t *sched_q_elem;
+	/** Current event group element */
+	event_group_elem_t *egrp_elem;
+	/** Current event group */
+	em_event_group_t egrp;
+	/** Current event group generation count*/
+	int32_t egrp_gen;
+	/** EO-receive function burst count */
+	int rcv_multi_cnt;
+	/** Current scheduling context type */
+	em_sched_context_type_t sched_context_type;
+} em_locm_current_t;
+
+/**
+ * EM core local data
+ */
+typedef struct {
+	/** EM core/local current state */
+	em_locm_current_t current;
+
+	/** EM core id for this core */
+	int core_id;
+	/** The number of events from the scheduler to dispatch */
+	int event_burst_cnt;
+	/** em_atomic_processing_end() called during event dispatch */
+	int atomic_group_released;
+
+	/** Number of dispatch rounds since previous polling of ctrl queues */
+	unsigned int dispatch_cnt;
+	/** Time when polling of ctrl queues was last done */
+	odp_time_t dispatch_last_run;
+
+	/** Local queues, i.e.
storage for events to local queues */ + local_queues_t local_queues; + + /** Track output-queues used during this dispatch round (burst) */ + output_queue_track_t output_queue_track; + + /** EO start-function ongoing, buffer all events and send after start */ + eo_elem_t *start_eo_elem; + /** The number of errors on a core */ + uint64_t error_count; + + /** Is input_poll_fn executed on this core */ + bool do_input_poll; + /** Is output_drain_fn executed on this core */ + bool do_output_drain; + + /** Number of dispatch rounds since previous call of poll/drain functions */ + unsigned int poll_drain_dispatch_cnt; + /** Time when poll and drain functions were last called */ + odp_time_t poll_drain_dispatch_last_run; + + /** EM-core local log function */ + em_log_func_t log_fn; + + /** Is thread external to EM (doesn't participate in event dispatching) */ + bool is_external_thr; + + /** Synchronous API */ + sync_api_t sync_api; + + /** Idle state of the core, used when calling idle hooks */ + idle_state_t idle_state; + + /** dispatcher debug timestamps (ns) */ + uint64_t debug_ts[EM_DEBUG_TSP_LAST]; + + /** Guarantee that size is a multiple of cache line size */ + void *end[0] ENV_CACHE_LINE_ALIGNED; +} em_locm_t; + +COMPILE_TIME_ASSERT((sizeof(em_locm_t) % ENV_CACHE_LINE_SIZE) == 0, + EM_LOCM_SIZE_ERROR); + +/** EM shared memory pointer */ +extern em_shm_t *em_shm; +/** EM core local memory */ +extern ENV_LOCAL em_locm_t em_locm; + +#ifdef __cplusplus +} +#endif + +#endif /* EM_MEM_H_ */ diff --git a/src/em_pool.c b/src/em_pool.c index b400e2eb..4543e5af 100644 --- a/src/em_pool.c +++ b/src/em_pool.c @@ -1,984 +1,1673 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
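/*
 * Context note (illustrative, not part of this patch): the EM-pool options
 * parsed by read_config_file() below correspond to entries like these in an
 * EM_CONFIG_FILE (libconfig syntax). The option names are the ones checked
 * in the code; the values shown are hypothetical:
 *
 *	pool: {
 *		statistics_enable = false
 *		align_offset = 0
 *		user_area_size = 0
 *		pkt_headroom = 128
 *	}
 */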
- */ - -#include "em_include.h" - -#ifndef __clang__ -COMPILE_TIME_ASSERT(EM_POOL_DEFAULT > (em_pool_t)0 && - EM_POOL_DEFAULT < (em_pool_t)EM_CONFIG_POOLS, - EM_ODP_EM_DEFAULT_POOL_ERROR); -COMPILE_TIME_ASSERT(EM_POOL_UNDEF != EM_POOL_DEFAULT, - EM_ODP_EM_POOL_UNDEF_ERROR); -#endif -COMPILE_TIME_ASSERT(EM_EVENT_USER_AREA_MAX_SIZE < UINT16_MAX, - EM_ODP_EM_EVENT_USER_AREA_MAX_SIZE_ERROR); -/* - * Max supported value for the config file option 'pool.align_offset'. - * - * The limitation is set by events based on odp-bufs that include the ev-hdr at - * the beginning of the odp-buf payload - the alignment is adjusted into the end - * of the ev-hdr. - * Events based on odp-pkts do not have this restriction but the same limit is - * used for all. - */ -#define ALIGN_OFFSET_MAX ((int)(sizeof(event_hdr_t) - \ - offsetof(event_hdr_t, end_hdr_data))) - -static inline mpool_elem_t * -mpool_poolelem2pool(objpool_elem_t *const objpool_elem) -{ - return (mpool_elem_t *)((uintptr_t)objpool_elem - - offsetof(mpool_elem_t, objpool_elem)); -} - -static int -read_config_file(void) -{ - const char *conf_str; - bool val_bool = false; - int val = 0; - int ret; - - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - - EM_PRINT("EM-pool config:\n"); - - /* - * Option: pool.statistics_enable - */ - conf_str = "pool.statistics_enable"; - ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - - if (val_bool) { - if (!capa->buf.stats.bit.available || !capa->pkt.stats.bit.available) { - EM_LOG(EM_LOG_ERR, "! %s: NOT supported by ODP - disabling!\n", - conf_str); - val_bool = false; /* disable pool statistics, no ODP support! */ - } - - if (!capa->buf.stats.bit.cache_available || !capa->pkt.stats.bit.cache_available) { - EM_LOG(EM_LOG_ERR, "! %s: omit events in pool cache, no ODP support!\n", - conf_str); - } - } - - /* store & print the value */ - em_shm->opt.pool.statistics_enable = (int)val_bool; - EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", - val_bool); - - /* - * Option: pool.align_offset - */ - conf_str = "pool.align_offset"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - if (val < 0 || val > ALIGN_OFFSET_MAX || !POWEROF2(val)) { - EM_LOG(EM_LOG_ERR, - "Bad config value '%s = %d' (max: %d and value must be power of 2)\n", - conf_str, val, ALIGN_OFFSET_MAX); - return -1; - } - /* store & print the value */ - em_shm->opt.pool.align_offset = val; - EM_PRINT(" %s (default): %d (max: %d)\n", - conf_str, val, ALIGN_OFFSET_MAX); - - /* - * Option: pool.user_area_size - */ - conf_str = "pool.user_area_size"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - if (val < 0 || (unsigned int)val > capa->pkt.max_uarea_size || - val > EM_EVENT_USER_AREA_MAX_SIZE) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", - conf_str, val); - return -1; - } - /* store & print the value */ - em_shm->opt.pool.user_area_size = val; - EM_PRINT(" %s (default): %d (max: %d)\n", - conf_str, val, - MIN(EM_EVENT_USER_AREA_MAX_SIZE, capa->pkt.max_uarea_size)); - - /* - * Option: pool.pkt_headroom - */ - conf_str = "pool.pkt_headroom"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - - if (val < 0 || (unsigned int)val > capa->pkt.max_headroom) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", - conf_str, val); - return -1; - } - /* store & print the value */ - em_shm->opt.pool.pkt_headroom = val; - EM_PRINT(" %s (default): %d (max: %u)\n", - conf_str, val, capa->pkt.max_headroom); - - return 0; -} - -em_status_t -pool_init(mpool_tbl_t *const mpool_tbl, mpool_pool_t *const mpool_pool, - const em_pool_cfg_t *default_pool_cfg) -{ - em_pool_t pool; - int ret; - const int cores = em_core_count(); - - memset(mpool_tbl, 0, sizeof(mpool_tbl_t)); - memset(mpool_pool, 0, sizeof(mpool_pool_t)); - env_atomic32_init(&em_shm->pool_count); - - ret = objpool_init(&mpool_pool->objpool, cores); - if (ret != 0) - return EM_ERR_OPERATION_FAILED; - - for (int i = 0; i < EM_CONFIG_POOLS; i++) { - pool = pool_idx2hdl(i); - mpool_elem_t *mpool_elem = pool_elem_get(pool); - - if (unlikely(!mpool_elem)) - return EM_ERR_BAD_POINTER; - - mpool_elem->em_pool = pool; - mpool_elem->event_type = EM_EVENT_TYPE_UNDEF; - for (int j = 0; j < EM_MAX_SUBPOOLS; j++) { - mpool_elem->odp_pool[j] = ODP_POOL_INVALID; - mpool_elem->size[j] = 0; - } - - objpool_add(&mpool_pool->objpool, i % cores, - &mpool_elem->objpool_elem); - } - - /* Init the mapping tbl from odp-pool(=subpool) index to em-pool */ - if (odp_pool_max_index() >= POOL_ODP2EM_TBL_LEN) - return EM_ERR_TOO_LARGE; - for (int i = 0; i < POOL_ODP2EM_TBL_LEN; i++) - mpool_tbl->pool_odp2em[i] = EM_POOL_UNDEF; - - /* Store common ODP pool capabilities in the mpool_tbl for easy access*/ - if (odp_pool_capability(&mpool_tbl->odp_pool_capability) != 0) - return EM_ERR_LIB_FAILED; - - /* Read EM-pool related runtime config options */ - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - /* Create the 'EM_POOL_DEFAULT' pool */ - pool = em_pool_create(EM_POOL_DEFAULT_NAME, EM_POOL_DEFAULT, - default_pool_cfg); - if (pool == EM_POOL_UNDEF || pool != EM_POOL_DEFAULT) - return EM_ERR_ALLOC_FAILED; - - return EM_OK; -} - -em_status_t 
-pool_term(const mpool_tbl_t *mpool_tbl) -{ - em_status_t stat = EM_OK; - int i; - - (void)mpool_tbl; - - EM_PRINT("\n" - "Status before delete:\n"); - em_pool_info_print_all(); - - for (i = 0; i < EM_CONFIG_POOLS; i++) { - em_pool_t pool = pool_idx2hdl(i); - const mpool_elem_t *mpool_elem = pool_elem_get(pool); - em_status_t ret; - - if (pool_allocated(mpool_elem)) { - ret = pool_delete(pool); - if (ret != EM_OK) - stat = ret; /* save last error as return val */ - } - } - - return stat; -} - -static em_pool_t -pool_alloc(em_pool_t pool) -{ - mpool_elem_t *mpool_elem; - - if (pool == EM_POOL_UNDEF) { - objpool_elem_t *objpool_elem = - objpool_rem(&em_shm->mpool_pool.objpool, em_core_id()); - - if (unlikely(objpool_elem == NULL)) - return EM_POOL_UNDEF; - - mpool_elem = mpool_poolelem2pool(objpool_elem); - } else { - int ret; - - mpool_elem = pool_elem_get(pool); - if (unlikely(mpool_elem == NULL)) - return EM_POOL_UNDEF; - - ret = objpool_rem_elem(&em_shm->mpool_pool.objpool, - &mpool_elem->objpool_elem); - if (unlikely(ret != 0)) - return EM_POOL_UNDEF; - } - - env_atomic32_inc(&em_shm->pool_count); - return mpool_elem->em_pool; -} - -static em_status_t -pool_free(em_pool_t pool) -{ - mpool_elem_t *mpool_elem = pool_elem_get(pool); - - if (unlikely(mpool_elem == NULL)) - return EM_ERR_BAD_ID; - - objpool_add(&em_shm->mpool_pool.objpool, - mpool_elem->objpool_elem.subpool_idx, - &mpool_elem->objpool_elem); - - env_atomic32_dec(&em_shm->pool_count); - return EM_OK; -} - -/* Helper func to invalid_pool_cfg() */ -static int invalid_pool_cache_cfg(const em_pool_cfg_t *pool_cfg, - const char **err_str/*out*/) -{ - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - uint32_t min_cache_size; - uint32_t cache_size; - - if (pool_cfg->event_type == EM_EVENT_TYPE_SW) - min_cache_size = capa->buf.min_cache_size; - else - min_cache_size = capa->pkt.min_cache_size; - - for (int i = 0; i < pool_cfg->num_subpools; i++) { - if (pool_cfg->subpool[i].size <= 0 || - pool_cfg->subpool[i].num <= 0) { - *err_str = "Invalid subpool size/num"; - return -(9 * 10 + i); /* -90, -91, ... */ - } - - cache_size = pool_cfg->subpool[i].cache_size; - if (unlikely(cache_size < min_cache_size)) { - *err_str = "Requested cache size too small"; - return -(10 * 10 + i); /* -100, -101, ... 
*/ - } - /* - * If the given cache size is larger than odp-max, - * then use odp-max: - * if (cache_size > max_cache_size) - * cache_size = max_cache_size; - * This is done later in pool_create(); - */ - } - - return 0; -} - -int invalid_pool_cfg(const em_pool_cfg_t *pool_cfg, const char **err_str/*out*/) -{ - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - - if (!pool_cfg) { - *err_str = "Pool config NULL"; - return -1; - } - if (pool_cfg->__internal_check != EM_CHECK_INIT_CALLED) { - *err_str = "Pool config not initialized"; - return -2; - } - - if (pool_cfg->num_subpools <= 0 || - pool_cfg->num_subpools > EM_MAX_SUBPOOLS) { - *err_str = "Invalid number of subpools"; - return -3; - } - - if (pool_cfg->event_type != EM_EVENT_TYPE_SW && - pool_cfg->event_type != EM_EVENT_TYPE_PACKET) { - *err_str = "Pool event type not supported, use _SW or _PACKET"; - return -4; - } - - if (pool_cfg->align_offset.in_use && - (pool_cfg->align_offset.value > ALIGN_OFFSET_MAX || - !POWEROF2(pool_cfg->align_offset.value))) { - *err_str = "Invalid align offset"; - return -5; - } - - if (pool_cfg->user_area.in_use) { - if (pool_cfg->user_area.size > EM_EVENT_USER_AREA_MAX_SIZE) { - *err_str = "Event user area too large"; - return -6; - } - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { - size_t req_odp_uarea_sz = pool_cfg->user_area.size + - sizeof(event_hdr_t); - if (req_odp_uarea_sz > capa->pkt.max_uarea_size) { - *err_str = "ODP pkt max uarea not large enough"; - return -7; - } - } - } - - if (pool_cfg->pkt.headroom.in_use && - pool_cfg->pkt.headroom.value > capa->pkt.max_headroom) { - *err_str = "Requested pkt headroom size too large"; - return -8; - } - - int err = invalid_pool_cache_cfg(pool_cfg, err_str/*out*/); - - return err; -} - -/* - * Helper to pool_create() - preallocate all events in the pool for ESV to - * maintain event state over multiple alloc- and free-operations. 
- */ -static void -pool_prealloc(const mpool_elem_t *pool_elem) -{ - em_event_t event; - event_hdr_t *ev_hdr; - uint64_t num_tot = 0; - uint64_t num = 0; - const uint32_t size = pool_elem->pool_cfg.subpool[0].size; - list_node_t evlist; - list_node_t *node; - - list_init(&evlist); - - for (int i = 0; i < pool_elem->num_subpools; i++) - num_tot += pool_elem->pool_cfg.subpool[i].num; - - do { - event = event_prealloc(pool_elem, size, pool_elem->event_type); - if (likely(event != EM_EVENT_UNDEF)) { - ev_hdr = event_to_hdr(event); - list_add(&evlist, &ev_hdr->start_node); - num++; - } - } while (event != EM_EVENT_UNDEF); - - if (unlikely(num < num_tot)) - INTERNAL_ERROR(EM_FATAL(EM_ERR_TOO_SMALL), - EM_ESCOPE_POOL_CREATE, - "events expected:%" PRIu64 " actual:%" PRIu64 "", - num_tot, num); - - while (!list_is_empty(&evlist)) { - node = list_rem_first(&evlist); - ev_hdr = start_node_to_event_hdr(node); - em_free(ev_hdr->event); - } -} - -/* - * pool_create() helper: sort subpool cfg in ascending order based on buf size - */ -static void -sort_pool_cfg(const em_pool_cfg_t *pool_cfg, em_pool_cfg_t *sorted_cfg /*out*/) -{ - const int num_subpools = pool_cfg->num_subpools; - - *sorted_cfg = *pool_cfg; - - for (int i = 0; i < num_subpools - 1; i++) { - int idx = i; /* array index containing smallest size */ - - for (int j = i + 1; j < num_subpools; j++) { - if (sorted_cfg->subpool[j].size < - sorted_cfg->subpool[idx].size) - idx = j; /* store idx to smallest */ - } - - /* min size at [idx], swap with [i] */ - if (idx != i) { - uint32_t size = sorted_cfg->subpool[i].size; - uint32_t num = sorted_cfg->subpool[i].num; - uint32_t cache_size = sorted_cfg->subpool[i].cache_size; - - sorted_cfg->subpool[i] = sorted_cfg->subpool[idx]; - - sorted_cfg->subpool[idx].size = size; - sorted_cfg->subpool[idx].num = num; - sorted_cfg->subpool[idx].cache_size = cache_size; - } - } -} - -/* - * pool_create() helper: set pool event-cache size. - * - * Set the requested subpool cache-size based on user provided value and - * limit set by odp-pool-capability. - * Requested value can be larger than odp-max, use odp--max in this - * case. - * Verification against odp-min value done in invalid_pool_cfg(). - */ -static void -set_poolcache_size(em_pool_cfg_t *pool_cfg) -{ - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - int num_subpools = pool_cfg->num_subpools; - uint32_t max_cache_size; - - if (pool_cfg->event_type == EM_EVENT_TYPE_SW) - max_cache_size = capa->buf.max_cache_size; - else - max_cache_size = capa->pkt.max_cache_size; - - for (int i = 0; i < num_subpools; i++) { - if (max_cache_size < pool_cfg->subpool[i].cache_size) - pool_cfg->subpool[i].cache_size = max_cache_size; - } -} - -/* - * pool_create() helper: determine payload alignment. 
- */ -static int -set_align(const em_pool_cfg_t *pool_cfg, - uint32_t *align_offset /*out*/, uint32_t *odp_align /*out*/) -{ - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - uint32_t offset = 0; - uint32_t align = ODP_CACHE_LINE_SIZE; - - /* Pool-specific param overrides config file 'align_offset' value */ - if (pool_cfg->align_offset.in_use) - offset = pool_cfg->align_offset.value; /* pool cfg */ - else - offset = em_shm->opt.pool.align_offset; /* cfg file */ - - /* Set subpool minimum alignment */ - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { - if (align > capa->pkt.max_align) - align = capa->pkt.max_align; - } else { - if (align > capa->buf.max_align) - align = capa->buf.max_align; - } - - *align_offset = offset; - *odp_align = align; - - /* verify alignment requirements */ - if (!POWEROF2(align) || align <= offset) - return -1; - - return 0; -} - -/* - * pool_create() helper: determine user area size. - */ -static int -set_uarea_size(const em_pool_cfg_t *pool_cfg, uint32_t align_offset, - size_t *uarea_req_size/*out*/, size_t *uarea_pad_size/*out*/) -{ - size_t req_size = 0; - size_t pad_size = 0; - size_t max_size = 0; - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - - if (pool_cfg->user_area.in_use) /* use pool-cfg */ - req_size = pool_cfg->user_area.size; - else /* use cfg-file */ - req_size = em_shm->opt.pool.user_area_size; - - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { - pad_size = req_size; - max_size = MIN(capa->pkt.max_uarea_size, - EM_EVENT_USER_AREA_MAX_SIZE); - } else if (req_size > 0) { - /* EM_EVENT_TYPE_SW: bufs */ - /* Note: contains align_offset extra space for adjustment */ - pad_size = ROUND_UP(req_size + align_offset, 32); - max_size = EM_EVENT_USER_AREA_MAX_SIZE; - } - - if (req_size > max_size) - return -1; - - *uarea_req_size = req_size; - *uarea_pad_size = pad_size; - return 0; -} - -/* - * pool_create() helper: set the pkt headroom - */ -static int -set_pkt_headroom(const em_pool_cfg_t *pool_cfg, - uint32_t *pkt_headroom /*out*/, - uint32_t *max_headroom /*out, for err print only*/) -{ - const odp_pool_capability_t *capa = - &em_shm->mpool_tbl.odp_pool_capability; - /* default value from cfg file */ - uint32_t headroom = em_shm->opt.pool.pkt_headroom; - - /* Pool-specific param overrides config file value */ - if (pool_cfg->pkt.headroom.in_use) - headroom = pool_cfg->pkt.headroom.value; - - *pkt_headroom = headroom; - *max_headroom = capa->pkt.max_headroom; - - if (unlikely(headroom > capa->pkt.max_headroom)) - return -1; - - return 0; -} - -/** Helper to create_subpools() */ -static void set_pool_params_pkt(odp_pool_param_t *pool_params /* out */, - uint32_t size, uint32_t num, uint32_t cache_size, - uint32_t align_offset, uint32_t odp_align, - uint32_t uarea_size, uint32_t pkt_headroom) -{ - const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; - - odp_pool_param_init(pool_params); - - pool_params->type = ODP_POOL_PACKET; - /* num == max_num, helps pool-info stats calculation */ - pool_params->pkt.num = num; - pool_params->pkt.max_num = num; - - if (size > align_offset) - size = size - align_offset; - else - size = 1; /* 0:default, can be big => use 1 */ - /* len == max_len */ - pool_params->pkt.len = size; - pool_params->pkt.max_len = size; - pool_params->pkt.seg_len = size; - pool_params->pkt.align = odp_align; - /* - * Reserve space for the event header in each packet's - * ODP-user-area: - */ - pool_params->pkt.uarea_size = sizeof(event_hdr_t) + 
uarea_size; - /* - * Set the pkt headroom. - * Make sure the alloc-alignment fits into the headroom. - */ - pool_params->pkt.headroom = pkt_headroom; - if (pkt_headroom < align_offset) - pool_params->pkt.headroom = align_offset; - - pool_params->pkt.cache_size = cache_size; - - /* Pkt pool statistics */ - pool_params->stats.all = 0; - if (em_shm->opt.pool.statistics_enable) { - if (capa->pkt.stats.bit.available) - pool_params->stats.bit.available = 1; - if (capa->pkt.stats.bit.cache_available) - pool_params->stats.bit.cache_available = 1; - } -} - -/** Helper to create_subpools() */ -static void set_pool_params_buf(odp_pool_param_t *pool_params /* out */, - uint32_t size, uint32_t num, uint32_t cache_size, - uint32_t odp_align, uint32_t uarea_size) -{ - const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; - - odp_pool_param_init(pool_params); - - pool_params->type = ODP_POOL_BUFFER; - pool_params->buf.num = num; - pool_params->buf.size = size + sizeof(event_hdr_t) + uarea_size; - pool_params->buf.align = odp_align; - pool_params->buf.cache_size = cache_size; - - /* Buf pool statistics */ - pool_params->stats.all = 0; - if (em_shm->opt.pool.statistics_enable) { - if (capa->buf.stats.bit.available) - pool_params->stats.bit.available = 1; - if (capa->buf.stats.bit.cache_available) - pool_params->stats.bit.cache_available = 1; - } -} - -static int -create_subpools(const em_pool_cfg_t *pool_cfg, - uint32_t align_offset, uint32_t odp_align, - uint32_t uarea_size, uint32_t pkt_headroom, - mpool_elem_t *mpool_elem /*out*/) -{ - const int num_subpools = pool_cfg->num_subpools; - mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl; - - for (int i = 0; i < num_subpools; i++) { - char pool_name[ODP_POOL_NAME_LEN]; - odp_pool_param_t pool_params; - uint32_t size = pool_cfg->subpool[i].size; - uint32_t num = pool_cfg->subpool[i].num; - uint32_t cache_size = pool_cfg->subpool[i].cache_size; - - if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { - set_pool_params_pkt(&pool_params /* out */, - size, num, cache_size, - align_offset, odp_align, - uarea_size, pkt_headroom); - } else { /* pool_cfg->event_type == EM_EVENT_TYPE_SW */ - set_pool_params_buf(&pool_params /* out */, - size, num, cache_size, - odp_align, uarea_size); - } - - snprintf(pool_name, sizeof(pool_name), "%" PRI_POOL ":%d-%s", - mpool_elem->em_pool, i, mpool_elem->name); - pool_name[sizeof(pool_name) - 1] = '\0'; - - odp_pool_t odp_pool = odp_pool_create(pool_name, &pool_params); - - if (unlikely(odp_pool == ODP_POOL_INVALID)) - return -1; - - int odp_pool_idx = odp_pool_index(odp_pool); - - if (unlikely(odp_pool_idx < 0)) - return -2; - - /* Store mapping from odp-pool (idx) to em-pool */ - mpool_tbl->pool_odp2em[odp_pool_idx] = mpool_elem->em_pool; - - mpool_elem->odp_pool[i] = odp_pool; - mpool_elem->size[i] = pool_cfg->subpool[i].size; - mpool_elem->num_subpools++; /* created subpools for delete */ - - /*odp_pool_print(odp_pool);*/ - } - - return 0; -} - -em_pool_t -pool_create(const char *name, em_pool_t req_pool, const em_pool_cfg_t *pool_cfg) -{ - /* Allocate a free EM pool */ - const em_pool_t pool = pool_alloc(req_pool/* requested or undef*/); - - if (unlikely(pool == EM_POOL_UNDEF)) - return EM_POOL_UNDEF; - - mpool_elem_t *mpool_elem = pool_elem_get(pool); - - /* Sanity check */ - if (!mpool_elem || mpool_elem->em_pool != pool) - return EM_POOL_UNDEF; - - mpool_elem->event_type = pool_cfg->event_type; - /* Store successfully created subpools later */ - mpool_elem->num_subpools = 0; - /* Store the event pool 
name, if given */ - if (name && *name) { - strncpy(mpool_elem->name, name, sizeof(mpool_elem->name)); - mpool_elem->name[sizeof(mpool_elem->name) - 1] = '\0'; - } else { - mpool_elem->name[0] = '\0'; - } - - em_pool_cfg_t sorted_cfg; - - /* - * Sort the subpool cfg in ascending order based on the buffer size - */ - sort_pool_cfg(pool_cfg, &sorted_cfg/*out*/); - /* Use sorted_cfg instead of pool_cfg from here on */ - - /* - * Set the cache-size of each subpool in the EM-pool - */ - set_poolcache_size(&sorted_cfg); - - /* Store the sorted config */ - mpool_elem->pool_cfg = sorted_cfg; - - /* - * Event payload alignment requirement for the pool - */ - uint32_t align_offset = 0; - uint32_t odp_align = 0; - int err = set_align(&sorted_cfg, &align_offset/*out*/, - &odp_align/*out*/); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, - "EM-pool:\"%s\" align mismatch:\n" - "align:%u cfg:align_offset:%u", - name, odp_align, align_offset); - goto error; - } - /* store the align offset, needed in pkt-alloc */ - mpool_elem->align_offset = align_offset; - - /* - * Event user area size. - * Pool-specific param overrides config file 'user_area_size' value - */ - size_t uarea_req_size = 0; - size_t uarea_pad_size = 0; - - err = set_uarea_size(&sorted_cfg, align_offset, - &uarea_req_size/*out*/, &uarea_pad_size/*out*/); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, - "EM-pool:\"%s\" invalid uarea config:\n" - "req.size:%zu => padded uarea size:%zu", - name, uarea_req_size, uarea_pad_size); - goto error; - } - - /* store the user_area sizes, needed in alloc */ - mpool_elem->user_area.req_size = uarea_req_size & UINT16_MAX; - mpool_elem->user_area.pad_size = uarea_pad_size & UINT16_MAX; - - EM_DBG("EM-pool:\"%s\":\n" - " user_area: .req_size=%zu .pad_size=%zu align_offset=%u\n", - name, uarea_req_size, uarea_pad_size, align_offset); - - /* - * Set the headroom for events in EM packet pools - */ - uint32_t pkt_headroom = 0; - uint32_t max_headroom = 0; - - if (sorted_cfg.event_type == EM_EVENT_TYPE_PACKET) { - err = set_pkt_headroom(&sorted_cfg, &pkt_headroom/*out*/, - &max_headroom/*out*/); - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE, - "EM-pool:\"%s\" invalid pkt headroom:\n" - "headroom:%u vs. max:headroom:%u", - name, pkt_headroom, max_headroom); - goto error; - } - } - - /* - * Create the subpools for the EM event-pool. - * Each EM subpool is an ODP pool. - */ - err = create_subpools(&sorted_cfg, align_offset, odp_align, - uarea_pad_size, pkt_headroom, mpool_elem /*out*/); - if (unlikely(err)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_ALLOC_FAILED), - EM_ESCOPE_POOL_CREATE, - "EM-pool:\"%s\" create fails:%d\n" - "subpools req:%d vs. subpools created:%d", - name, err, sorted_cfg.num_subpools, - mpool_elem->num_subpools); - goto error; - } - - /* - * ESV: preallocate all events in the pool - */ - if (esv_enabled() && em_shm->opt.esv.prealloc_pools) - pool_prealloc(mpool_elem); - - /* Success! 
*/ - return mpool_elem->em_pool; - -error: - (void)pool_delete(pool); - return EM_POOL_UNDEF; -} - -em_status_t -pool_delete(em_pool_t pool) -{ - mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl; - mpool_elem_t *const mpool_elem = pool_elem_get(pool); - int i; - - if (unlikely(mpool_elem == NULL || !pool_allocated(mpool_elem))) - return EM_ERR_BAD_STATE; - - for (i = 0; i < mpool_elem->num_subpools; i++) { - odp_pool_t odp_pool = mpool_elem->odp_pool[i]; - int odp_pool_idx; - int ret; - - if (odp_pool == ODP_POOL_INVALID) - return EM_ERR_NOT_FOUND; - - odp_pool_idx = odp_pool_index(odp_pool); - if (unlikely(odp_pool_idx < 0)) - return EM_ERR_BAD_ID; - - ret = odp_pool_destroy(odp_pool); - if (unlikely(ret)) - return EM_ERR_LIB_FAILED; - - /* Clear mapping from odp-pool (idx) to em-pool */ - mpool_tbl->pool_odp2em[odp_pool_idx] = EM_POOL_UNDEF; - - mpool_elem->odp_pool[i] = ODP_POOL_INVALID; - mpool_elem->size[i] = 0; - } - - mpool_elem->name[0] = '\0'; - mpool_elem->event_type = EM_EVENT_TYPE_UNDEF; - mpool_elem->num_subpools = 0; - - return pool_free(pool); -} - -em_pool_t -pool_find(const char *name) -{ - if (name && *name) { - for (int i = 0; i < EM_CONFIG_POOLS; i++) { - const mpool_elem_t *mpool_elem = - &em_shm->mpool_tbl.pool[i]; - - if (pool_allocated(mpool_elem) && - !strncmp(name, mpool_elem->name, EM_POOL_NAME_LEN)) - return mpool_elem->em_pool; - } - } - - return EM_POOL_UNDEF; -} - -unsigned int -pool_count(void) -{ - return env_atomic32_get(&em_shm->pool_count); -} - -#define POOL_INFO_HDR_STR \ -" id name type offset uarea sizes [size count(used/free) cache]\n" - -#define POOL_INFO_SUBSTR_FMT \ -"%d:[sz=%" PRIu32 " n=%" PRIu32 "(%" PRIu32 "/%" PRIu32 ") $=%" PRIu32 "]" - -#define POOL_INFO_SUBSTR_NO_STATS_FMT \ -"%d:[sz=%" PRIu32 " n=%" PRIu32 "(-/-) cache=%" PRIu32 "]" - -void pool_info_print_hdr(unsigned int num_pools) -{ - if (num_pools == 1) { - EM_PRINT("EM Event Pool\n" - "-------------\n" - POOL_INFO_HDR_STR); - } else { - EM_PRINT("EM Event Pools:%2u\n" - "-----------------\n" - POOL_INFO_HDR_STR, num_pools); - } -} - -void pool_info_print(em_pool_t pool) -{ - em_pool_info_t pool_info; - em_status_t stat; - const char *pool_type; - - stat = em_pool_info(pool, &pool_info/*out*/); - if (unlikely(stat != EM_OK)) { - EM_PRINT(" %-6" PRI_POOL " %-16s n/a n/a n/a n/a [n/a]\n", - pool, "err:n/a"); - return; - } - - pool_type = pool_info.event_type == EM_EVENT_TYPE_SW ? "buf" : "pkt"; - EM_PRINT(" %-6" PRI_POOL " %-16s %4s %02u %02zu %02u ", - pool, pool_info.name, pool_type, - pool_info.align_offset, pool_info.user_area_size, - pool_info.num_subpools); - - for (int i = 0; i < pool_info.num_subpools; i++) { - char subpool_str[42]; - - if (em_shm->opt.pool.statistics_enable) { - snprintf(subpool_str, sizeof(subpool_str), - POOL_INFO_SUBSTR_FMT, i, - pool_info.subpool[i].size, - pool_info.subpool[i].num, - pool_info.subpool[i].used, - pool_info.subpool[i].free, - pool_info.subpool[i].cache_size); - } else { - snprintf(subpool_str, sizeof(subpool_str), - POOL_INFO_SUBSTR_NO_STATS_FMT, i, - pool_info.subpool[i].size, - pool_info.subpool[i].num, - pool_info.subpool[i].cache_size); - } - subpool_str[sizeof(subpool_str) - 1] = '\0'; - EM_PRINT(" %-42s", subpool_str); - } - - EM_PRINT("\n"); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +#ifndef __clang__ +COMPILE_TIME_ASSERT(EM_POOL_DEFAULT > (em_pool_t)0 && + EM_POOL_DEFAULT < (em_pool_t)EM_CONFIG_POOLS, + EM_ODP_EM_DEFAULT_POOL_ERROR); +COMPILE_TIME_ASSERT(EM_POOL_UNDEF != EM_POOL_DEFAULT, + EM_ODP_EM_POOL_UNDEF_ERROR); +#endif +COMPILE_TIME_ASSERT(EM_EVENT_USER_AREA_MAX_SIZE < UINT16_MAX, + EM_ODP_EM_EVENT_USER_AREA_MAX_SIZE_ERROR); +/* + * Max supported value for the config file option 'pool.align_offset'. + * + * The limitation is set by events based on odp-bufs that include the ev-hdr at + * the beginning of the odp-buf payload - the alignment is adjusted into the end + * of the ev-hdr. + * Events based on odp-pkts do not have this restriction but the same limit is + * used for all. 
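+ *
+ * Illustration (hypothetical numbers, the real values depend on the
+ * event_hdr_t layout): if sizeof(event_hdr_t) == 64 and
+ * offsetof(event_hdr_t, end_hdr_data) == 48, then ALIGN_OFFSET_MAX == 16,
+ * i.e. the alignment adjustment must fit into the last 16 bytes of the
+ * event header placed at the start of the odp-buf payload.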
+ */ +#define ALIGN_OFFSET_MAX ((int)(sizeof(event_hdr_t) - \ + offsetof(event_hdr_t, end_hdr_data))) + +static inline mpool_elem_t * +mpool_poolelem2pool(objpool_elem_t *const objpool_elem) +{ + return (mpool_elem_t *)((uintptr_t)objpool_elem - + offsetof(mpool_elem_t, objpool_elem)); +} + +static em_pool_t +pool_alloc(em_pool_t pool) +{ + mpool_elem_t *mpool_elem; + + if (pool == EM_POOL_UNDEF) { + objpool_elem_t *objpool_elem = + objpool_rem(&em_shm->mpool_pool.objpool, em_core_id()); + + if (unlikely(objpool_elem == NULL)) + return EM_POOL_UNDEF; + + mpool_elem = mpool_poolelem2pool(objpool_elem); + } else { + int ret; + + mpool_elem = pool_elem_get(pool); + if (unlikely(mpool_elem == NULL)) + return EM_POOL_UNDEF; + + ret = objpool_rem_elem(&em_shm->mpool_pool.objpool, + &mpool_elem->objpool_elem); + if (unlikely(ret != 0)) + return EM_POOL_UNDEF; + } + + env_atomic32_inc(&em_shm->pool_count); + return mpool_elem->em_pool; +} + +static em_status_t +pool_free(em_pool_t pool) +{ + mpool_elem_t *mpool_elem = pool_elem_get(pool); + + if (unlikely(mpool_elem == NULL)) + return EM_ERR_BAD_ID; + + objpool_add(&em_shm->mpool_pool.objpool, + mpool_elem->objpool_elem.subpool_idx, + &mpool_elem->objpool_elem); + + env_atomic32_dec(&em_shm->pool_count); + return EM_OK; +} + +static int event_type_from_string(const char *str, em_event_type_t *event_type /*out*/) +{ + if (strstr(str, "EM_EVENT_TYPE_SW")) { + *event_type = EM_EVENT_TYPE_SW; + } else if (strstr(str, "EM_EVENT_TYPE_PACKET")) { + *event_type = EM_EVENT_TYPE_PACKET; + } else if (strstr(str, "EM_EVENT_TYPE_VECTOR")) { + *event_type = EM_EVENT_TYPE_VECTOR; + } else { + EM_LOG(EM_LOG_ERR, "Event type %s not supported.\n", str); + return -1; + } + + return 0; +} + +/* Read option: startup_pools.conf[i].pool_cfg.subpools[j] from the EM config file */ +static inline int read_config_subpool(const libconfig_list_t *subpool, int index, + const char *pool_cfg_str, em_pool_cfg_t *cfg/*out*/) +{ + int ret; + /* Option: subpools[index].size */ + ret = em_libconfig_list_lookup_int(subpool, index, "size", + (int *)&cfg->subpool[index].size); + if (unlikely(ret != 1)) { + EM_LOG(EM_LOG_ERR, + "Option '%s.subpools[%d].size' not found or wrong type.\n", + pool_cfg_str, index); + return -1; + } + + if (cfg->subpool[index].size <= 0) { + EM_LOG(EM_LOG_ERR, "Invalid '%s.subpools[%d].size'.\n", + pool_cfg_str, index); + return -1; + } + + /* Option: subpools[index].num */ + ret = em_libconfig_list_lookup_int(subpool, index, "num", + (int *)&cfg->subpool[index].num); + if (unlikely(ret != 1)) { + EM_LOG(EM_LOG_ERR, + "Option '%s.subpools[%d].num' not found or wrong type.\n", + pool_cfg_str, index); + return -1; + } + + if (cfg->subpool[index].num <= 0) { + EM_LOG(EM_LOG_ERR, "Invalid '%s.subpools[%d].num'.\n", + pool_cfg_str, index); + return -1; + } + + /* + * Option: subpools[index].cache_size + * Not mandatory + */ + ret = em_libconfig_list_lookup_int(subpool, index, "cache_size", + (int *)&cfg->subpool[index].cache_size); + + /* If cache_size is given, check if it is valid */ + if (ret == 1) { + uint32_t min_cache_size; + const odp_pool_capability_t *capa; + + capa = &em_shm->mpool_tbl.odp_pool_capability; + + min_cache_size = (cfg->event_type == EM_EVENT_TYPE_SW) ? 
+				capa->buf.min_cache_size : capa->pkt.min_cache_size;
+
+		if (unlikely(cfg->subpool[index].cache_size < min_cache_size)) {
+			EM_LOG(EM_LOG_ERR,
+			       "'%s.subpools[%d].cache_size' too small.\n",
+			       pool_cfg_str, index);
+			return -1;
+		}
+	} else if (ret == 0) { /* cache_size is given but with the wrong data type */
+		EM_LOG(EM_LOG_ERR,
+		       "'%s.subpools[%d].cache_size' wrong data type.\n",
+		       pool_cfg_str, index);
+		return -1;
+	}
+
+	/* No need to return fail -1 when cache_size not given (ret == -1) */
+	return 0;
+}
+
+static int is_pool_type_supported(em_event_type_t type,
+				  const char **err_str/*out*/)
+{
+	const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability;
+
+	if (type == EM_EVENT_TYPE_SW) {
+		if (capa->buf.max_pools == 0) {
+			*err_str = "SW (buf) pool type unsupported";
+			return -1;
+		}
+	} else if (type == EM_EVENT_TYPE_PACKET) {
+		if (capa->pkt.max_pools == 0) {
+			*err_str = "PACKET pool type unsupported";
+			return -1;
+		}
+	} else if (type == EM_EVENT_TYPE_VECTOR) {
+		if (capa->vector.max_pools == 0) {
+			*err_str = "VECTOR pool type unsupported";
+			return -1;
+		}
+	} else {
+		*err_str = "Pool type unsupported, use _SW, _PACKET or _VECTOR";
+		return -1;
+	}
+
+	return 0;
+}
+
+static inline bool is_align_offset_valid(const em_pool_cfg_t *pool_cfg)
+{
+	if (pool_cfg->align_offset.in_use &&
+	    (pool_cfg->align_offset.value > ALIGN_OFFSET_MAX ||
+	     !POWEROF2(pool_cfg->align_offset.value))) {
+		return false;
+	}
+
+	return true;
+}
+
+static inline int is_user_area_valid(const em_pool_cfg_t *pool_cfg,
+				     const odp_pool_capability_t *capa,
+				     const char **err_str/*out*/)
+{
+	/* No need to check when the pool-specific value is not used */
+	if (!pool_cfg->user_area.in_use)
+		return 0;
+
+	if (pool_cfg->user_area.size > EM_EVENT_USER_AREA_MAX_SIZE) {
+		*err_str = "Event user area too large";
+		return -1;
+	}
+
+	if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) {
+		size_t req_odp_uarea_sz = pool_cfg->user_area.size +
+					  sizeof(event_hdr_t);
+		if (req_odp_uarea_sz > capa->pkt.max_uarea_size) {
+			*err_str = "ODP pkt max uarea not large enough";
+			return -1;
+		}
+	}
+	if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) {
+		size_t req_odp_uarea_sz = pool_cfg->user_area.size +
+					  sizeof(event_hdr_t);
+		if (req_odp_uarea_sz > capa->vector.max_uarea_size) {
+			*err_str = "ODP pkt-vector max uarea not large enough";
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Read option: startup_pools.conf[index].pool_cfg.align_offset from the EM config file */
+static inline int read_config_align_offset(const libconfig_group_t *align_offset,
+					   const char *pool_cfg_str,
+					   em_pool_cfg_t *cfg/*out*/)
+{
+	int ret;
+
+	/* Option: startup_pools.conf[index].pool_cfg.align_offset.in_use */
+	ret = em_libconfig_group_lookup_bool(align_offset, "in_use",
+					     &cfg->align_offset.in_use);
+	if (unlikely(!ret)) {
+		EM_LOG(EM_LOG_ERR,
+		       "'%s.align_offset.in_use' not found or wrong type\n",
+		       pool_cfg_str);
+		return -1;
+	}
+
+	/* Option: startup_pools.conf[index].pool_cfg.align_offset.value */
+	ret = em_libconfig_group_lookup_int(align_offset, "value",
+					    (int *)&cfg->align_offset.value);
+	if (unlikely(!ret)) {
+		EM_LOG(EM_LOG_ERR,
+		       "'%s.align_offset.value' not found or wrong type\n",
+		       pool_cfg_str);
+		return -1;
+	}
+
+	/* Check whether the given value is valid or not */
+	if (!is_align_offset_valid(cfg)) {
+		EM_LOG(EM_LOG_ERR, "Invalid '%s.align_offset.value': %d\n"
+		       "Max align_offset is %d and it must be a power of 2\n",
+		       pool_cfg_str, cfg->align_offset.value, ALIGN_OFFSET_MAX);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Read option: startup_pools.conf[index].pool_cfg.user_area from the EM config file */
+static inline int read_config_user_area(const libconfig_group_t *user_area,
+					const char *pool_cfg_str,
+					em_pool_cfg_t *cfg/*out*/)
+{
+	int ret;
+	const odp_pool_capability_t *capa;
+	const char *err_str = "";
+
+	/* Option: startup_pools.conf[index].pool_cfg.user_area.in_use */
+	ret = em_libconfig_group_lookup_bool(user_area, "in_use",
+					     &cfg->user_area.in_use);
+	if (unlikely(!ret)) {
+		EM_LOG(EM_LOG_ERR,
+		       "'%s.user_area.in_use' not found or wrong type\n",
+		       pool_cfg_str);
+		return -1;
+	}
+
+	/* Option: startup_pools.conf[index].pool_cfg.user_area.size */
+	ret = em_libconfig_group_lookup_int(user_area, "size",
+					    (int *)&cfg->user_area.size);
+	if (unlikely(!ret)) {
+		EM_LOG(EM_LOG_ERR,
+		       "'%s.user_area.size' not found or wrong type\n",
+		       pool_cfg_str);
+		return -1;
+	}
+
+	capa = &em_shm->mpool_tbl.odp_pool_capability;
+	/* Check whether the given value is valid or not */
+	if (is_user_area_valid(cfg, capa, &err_str) < 0) {
+		EM_LOG(EM_LOG_ERR, "%s: %zu\n", err_str, cfg->user_area.size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Read option: startup_pools.conf[index].pool_cfg.pkt.headroom from the EM config file */
+static inline int read_config_pkt_headroom(const libconfig_group_t *pkt_headroom,
+					   const char *pool_cfg_str,
+					   em_pool_cfg_t *cfg/*out*/)
+{
+	int ret;
+	const odp_pool_capability_t *capa;
+
+	/* Option: startup_pools.conf[index].pool_cfg.pkt.headroom.in_use */
+	ret = em_libconfig_group_lookup_bool(pkt_headroom, "in_use",
+					     &cfg->pkt.headroom.in_use);
+	if (unlikely(!ret)) {
+		EM_LOG(EM_LOG_ERR,
+		       "'%s.pkt.headroom.in_use' not found or wrong type\n",
+		       pool_cfg_str);
+		return -1;
+	}
+
+	/* Option: startup_pools.conf[index].pool_cfg.pkt.headroom.value */
+	ret = em_libconfig_group_lookup_int(pkt_headroom, "value",
+					    (int *)&cfg->pkt.headroom.value);
+	if (unlikely(!ret)) {
+		EM_LOG(EM_LOG_ERR,
+		       "'%s.pkt.headroom.value' not found or wrong type\n",
+		       pool_cfg_str);
+		return -1;
+	}
+
+	/* Check whether the given value is valid or not */
+	capa = &em_shm->mpool_tbl.odp_pool_capability;
+	if (cfg->pkt.headroom.in_use &&
+	    cfg->pkt.headroom.value > capa->pkt.max_headroom) {
+		EM_LOG(EM_LOG_ERR,
+		       "'%s.pkt.headroom.value' %u too large (max=%u)\n",
+		       pool_cfg_str, cfg->pkt.headroom.value,
+		       capa->pkt.max_headroom);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Read option: startup_pools.conf[index] from the EM config file */
+static int read_config_startup_pools_conf(const libconfig_list_t *list, int index)
+{
+	int ret;
+	int pool;
+	int ret_pool;
+	int num_subpools;
+	const char *pool_name;
+	const char *event_type;
+	char pool_cfg_str[40];
+	libconfig_group_t *pool_cfg;
+	const libconfig_list_t *subpool;
+	const libconfig_group_t *headroom;
+	const libconfig_group_t *user_area;
+	const libconfig_group_t *align_offset;
+	startup_pool_conf_t *conf = &em_shm->opt.startup_pools.conf[index];
+	em_pool_cfg_t *cfg = &conf->cfg;
+	const char *err_str = "";
+
+	snprintf(pool_cfg_str, sizeof(pool_cfg_str),
+		 "startup_pools.conf[%d].pool_cfg", index);
+
+	pool_cfg = em_libconfig_list_lookup_group(list, index, "pool_cfg");
+	if (!pool_cfg) {
+		EM_LOG(EM_LOG_ERR, "Conf option '%s' not found\n", pool_cfg_str);
+		return -1;
+	}
+
+	em_pool_cfg_init(cfg);
+
+	/*
+	 * Read the mandatory fields first; if any of them is missing, there
+	 * is no need to proceed to the optional fields.
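+	 *
+	 * A minimal sketch of one conf[index] entry as parsed below (libconfig
+	 * syntax; all values are illustrative, only the option names are taken
+	 * from this parser):
+	 *
+	 *   {
+	 *       pool = 2;          (optional)
+	 *       name = "my-pool";  (optional)
+	 *       pool_cfg = {
+	 *           event_type = "EM_EVENT_TYPE_PACKET";
+	 *           num_subpools = 2;
+	 *           subpools = ({size = 256; num = 1024; cache_size = 64;},
+	 *                       {size = 1518; num = 512;});
+	 *           align_offset = {in_use = true; value = 0;};        (optional)
+	 *           user_area = {in_use = true; size = 16;};           (optional)
+	 *           pkt = {headroom = {in_use = true; value = 128;};}; (optional)
+	 *       };
+	 *   }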
+	 */
+
+	/* Option: startup_pools.conf[index].pool_cfg.event_type */
+	ret = em_libconfig_group_lookup_string(pool_cfg, "event_type", &event_type);
+	if (unlikely(!ret)) {
+		EM_LOG(EM_LOG_ERR, "'%s.event_type' not found.\n", pool_cfg_str);
+		return -1;
+	}
+
+	ret = event_type_from_string(event_type, &cfg->event_type/*out*/);
+	if (unlikely(ret < 0))
+		return -1;
+
+	ret = is_pool_type_supported(cfg->event_type, &err_str/*out*/);
+	if (unlikely(ret)) {
+		EM_LOG(EM_LOG_ERR, "%s\n", err_str);
+		return -1;
+	}
+
+	/* Option: startup_pools.conf[index].pool_cfg.num_subpools */
+	ret = em_libconfig_group_lookup_int(pool_cfg, "num_subpools",
+					    &cfg->num_subpools);
+	if (unlikely(!ret)) {
+		EM_LOG(EM_LOG_ERR, "'%s.num_subpools' not found.\n", pool_cfg_str);
+		return -1;
+	}
+
+	if (cfg->num_subpools <= 0 || cfg->num_subpools > EM_MAX_SUBPOOLS) {
+		EM_LOG(EM_LOG_ERR, "Invalid '%s.num_subpools'\n"
+		       "Valid value range is [1, %d]\n", pool_cfg_str,
+		       EM_MAX_SUBPOOLS);
+		return -1;
+	}
+
+	/* Option: startup_pools.conf[index].pool_cfg.subpools */
+	subpool = em_libconfig_group_lookup_list(pool_cfg, "subpools");
+	if (unlikely(!subpool)) {
+		EM_LOG(EM_LOG_ERR, "'%s.subpools' not found.\n", pool_cfg_str);
+		return -1;
+	}
+
+	num_subpools = em_libconfig_list_length(subpool);
+	if (unlikely(num_subpools != cfg->num_subpools)) {
+		EM_LOG(EM_LOG_ERR, "The number of subpool configurations given\n"
+		       "in '%s.subpools' does not match '%s.num_subpools'.\n",
+		       pool_cfg_str, pool_cfg_str);
+		return -1;
+	}
+
+	for (int j = 0; j < num_subpools; j++) {
+		ret = read_config_subpool(subpool, j, pool_cfg_str, cfg);
+
+		if (unlikely(ret < 0))
+			return -1;
+	}
+
+	/* The following are optional configurations */
+
+	/* Option: startup_pools.conf[index].pool */
+	ret_pool = em_libconfig_list_lookup_int(list, index, "pool", &pool);
+	if (unlikely(ret_pool == 0)) {
+		EM_LOG(EM_LOG_ERR,
+		       "'startup_pools.conf[%d].pool' has wrong data type (expected int)\n",
+		       index);
+		return -1;
+	}
+
+	/* startup_pools.conf[index].pool is provided */
+	if (ret_pool == 1) {
+		if (pool < 0 || pool > EM_CONFIG_POOLS) {
+			EM_LOG(EM_LOG_ERR, "Invalid pool ID %d, valid IDs are within [0, %d]\n",
+			       pool, EM_CONFIG_POOLS);
+			return -1;
+		}
+
+		conf->pool = (em_pool_t)(uintptr_t)pool;
+	}
+
+	/* Option: startup_pools.conf[index].name */
+	ret = em_libconfig_list_lookup_string(list, index, "name", &pool_name);
+	if (unlikely(ret == 0)) {
+		EM_LOG(EM_LOG_ERR,
+		       "'startup_pools.conf[%d].name' has wrong data type (expected string)\n",
+		       index);
+		return -1;
+	}
+
+	if (ret_pool == 1 && ret == 1) { /* Both pool and name have been given */
+		const char *is_default_name = strstr(pool_name, EM_POOL_DEFAULT_NAME);
+		bool is_default_id = (conf->pool == EM_POOL_DEFAULT);
+
+		if (is_default_name && !is_default_id) {
+			EM_LOG(EM_LOG_ERR,
+			       "Default name \"%s\" with non-default ID %d\n",
+			       EM_POOL_DEFAULT_NAME, (int)(uintptr_t)conf->pool);
+			return -1;
+		}
+
+		if (is_default_id && !is_default_name) {
+			EM_LOG(EM_LOG_ERR,
+			       "Default pool ID %d with non-default name \"%s\"\n",
+			       (int)(uintptr_t)EM_POOL_DEFAULT, pool_name);
+			return -1;
+		}
+	}
+
+	if (ret == 1) { /* Pool name is given and no conflict with pool ID */
+		strncpy(conf->name, pool_name, EM_POOL_NAME_LEN - 1);
+		conf->name[EM_POOL_NAME_LEN - 1] = '\0';
+	}
+
+	align_offset = em_libconfig_group_lookup_group(pool_cfg, "align_offset");
+	/* align_offset is provided */
+	if (align_offset && read_config_align_offset(align_offset, pool_cfg_str, cfg))
+		return -1;
+
+	user_area = em_libconfig_group_lookup_group(pool_cfg, "user_area");
+	if (user_area && read_config_user_area(user_area, pool_cfg_str, cfg))
+		return -1;
+
+	headroom = em_libconfig_group_lookup_group(pool_cfg, "pkt.headroom");
+	if (headroom) {
+		if (read_config_pkt_headroom(headroom, pool_cfg_str, cfg))
+			return -1;
+
+		/* Ignore the given pkt.headroom for non-packet event types */
+		if (conf->cfg.event_type != EM_EVENT_TYPE_PACKET)
+			EM_PRINT("pkt.headroom will be ignored for non-packet type!\n");
+	}
+
+	return 0;
+}
+
+/* Print option: startup_pools from the EM config file */
+static void print_config_startup_pools(void)
+{
+	startup_pool_conf_t *conf;
+	char str_conf[32];
+	const char *str = "";
+
+	EM_PRINT(" startup_pools.num: %u\n", em_shm->opt.startup_pools.num);
+
+	for (uint32_t i = 0; i < em_shm->opt.startup_pools.num; i++) {
+		conf = &em_shm->opt.startup_pools.conf[i];
+
+		snprintf(str_conf, sizeof(str_conf), " startup_pools.conf[%u]", i);
+
+		if (*conf->name)
+			EM_PRINT("%s.name: %s\n", str_conf, conf->name);
+
+		if (conf->pool)
+			EM_PRINT("%s.pool: %d\n", str_conf, (int)(uintptr_t)conf->pool);
+
+		/* event type */
+		if (conf->cfg.event_type == EM_EVENT_TYPE_SW)
+			str = "EM_EVENT_TYPE_SW";
+		else if (conf->cfg.event_type == EM_EVENT_TYPE_PACKET)
+			str = "EM_EVENT_TYPE_PACKET";
+		else if (conf->cfg.event_type == EM_EVENT_TYPE_VECTOR)
+			str = "EM_EVENT_TYPE_VECTOR";
+		EM_PRINT("%s.pool_cfg.event_type: %s\n", str_conf, str);
+
+		/* align_offset */
+		str = conf->cfg.align_offset.in_use ? "true" : "false";
+		EM_PRINT("%s.pool_cfg.align_offset.in_use: %s\n", str_conf, str);
+		EM_PRINT("%s.pool_cfg.align_offset.value: %d\n", str_conf,
+			 conf->cfg.align_offset.value);
+
+		/* user area */
+		str = conf->cfg.user_area.in_use ? "true" : "false";
+		EM_PRINT("%s.pool_cfg.user_area.in_use: %s\n", str_conf, str);
+		EM_PRINT("%s.pool_cfg.user_area.size: %zu\n", str_conf,
+			 conf->cfg.user_area.size);
+
+		/* pkt headroom */
+		str = conf->cfg.pkt.headroom.in_use ? "true" : "false";
+		EM_PRINT("%s.pool_cfg.pkt.headroom.in_use: %s\n", str_conf, str);
+		EM_PRINT("%s.pool_cfg.pkt.headroom.value: %d\n", str_conf,
+			 conf->cfg.pkt.headroom.value);
+
+		/* number of subpools */
+		EM_PRINT("%s.pool_cfg.num_subpools: %u\n", str_conf,
+			 conf->cfg.num_subpools);
+
+		/* subpools */
+		for (int j = 0; j < conf->cfg.num_subpools; j++) {
+			EM_PRINT("%s.pool_cfg.subpools[%d].size: %u\n", str_conf,
+				 j, conf->cfg.subpool[j].size);
+
+			EM_PRINT("%s.pool_cfg.subpools[%d].num: %u\n", str_conf,
+				 j, conf->cfg.subpool[j].num);
+
+			EM_PRINT("%s.pool_cfg.subpools[%d].cache_size: %u\n",
+				 str_conf, j, conf->cfg.subpool[j].cache_size);
+		}
+	}
+}
+
+/* Read option: startup_pools from the EM config file */
+static int read_config_startup_pools(void)
+{
+	int ret;
+	int list_len;
+	int num_startup_pools;
+	const libconfig_list_t *conf_list;
+	libconfig_setting_t *default_setting;
+	libconfig_setting_t *runtime_setting;
+	libconfig_setting_t *startup_pools_setting;
+
+	em_libconfig_lookup(&em_shm->libconfig, "startup_pools",
+			    &default_setting, &runtime_setting);
+
+	/*
+	 * Option: startup_pools
+	 *
+	 * Optional. When a runtime configuration file is provided and it
+	 * contains the option "startup_pools", that option is used. When
+	 * "startup_pools" is not specified in the given runtime configuration
+	 * file, this function returns without error and no startup pools are
+	 * created. Note that it does not fall back to the "startup_pools"
+	 * option specified in the default configuration file.
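+	 *
+	 * In short, the selection implemented below is:
+	 *   runtime conf file given, option present -> use the runtime option
+	 *   runtime conf file given, option absent  -> no startup pools, no error
+	 *   no runtime conf file, option in default -> use the default option
+	 *   no runtime conf file, option absent     -> no startup pools, no error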
+ */ + if (em_shm->libconfig.has_cfg_runtime) { + if (runtime_setting) + startup_pools_setting = runtime_setting; + else + return 0; + } else { + if (default_setting) + startup_pools_setting = default_setting; + else + return 0; + } + + EM_PRINT("EM-startup_pools config:\n"); + /* + * Option: startup_pools.num + * Mandatory when startup_pools option is given + */ + ret = em_libconfig_setting_lookup_int(startup_pools_setting, "num", + &num_startup_pools); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Option 'startup_pools.num' not found\n"); + return -1; + } + + if (num_startup_pools <= 0 || num_startup_pools > EM_CONFIG_POOLS - 1) { + EM_LOG(EM_LOG_ERR, + "Number of startup_pools %d is too large or too small\n" + "Valid value range is [1, %d]\n", + num_startup_pools, EM_CONFIG_POOLS - 1); + return -1; + } + + conf_list = em_libconfig_setting_get_list(startup_pools_setting, "conf"); + if (!conf_list) { + EM_LOG(EM_LOG_ERR, "Conf option 'startup_pools.conf' not found\n"); + return -1; + } + + list_len = em_libconfig_list_length(conf_list); + if (list_len != num_startup_pools) { + EM_LOG(EM_LOG_ERR, + "The number of pool configuration(s) given in\n" + "'startup_pools.conf':%d does not match number of\n" + "startup_pools specified in 'startup_pools.num': %d\n", + list_len, num_startup_pools); + return -1; + } + + for (int i = 0; i < list_len; i++) { + if (read_config_startup_pools_conf(conf_list, i) < 0) + return -1; + } + + em_shm->opt.startup_pools.num = num_startup_pools; + + print_config_startup_pools(); + return 0; +} + +/* Read option: pool from the EM config file */ +static int read_config_pool(void) +{ + const char *conf_str; + bool val_bool = false; + int val = 0; + int ret; + + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + + EM_PRINT("EM-pool config:\n"); + + /* + * Option: pool.statistics_enable + */ + conf_str = "pool.statistics_enable"; + ret = em_libconfig_lookup_bool(&em_shm->libconfig, conf_str, &val_bool); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + + if (val_bool) { + if (!capa->buf.stats.bit.available || !capa->pkt.stats.bit.available) { + EM_LOG(EM_LOG_ERR, "! %s: NOT supported by ODP - disabling!\n", + conf_str); + val_bool = false; /* disable pool statistics, no ODP support! */ + } + + if (!capa->buf.stats.bit.cache_available || !capa->pkt.stats.bit.cache_available) { + EM_LOG(EM_LOG_ERR, "! %s: omit events in pool cache, no ODP support!\n", + conf_str); + } + } + + /* store & print the value */ + em_shm->opt.pool.statistics_enable = (int)val_bool; + EM_PRINT(" %s: %s(%d)\n", conf_str, val_bool ? 
"true" : "false", + val_bool); + + /* + * Option: pool.align_offset + */ + conf_str = "pool.align_offset"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + if (val < 0 || val > ALIGN_OFFSET_MAX || !POWEROF2(val)) { + EM_LOG(EM_LOG_ERR, + "Bad config value '%s = %d' (max: %d and value must be power of 2)\n", + conf_str, val, ALIGN_OFFSET_MAX); + return -1; + } + /* store & print the value */ + em_shm->opt.pool.align_offset = val; + EM_PRINT(" %s (default): %d (max: %d)\n", + conf_str, val, ALIGN_OFFSET_MAX); + + /* + * Option: pool.user_area_size + */ + conf_str = "pool.user_area_size"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + if (val < 0 || (unsigned int)val > capa->pkt.max_uarea_size || + val > EM_EVENT_USER_AREA_MAX_SIZE) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", + conf_str, val); + return -1; + } + /* store & print the value */ + em_shm->opt.pool.user_area_size = val; + EM_PRINT(" %s (default): %d (max: %d)\n", + conf_str, val, + MIN(EM_EVENT_USER_AREA_MAX_SIZE, capa->pkt.max_uarea_size)); + + /* + * Option: pool.pkt_headroom + */ + conf_str = "pool.pkt_headroom"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + + if (val < 0 || (unsigned int)val > capa->pkt.max_headroom) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", + conf_str, val); + return -1; + } + /* store & print the value */ + em_shm->opt.pool.pkt_headroom = val; + EM_PRINT(" %s (default): %d (max: %u)\n", + conf_str, val, capa->pkt.max_headroom); + + return 0; +} + +static int +read_config_file(void) +{ + /* Option: pool */ + if (read_config_pool() < 0) + return -1; + + /* Option: startup_pools */ + if (read_config_startup_pools() < 0) + return -1; + + return 0; +} + +em_status_t +pool_init(mpool_tbl_t *const mpool_tbl, mpool_pool_t *const mpool_pool, + const em_pool_cfg_t *default_pool_cfg) +{ + int ret; + em_pool_t pool; + em_pool_t pool_default; + startup_pool_conf_t *startup_pool_conf; + bool default_pool_set = false; + const int cores = em_core_count(); + + memset(mpool_tbl, 0, sizeof(mpool_tbl_t)); + memset(mpool_pool, 0, sizeof(mpool_pool_t)); + env_atomic32_init(&em_shm->pool_count); + + ret = objpool_init(&mpool_pool->objpool, cores); + if (ret != 0) + return EM_ERR_OPERATION_FAILED; + + for (int i = 0; i < EM_CONFIG_POOLS; i++) { + pool = pool_idx2hdl(i); + mpool_elem_t *mpool_elem = pool_elem_get(pool); + + if (unlikely(!mpool_elem)) + return EM_ERR_BAD_POINTER; + + mpool_elem->em_pool = pool; + mpool_elem->event_type = EM_EVENT_TYPE_UNDEF; + for (int j = 0; j < EM_MAX_SUBPOOLS; j++) { + mpool_elem->odp_pool[j] = ODP_POOL_INVALID; + mpool_elem->size[j] = 0; + } + + objpool_add(&mpool_pool->objpool, i % cores, + &mpool_elem->objpool_elem); + } + + /* Init the mapping tbl from odp-pool(=subpool) index to em-pool */ + if (odp_pool_max_index() >= POOL_ODP2EM_TBL_LEN) + return EM_ERR_TOO_LARGE; + for (int i = 0; i < POOL_ODP2EM_TBL_LEN; i++) + mpool_tbl->pool_odp2em[i] = EM_POOL_UNDEF; + + /* Store common ODP pool capabilities in the mpool_tbl for easy access*/ + if (odp_pool_capability(&mpool_tbl->odp_pool_capability) != 0) + return EM_ERR_LIB_FAILED; + + /* Read EM-pool and EM-startup_pools related runtime 
config options */ + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + /* + * Create default and startup pools. + * + * If default pool configuration is given through 'startup_pools.conf' + * in em-odp.conf, use that instead. Otherwise use default_pool_cfg. + * + * Allocate/reserve default pool first here so when creating startup + * pools whose configuration does not provide pool handle, default pool + * handle EM_POOL_DEFAULT(1) won't be allocated to them. + */ + pool_default = pool_alloc(EM_POOL_DEFAULT); + + if (unlikely(pool_default == EM_POOL_UNDEF || + pool_default != EM_POOL_DEFAULT)) + return EM_ERR_ALLOC_FAILED; + + /* Create startup pools whose configuration is provided by the EM config file */ + for (uint32_t i = 0; i < em_shm->opt.startup_pools.num; i++) { + startup_pool_conf = &em_shm->opt.startup_pools.conf[i]; + + /* Default pool is provided by the EM config file */ + if (strstr(startup_pool_conf->name, EM_POOL_DEFAULT_NAME) || + startup_pool_conf->pool == EM_POOL_DEFAULT) { + default_pool_set = true; + pool_free(EM_POOL_DEFAULT); + pool = em_pool_create(EM_POOL_DEFAULT_NAME, + EM_POOL_DEFAULT, + &startup_pool_conf->cfg); + } else { + pool = em_pool_create(startup_pool_conf->name, + startup_pool_conf->pool, + &startup_pool_conf->cfg); + } + + if (pool == EM_POOL_UNDEF) + return EM_ERR_ALLOC_FAILED; + } + + /* Create the default pool if it is not provided by the EM config file */ + if (!default_pool_set) { + pool_free(EM_POOL_DEFAULT); + pool = em_pool_create(EM_POOL_DEFAULT_NAME, EM_POOL_DEFAULT, + default_pool_cfg); + if (pool == EM_POOL_UNDEF || pool != EM_POOL_DEFAULT) + return EM_ERR_ALLOC_FAILED; + } + + return EM_OK; +} + +em_status_t +pool_term(const mpool_tbl_t *mpool_tbl) +{ + em_status_t stat = EM_OK; + + (void)mpool_tbl; + + EM_PRINT("\n" + "Status before delete:\n"); + em_pool_info_print_all(); + + for (int i = 0; i < EM_CONFIG_POOLS; i++) { + em_pool_t pool = pool_idx2hdl(i); + const mpool_elem_t *mpool_elem = pool_elem_get(pool); + em_status_t ret; + + if (mpool_elem && pool_allocated(mpool_elem)) { + ret = pool_delete(pool); + if (ret != EM_OK) + stat = ret; /* save last error as return val */ + } + } + + return stat; +} + +/* Helper func to invalid_pool_cfg() */ +static int invalid_pool_cache_cfg(const em_pool_cfg_t *pool_cfg, + const char **err_str/*out*/) +{ + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + uint32_t min_cache_size; + uint32_t cache_size; + + if (pool_cfg->event_type == EM_EVENT_TYPE_SW) + min_cache_size = capa->buf.min_cache_size; + else if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) + min_cache_size = capa->pkt.min_cache_size; + else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) + min_cache_size = capa->vector.min_cache_size; + else + return -9; + + for (int i = 0; i < pool_cfg->num_subpools; i++) { + if (pool_cfg->subpool[i].size <= 0 || + pool_cfg->subpool[i].num <= 0) { + *err_str = "Invalid subpool size/num"; + return -(1 * 10 + i); /* -10, -11, ... */ + } + + cache_size = pool_cfg->subpool[i].cache_size; + if (unlikely(cache_size < min_cache_size)) { + *err_str = "Requested cache size too small"; + return -(2 * 10 + i); /* -20, -21, ... 
*/ + } + /* + * If the given cache size is larger than odp-max, + * then use odp-max: + * if (cache_size > max_cache_size) + * cache_size = max_cache_size; + * This is done later in pool_create(); + */ + } + + return 0; +} + +int invalid_pool_cfg(const em_pool_cfg_t *pool_cfg, const char **err_str/*out*/) +{ + int ret = 0; + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + if (!pool_cfg) { + *err_str = "Pool config NULL"; + return -1; + } + if (pool_cfg->__internal_check != EM_CHECK_INIT_CALLED) { + *err_str = "Pool config not initialized"; + return -1; + } + + if (pool_cfg->num_subpools <= 0 || + pool_cfg->num_subpools > EM_MAX_SUBPOOLS) { + *err_str = "Invalid number of subpools"; + return -1; + } + + ret = is_pool_type_supported(pool_cfg->event_type, err_str/*out*/); + if (ret) + return ret; + + if (!is_align_offset_valid(pool_cfg)) { + *err_str = "Invalid align offset"; + return -1; + } + + ret = is_user_area_valid(pool_cfg, capa, err_str/*out*/); + if (ret) + return ret; + + if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET && + pool_cfg->pkt.headroom.in_use && + pool_cfg->pkt.headroom.value > capa->pkt.max_headroom) { + *err_str = "Requested pkt headroom size too large"; + return -1; + } + + ret = invalid_pool_cache_cfg(pool_cfg, err_str/*out*/); + + return ret; /* 0: success, <0: error */ +} + +/* + * Helper to pool_create() - preallocate all events in the pool for ESV to + * maintain event state over multiple alloc- and free-operations. + */ +static void +pool_prealloc(const mpool_elem_t *pool_elem) +{ + event_prealloc_hdr_t *prealloc_hdr = NULL; + uint64_t num_tot = 0; + uint64_t num = 0; + uint64_t num_free = 0; + const uint32_t size = pool_elem->pool_cfg.subpool[0].size; + list_node_t evlist; + list_node_t *node; + + list_init(&evlist); + + for (int i = 0; i < pool_elem->num_subpools; i++) + num_tot += pool_elem->pool_cfg.subpool[i].num; + + do { + prealloc_hdr = event_prealloc(pool_elem, size, pool_elem->event_type); + if (likely(prealloc_hdr)) { + list_add(&evlist, &prealloc_hdr->list_node); + num++; + } + } while (prealloc_hdr); + + if (unlikely(num < num_tot)) + INTERNAL_ERROR(EM_FATAL(EM_ERR_TOO_SMALL), + EM_ESCOPE_POOL_CREATE, + "alloc: events expected:%" PRIu64 " actual:%" PRIu64 "", + num_tot, num); + + while (!list_is_empty(&evlist)) { + node = list_rem_first(&evlist); + prealloc_hdr = list_node_to_prealloc_hdr(node); + em_free(prealloc_hdr->ev_hdr.event); + num_free++; + } + + if (unlikely(num_free > num)) + INTERNAL_ERROR(EM_FATAL(EM_ERR_TOO_LARGE), + EM_ESCOPE_POOL_CREATE, + "free: events expected:%" PRIu64 " actual:%" PRIu64 "", + num, num_free); +} + +/* + * pool_create() helper: sort subpool cfg in ascending order based on buf size + */ +static void +sort_pool_cfg(const em_pool_cfg_t *pool_cfg, em_pool_cfg_t *sorted_cfg /*out*/) +{ + const int num_subpools = pool_cfg->num_subpools; + + *sorted_cfg = *pool_cfg; + + for (int i = 0; i < num_subpools - 1; i++) { + int idx = i; /* array index containing smallest size */ + + for (int j = i + 1; j < num_subpools; j++) { + if (sorted_cfg->subpool[j].size < + sorted_cfg->subpool[idx].size) + idx = j; /* store idx to smallest */ + } + + /* min size at [idx], swap with [i] */ + if (idx != i) { + uint32_t size = sorted_cfg->subpool[i].size; + uint32_t num = sorted_cfg->subpool[i].num; + uint32_t cache_size = sorted_cfg->subpool[i].cache_size; + + sorted_cfg->subpool[i] = sorted_cfg->subpool[idx]; + + sorted_cfg->subpool[idx].size = size; + sorted_cfg->subpool[idx].num = num; + 
sorted_cfg->subpool[idx].cache_size = cache_size;
+		}
+	}
+}
+
+/*
+ * pool_create() helper: set the pool event-cache size.
+ *
+ * Set the requested subpool cache-size based on the user-provided value and
+ * the limit set by the odp-pool-capability.
+ * The requested value can be larger than odp-max, in which case odp-max is
+ * used instead.
+ * Verification against the odp-min value is done in invalid_pool_cfg().
+ */
+static void
+set_poolcache_size(em_pool_cfg_t *pool_cfg)
+{
+	const odp_pool_capability_t *capa =
+		&em_shm->mpool_tbl.odp_pool_capability;
+	int num_subpools = pool_cfg->num_subpools;
+	uint32_t max_cache_size;
+
+	if (pool_cfg->event_type == EM_EVENT_TYPE_SW)
+		max_cache_size = capa->buf.max_cache_size;
+	else if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET)
+		max_cache_size = capa->pkt.max_cache_size;
+	else /* EM_EVENT_TYPE_VECTOR */
+		max_cache_size = capa->vector.max_cache_size;
+
+	for (int i = 0; i < num_subpools; i++) {
+		if (max_cache_size < pool_cfg->subpool[i].cache_size)
+			pool_cfg->subpool[i].cache_size = max_cache_size;
+	}
+}
+
+/*
+ * pool_create() helper: determine payload alignment.
+ */
+static int
+set_align(const em_pool_cfg_t *pool_cfg,
+	  uint32_t *align_offset /*out*/, uint32_t *odp_align /*out*/)
+{
+	const odp_pool_capability_t *capa =
+		&em_shm->mpool_tbl.odp_pool_capability;
+	uint32_t offset = 0;
+	uint32_t align = ODP_CACHE_LINE_SIZE;
+
+	/* Pool-specific param overrides config file 'align_offset' value */
+	if (pool_cfg->align_offset.in_use)
+		offset = pool_cfg->align_offset.value; /* pool cfg */
+	else
+		offset = em_shm->opt.pool.align_offset; /* cfg file */
+
+	/* Set subpool minimum alignment */
+	if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) {
+		if (align > capa->pkt.max_align)
+			align = capa->pkt.max_align;
+	} else {
+		if (align > capa->buf.max_align)
+			align = capa->buf.max_align;
+	}
+
+	*align_offset = offset;
+	*odp_align = align;
+
+	/* verify alignment requirements */
+	if (!POWEROF2(align) || align <= offset)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * pool_create() helper: determine user area size.
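+ *
+ * Worked example (illustrative values): for an EM_EVENT_TYPE_SW pool with
+ * a requested user area of 24 bytes and align_offset 8, the padded size
+ * below becomes ROUND_UP(24 + 8, 32) = 32 bytes; for packet and vector
+ * pools the requested size is used as such (pad_size == req_size).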
+ */ +static int +set_uarea_size(const em_pool_cfg_t *pool_cfg, uint32_t align_offset, + size_t *uarea_req_size/*out*/, size_t *uarea_pad_size/*out*/) +{ + size_t req_size = 0; + size_t pad_size = 0; + size_t max_size = 0; + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + + if (pool_cfg->user_area.in_use) /* use pool-cfg */ + req_size = pool_cfg->user_area.size; + else /* use cfg-file */ + req_size = em_shm->opt.pool.user_area_size; + + if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { + pad_size = req_size; + max_size = MIN(capa->pkt.max_uarea_size, + EM_EVENT_USER_AREA_MAX_SIZE); + } else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) { + pad_size = req_size; + max_size = MIN(capa->vector.max_uarea_size, + EM_EVENT_USER_AREA_MAX_SIZE); + } else if (req_size > 0) { + /* EM_EVENT_TYPE_SW: bufs */ + /* Note: contains align_offset extra space for adjustment */ + pad_size = ROUND_UP(req_size + align_offset, 32); + max_size = EM_EVENT_USER_AREA_MAX_SIZE; + } + + if (req_size > max_size) + return -1; + + *uarea_req_size = req_size; + *uarea_pad_size = pad_size; + return 0; +} + +/* + * pool_create() helper: set the pkt headroom + */ +static int +set_pkt_headroom(const em_pool_cfg_t *pool_cfg, + uint32_t *pkt_headroom /*out*/, + uint32_t *max_headroom /*out, for err print only*/) +{ + const odp_pool_capability_t *capa = + &em_shm->mpool_tbl.odp_pool_capability; + /* default value from cfg file */ + uint32_t headroom = em_shm->opt.pool.pkt_headroom; + + /* Pool-specific param overrides config file value */ + if (pool_cfg->pkt.headroom.in_use) + headroom = pool_cfg->pkt.headroom.value; + + *pkt_headroom = headroom; + *max_headroom = capa->pkt.max_headroom; + + if (unlikely(headroom > capa->pkt.max_headroom)) + return -1; + + return 0; +} + +/** Helper to create_subpools() */ +static void set_pool_params_pkt(odp_pool_param_t *pool_params /* out */, + uint32_t size, uint32_t num, uint32_t cache_size, + uint32_t align_offset, uint32_t odp_align, + uint32_t uarea_size, uint32_t pkt_headroom) +{ + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + odp_pool_param_init(pool_params); + + pool_params->type = ODP_POOL_PACKET; + /* num == max_num, helps pool-info stats calculation */ + pool_params->pkt.num = num; + pool_params->pkt.max_num = num; + + if (size > align_offset) + size = size - align_offset; + else + size = 1; /* 0:default, can be big => use 1 */ + /* len == max_len */ + pool_params->pkt.len = size; + pool_params->pkt.max_len = size; + pool_params->pkt.seg_len = size; + pool_params->pkt.align = odp_align; + /* + * Reserve space for the event header in each packet's + * ODP-user-area: + */ + pool_params->pkt.uarea_size = sizeof(event_hdr_t) + uarea_size; + /* + * Set the pkt headroom. + * Make sure the alloc-alignment fits into the headroom. 
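+	 *
+	 * E.g. (illustrative): pkt_headroom = 0 with align_offset = 8 gives an
+	 * effective headroom of 8 below, so an align-offset adjusted alloc can
+	 * always shift the payload start within the headroom.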
+ */ + pool_params->pkt.headroom = pkt_headroom; + if (pkt_headroom < align_offset) + pool_params->pkt.headroom = align_offset; + + pool_params->pkt.cache_size = cache_size; + + /* Pkt pool statistics */ + pool_params->stats.all = 0; + if (em_shm->opt.pool.statistics_enable) { + if (capa->pkt.stats.bit.available) + pool_params->stats.bit.available = 1; + if (capa->pkt.stats.bit.cache_available) + pool_params->stats.bit.cache_available = 1; + } +} + +static void set_pool_params_vector(odp_pool_param_t *pool_params /* out */, + uint32_t size, uint32_t num, + uint32_t cache_size, uint32_t uarea_size) +{ + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + odp_pool_param_init(pool_params); + + pool_params->type = ODP_POOL_VECTOR; + pool_params->vector.num = num; + pool_params->vector.max_size = size; + /* Reserve space for the EM event header in the vector's ODP-user-area */ + pool_params->vector.uarea_size = sizeof(event_hdr_t) + uarea_size; + pool_params->vector.cache_size = cache_size; + + /* Vector pool statistics */ + pool_params->stats.all = 0; + if (em_shm->opt.pool.statistics_enable) { + if (capa->vector.stats.bit.available) + pool_params->stats.bit.available = 1; + if (capa->vector.stats.bit.cache_available) + pool_params->stats.bit.cache_available = 1; + } +} + +/** Helper to create_subpools() */ +static void set_pool_params_buf(odp_pool_param_t *pool_params /* out */, + uint32_t size, uint32_t num, uint32_t cache_size, + uint32_t odp_align, uint32_t uarea_size) +{ + const odp_pool_capability_t *capa = &em_shm->mpool_tbl.odp_pool_capability; + + odp_pool_param_init(pool_params); + + pool_params->type = ODP_POOL_BUFFER; + pool_params->buf.num = num; + pool_params->buf.size = size + sizeof(event_hdr_t) + uarea_size; + pool_params->buf.align = odp_align; + pool_params->buf.cache_size = cache_size; + + /* Buf pool statistics */ + pool_params->stats.all = 0; + if (em_shm->opt.pool.statistics_enable) { + if (capa->buf.stats.bit.available) + pool_params->stats.bit.available = 1; + if (capa->buf.stats.bit.cache_available) + pool_params->stats.bit.cache_available = 1; + } +} + +static int +create_subpools(const em_pool_cfg_t *pool_cfg, + uint32_t align_offset, uint32_t odp_align, + uint32_t uarea_size, uint32_t pkt_headroom, + mpool_elem_t *mpool_elem /*out*/) +{ + const int num_subpools = pool_cfg->num_subpools; + mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl; + + for (int i = 0; i < num_subpools; i++) { + char pool_name[ODP_POOL_NAME_LEN]; + odp_pool_param_t pool_params; + uint32_t size = pool_cfg->subpool[i].size; + uint32_t num = pool_cfg->subpool[i].num; + uint32_t cache_size = pool_cfg->subpool[i].cache_size; + + if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) { + set_pool_params_pkt(&pool_params /* out */, + size, num, cache_size, + align_offset, odp_align, + uarea_size, pkt_headroom); + } else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) { + set_pool_params_vector(&pool_params /* out */, + size, num, cache_size, + uarea_size); + } else { /* pool_cfg->event_type == EM_EVENT_TYPE_SW */ + set_pool_params_buf(&pool_params /* out */, + size, num, cache_size, + odp_align, uarea_size); + } + + snprintf(pool_name, sizeof(pool_name), "%" PRI_POOL ":%d-%s", + mpool_elem->em_pool, i, mpool_elem->name); + pool_name[sizeof(pool_name) - 1] = '\0'; + + odp_pool_t odp_pool = odp_pool_create(pool_name, &pool_params); + + if (unlikely(odp_pool == ODP_POOL_INVALID)) + return -1; + + int odp_pool_idx = odp_pool_index(odp_pool); + + if (unlikely(odp_pool_idx < 0)) + 
+static int
+create_subpools(const em_pool_cfg_t *pool_cfg,
+		uint32_t align_offset, uint32_t odp_align,
+		uint32_t uarea_size, uint32_t pkt_headroom,
+		mpool_elem_t *mpool_elem /*out*/)
+{
+	const int num_subpools = pool_cfg->num_subpools;
+	mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl;
+
+	for (int i = 0; i < num_subpools; i++) {
+		char pool_name[ODP_POOL_NAME_LEN];
+		odp_pool_param_t pool_params;
+		uint32_t size = pool_cfg->subpool[i].size;
+		uint32_t num = pool_cfg->subpool[i].num;
+		uint32_t cache_size = pool_cfg->subpool[i].cache_size;
+
+		if (pool_cfg->event_type == EM_EVENT_TYPE_PACKET) {
+			set_pool_params_pkt(&pool_params /* out */,
+					    size, num, cache_size,
+					    align_offset, odp_align,
+					    uarea_size, pkt_headroom);
+		} else if (pool_cfg->event_type == EM_EVENT_TYPE_VECTOR) {
+			set_pool_params_vector(&pool_params /* out */,
+					       size, num, cache_size,
+					       uarea_size);
+		} else { /* pool_cfg->event_type == EM_EVENT_TYPE_SW */
+			set_pool_params_buf(&pool_params /* out */,
+					    size, num, cache_size,
+					    odp_align, uarea_size);
+		}
+
+		snprintf(pool_name, sizeof(pool_name), "%" PRI_POOL ":%d-%s",
+			 mpool_elem->em_pool, i, mpool_elem->name);
+		pool_name[sizeof(pool_name) - 1] = '\0';
+
+		odp_pool_t odp_pool = odp_pool_create(pool_name, &pool_params);
+
+		if (unlikely(odp_pool == ODP_POOL_INVALID))
+			return -1;
+
+		int odp_pool_idx = odp_pool_index(odp_pool);
+
+		if (unlikely(odp_pool_idx < 0))
+			return -2;
+
+		/* Store mapping from odp-pool (idx) to em-pool */
+		mpool_tbl->pool_odp2em[odp_pool_idx] = mpool_elem->em_pool;
+
+		mpool_elem->odp_pool[i] = odp_pool;
+		mpool_elem->size[i] = pool_cfg->subpool[i].size;
+		mpool_elem->num_subpools++; /* count created subpools for pool_delete() */
+
+		/* odp_pool_print(odp_pool); */
+	}
+
+	return 0;
+}
+
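/*
 * Editor's note (illustrative, not part of the patch): a typical user
 * configuration that ends up in pool_create() below - two packet
 * subpools that create_subpools() turns into two ODP pools. Values are
 * examples only:
 *
 *	em_pool_cfg_t cfg;
 *
 *	em_pool_cfg_init(&cfg);
 *	cfg.event_type = EM_EVENT_TYPE_PACKET;
 *	cfg.num_subpools = 2;
 *	cfg.subpool[0].size = 256;
 *	cfg.subpool[0].num = 1024;
 *	cfg.subpool[0].cache_size = 64;
 *	cfg.subpool[1].size = 2048;
 *	cfg.subpool[1].num = 512;
 *	cfg.subpool[1].cache_size = 32;
 *
 *	em_pool_t pool = em_pool_create("pkt-pool", EM_POOL_UNDEF, &cfg);
 */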
+em_pool_t
+pool_create(const char *name, em_pool_t req_pool, const em_pool_cfg_t *pool_cfg)
+{
+	const em_event_type_t pool_evtype = pool_cfg->event_type;
+	int err = 0;
+
+	/* Allocate a free EM pool */
+	const em_pool_t pool = pool_alloc(req_pool/* requested or undef*/);
+
+	if (unlikely(pool == EM_POOL_UNDEF))
+		return EM_POOL_UNDEF;
+
+	mpool_elem_t *mpool_elem = pool_elem_get(pool);
+
+	/* Sanity check */
+	if (!mpool_elem || mpool_elem->em_pool != pool)
+		return EM_POOL_UNDEF;
+
+	mpool_elem->event_type = pool_evtype;
+	/* The number of successfully created subpools is counted in create_subpools() */
+	mpool_elem->num_subpools = 0;
+	/* Store the event pool name, if given */
+	if (name && *name) {
+		strncpy(mpool_elem->name, name, sizeof(mpool_elem->name));
+		mpool_elem->name[sizeof(mpool_elem->name) - 1] = '\0';
+	} else {
+		mpool_elem->name[0] = '\0';
+	}
+
+	em_pool_cfg_t sorted_cfg;
+
+	/*
+	 * Sort the subpool cfg in ascending order based on the buffer size
+	 */
+	sort_pool_cfg(pool_cfg, &sorted_cfg/*out*/);
+	/* Use sorted_cfg instead of pool_cfg from here on */
+
+	/*
+	 * Set the cache-size of each subpool in the EM-pool
+	 */
+	set_poolcache_size(&sorted_cfg);
+
+	/* Store the sorted config */
+	mpool_elem->pool_cfg = sorted_cfg;
+
+	/*
+	 * Event payload alignment requirement for the pool
+	 */
+	uint32_t align_offset = 0;
+	uint32_t odp_align = 0;
+
+	/* align only valid for bufs and pkts */
+	if (pool_evtype == EM_EVENT_TYPE_SW ||
+	    pool_evtype == EM_EVENT_TYPE_PACKET) {
+		err = set_align(&sorted_cfg, &align_offset/*out*/,
+				&odp_align/*out*/);
+		if (unlikely(err)) {
+			INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE,
+				       "EM-pool:\"%s\" align mismatch:\n"
+				       "align:%u cfg:align_offset:%u",
+				       name, odp_align, align_offset);
+			goto error;
+		}
+	}
+	/* store the align offset, needed in pkt-alloc */
+	mpool_elem->align_offset = align_offset;
+
+	/*
+	 * Event user area size.
+	 * Pool-specific param overrides config file 'user_area_size' value
+	 */
+	size_t uarea_req_size = 0;
+	size_t uarea_pad_size = 0;
+
+	err = set_uarea_size(&sorted_cfg, align_offset,
+			     &uarea_req_size/*out*/, &uarea_pad_size/*out*/);
+	if (unlikely(err)) {
+		INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE,
+			       "EM-pool:\"%s\" invalid uarea config:\n"
+			       "req.size:%zu => padded uarea size:%zu",
+			       name, uarea_req_size, uarea_pad_size);
+		goto error;
+	}
+
+	/* store the user_area sizes, needed in alloc */
+	mpool_elem->user_area.req_size = uarea_req_size & UINT16_MAX;
+	mpool_elem->user_area.pad_size = uarea_pad_size & UINT16_MAX;
+
+	/*
+	 * Set the headroom for events in EM packet pools
+	 */
+	uint32_t pkt_headroom = 0;
+	uint32_t max_headroom = 0;
+
+	if (pool_evtype == EM_EVENT_TYPE_PACKET) {
+		err = set_pkt_headroom(&sorted_cfg, &pkt_headroom/*out*/,
+				       &max_headroom/*out*/);
+		if (unlikely(err)) {
+			INTERNAL_ERROR(EM_ERR_TOO_LARGE, EM_ESCOPE_POOL_CREATE,
+				       "EM-pool:\"%s\" invalid pkt headroom:\n"
+				       "headroom:%u vs. max:headroom:%u",
+				       name, pkt_headroom, max_headroom);
+			goto error;
+		}
+	}
+
+	/*
+	 * Create the subpools for the EM event-pool.
+	 * Each EM subpool is an ODP pool.
+	 */
+	err = create_subpools(&sorted_cfg, align_offset, odp_align,
+			      uarea_pad_size, pkt_headroom, mpool_elem /*out*/);
+	if (unlikely(err)) {
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_ALLOC_FAILED),
+			       EM_ESCOPE_POOL_CREATE,
+			       "EM-pool:\"%s\" create fails:%d\n"
+			       "subpools req:%d vs. subpools created:%d",
+			       name, err, sorted_cfg.num_subpools,
+			       mpool_elem->num_subpools);
+		goto error;
+	}
+
+	/*
+	 * ESV: preallocate all events in the pool
+	 */
+	if (esv_enabled() && em_shm->opt.esv.prealloc_pools)
+		pool_prealloc(mpool_elem);
+
+	/* Success! */
+	return mpool_elem->em_pool;
+
+error:
+	(void)pool_delete(pool);
+	return EM_POOL_UNDEF;
+}
+
+em_status_t
+pool_delete(em_pool_t pool)
+{
+	mpool_tbl_t *const mpool_tbl = &em_shm->mpool_tbl;
+	mpool_elem_t *const mpool_elem = pool_elem_get(pool);
+
+	if (unlikely(mpool_elem == NULL || !pool_allocated(mpool_elem)))
+		return EM_ERR_BAD_STATE;
+
+	for (int i = 0; i < mpool_elem->num_subpools; i++) {
+		odp_pool_t odp_pool = mpool_elem->odp_pool[i];
+		int odp_pool_idx;
+		int ret;
+
+		if (odp_pool == ODP_POOL_INVALID)
+			return EM_ERR_NOT_FOUND;
+
+		odp_pool_idx = odp_pool_index(odp_pool);
+		if (unlikely(odp_pool_idx < 0))
+			return EM_ERR_BAD_ID;
+
+		ret = odp_pool_destroy(odp_pool);
+		if (unlikely(ret))
+			return EM_ERR_LIB_FAILED;
+
+		/* Clear mapping from odp-pool (idx) to em-pool */
+		mpool_tbl->pool_odp2em[odp_pool_idx] = EM_POOL_UNDEF;
+
+		mpool_elem->odp_pool[i] = ODP_POOL_INVALID;
+		mpool_elem->size[i] = 0;
+	}
+
+	mpool_elem->name[0] = '\0';
+	mpool_elem->event_type = EM_EVENT_TYPE_UNDEF;
+	mpool_elem->num_subpools = 0;
+
+	return pool_free(pool);
+}
+
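/*
 * Editor's note (illustrative): pool_find() below backs em_pool_find(),
 * so a named pool created earlier can be looked up and torn down via
 * the public API:
 *
 *	em_pool_t pool = em_pool_find("pkt-pool");
 *
 *	if (pool != EM_POOL_UNDEF)
 *		(void)em_pool_delete(pool);
 */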
+em_pool_t
+pool_find(const char *name)
+{
+	if (name && *name) {
+		for (int i = 0; i < EM_CONFIG_POOLS; i++) {
+			const mpool_elem_t *mpool_elem =
+				&em_shm->mpool_tbl.pool[i];
+
+			if (pool_allocated(mpool_elem) &&
+			    !strncmp(name, mpool_elem->name, EM_POOL_NAME_LEN))
+				return mpool_elem->em_pool;
+		}
+	}
+
+	return EM_POOL_UNDEF;
+}
+
+unsigned int
+pool_count(void)
+{
+	return env_atomic32_get(&em_shm->pool_count);
+}
+
+#define POOL_INFO_HDR_STR \
+" id name type offset uarea sizes [size count(used/free) cache]\n"
+
+#define POOL_INFO_SUBSTR_FMT \
+"%d:[sz=%" PRIu32 " n=%" PRIu32 "(%" PRIu32 "/%" PRIu32 ") $=%" PRIu32 "]"
+
+#define POOL_INFO_SUBSTR_NO_STATS_FMT \
+"%d:[sz=%" PRIu32 " n=%" PRIu32 "(-/-) cache=%" PRIu32 "]"
+
+void pool_info_print_hdr(unsigned int num_pools)
+{
+	if (num_pools == 1) {
+		EM_PRINT("EM Event Pool\n"
+			 "-------------\n"
+			 POOL_INFO_HDR_STR);
+	} else {
+		EM_PRINT("EM Event Pools:%2u\n"
+			 "-----------------\n"
+			 POOL_INFO_HDR_STR, num_pools);
+	}
+}
+
+void pool_info_print(em_pool_t pool)
+{
+	em_pool_info_t pool_info;
+	em_status_t stat;
+	const char *pool_type;
+
+	stat = em_pool_info(pool, &pool_info/*out*/);
+	if (unlikely(stat != EM_OK)) {
+		EM_PRINT(" %-6" PRI_POOL " %-16s n/a n/a n/a n/a [n/a]\n",
+			 pool, "err:n/a");
+		return;
+	}
+
+	if (pool_info.event_type == EM_EVENT_TYPE_VECTOR)
+		pool_type = "vec";
+	else if (pool_info.event_type == EM_EVENT_TYPE_PACKET)
+		pool_type = "pkt";
+	else
+		pool_type = "buf";
+
+	EM_PRINT(" %-6" PRI_POOL " %-16s %4s %02u %02zu %02u ",
+		 pool, pool_info.name, pool_type,
+		 pool_info.align_offset, pool_info.user_area_size,
+		 pool_info.num_subpools);
+
+	for (int i = 0; i < pool_info.num_subpools; i++) {
+		char subpool_str[42];
+
+		if (em_shm->opt.pool.statistics_enable) {
+			snprintf(subpool_str, sizeof(subpool_str),
+				 POOL_INFO_SUBSTR_FMT, i,
+				 pool_info.subpool[i].size,
+				 pool_info.subpool[i].num,
+				 pool_info.subpool[i].used,
+				 pool_info.subpool[i].free,
+				 pool_info.subpool[i].cache_size);
+		} else {
+			snprintf(subpool_str, sizeof(subpool_str),
+				 POOL_INFO_SUBSTR_NO_STATS_FMT, i,
+				 pool_info.subpool[i].size,
+				 pool_info.subpool[i].num,
+				 pool_info.subpool[i].cache_size);
+		}
+		subpool_str[sizeof(subpool_str) - 1] = '\0';
+		EM_PRINT(" %-42s", subpool_str);
+	}
+
+	EM_PRINT("\n");
+}
diff --git a/src/em_pool.h b/src/em_pool.h
index 7cea010c..5d33d2b4 100644
--- a/src/em_pool.h
+++ b/src/em_pool.h
@@ -1,157 +1,152 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ - -#ifndef EM_POOL_H_ -#define EM_POOL_H_ - -/** - * @file - * EM internal event pool functions - * - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#define valid_pool(pool) ((unsigned int)pool_hdl2idx((pool)) < \ - EM_CONFIG_POOLS) -#define invalid_pool(pool) ((unsigned int)pool_hdl2idx((pool)) > \ - EM_CONFIG_POOLS - 1) - -int invalid_pool_cfg(const em_pool_cfg_t *pool_cfg, - const char **err_str/*out*/); - -em_status_t -pool_init(mpool_tbl_t *const mpool_tbl, mpool_pool_t *const mpool_pool, - const em_pool_cfg_t *default_pool_cfg); - -em_status_t -pool_term(const mpool_tbl_t *pool_tbl); - -em_pool_t -pool_create(const char *name, em_pool_t req_pool, const em_pool_cfg_t *pool_cfg); - -em_status_t -pool_delete(em_pool_t pool); - -em_pool_t -pool_find(const char *name); - -void pool_info_print_hdr(unsigned int num_pools); -void pool_info_print(em_pool_t pool); - -/** Convert pool handle to pool index */ -static inline int -pool_hdl2idx(em_pool_t pool) -{ - return (int)(uintptr_t)pool - 1; -} - -/** Convert pool index to pool handle */ -static inline em_pool_t -pool_idx2hdl(int pool_idx) -{ - return (em_pool_t)(uintptr_t)(pool_idx + 1); -} - -/** Returns pool element associated with pool handle */ -static inline mpool_elem_t * -pool_elem_get(em_pool_t pool) -{ - const int pool_idx = pool_hdl2idx(pool); - mpool_elem_t *mpool_elem; - - if (unlikely((unsigned int)pool_idx > EM_CONFIG_POOLS - 1)) - return NULL; - - mpool_elem = &em_shm->mpool_tbl.pool[pool_idx]; - - return mpool_elem; -} - -static inline int -pool_allocated(const mpool_elem_t *const mpool_elem) -{ - return !objpool_in_pool(&mpool_elem->objpool_elem); -} - -static inline int -pool_find_subpool(const mpool_elem_t *const pool_elem, size_t size) -{ - int subpool = 0; - - if (EM_MAX_SUBPOOLS > 1) { /* Compile time option */ - int i; - /* Find the optimal subpool to allocate the event from */ - for (i = 0; i < pool_elem->num_subpools && - size > pool_elem->size[i]; i++) - ; - - if (unlikely(i >= pool_elem->num_subpools)) - return -1; - - subpool = i; - } - - return subpool; -} - -unsigned int -pool_count(void); - -/** - * Get the EM event-pool that an odp-pool belongs to. - * - * An EM event-pool consists of up to EM_MAX_SUBPOOLS subpools (that are - * odp-pools) - a table (em_shm->mpool_tbl.pool_odp2em[]) contains the - * mapping and is populated during em_pool_create() calls. - */ -static inline em_pool_t -pool_odp2em(odp_pool_t odp_pool) -{ - /* - * 'idx' is in the range: 0 to odp_pool_max_index(), which is smaller - * than the length of the em_shm->mpool_tbl.pool_odp2em[] array - * (verified at startup in pool_init()). - */ - int idx = odp_pool_index(odp_pool); - - if (unlikely(idx < 0)) - return EM_POOL_UNDEF; - - return em_shm->mpool_tbl.pool_odp2em[idx]; -} - -#ifdef __cplusplus -} -#endif - -#endif /* EM_POOL_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef EM_POOL_H_ +#define EM_POOL_H_ + +/** + * @file + * EM internal event pool functions + * + */ + +#ifdef __cplusplus +extern "C" { +#endif + +#define valid_pool(pool) ((unsigned int)pool_hdl2idx((pool)) < \ + EM_CONFIG_POOLS) +#define invalid_pool(pool) ((unsigned int)pool_hdl2idx((pool)) > \ + EM_CONFIG_POOLS - 1) + +int invalid_pool_cfg(const em_pool_cfg_t *pool_cfg, + const char **err_str/*out*/); + +em_status_t +pool_init(mpool_tbl_t *const mpool_tbl, mpool_pool_t *const mpool_pool, + const em_pool_cfg_t *default_pool_cfg); + +em_status_t +pool_term(const mpool_tbl_t *pool_tbl); + +em_pool_t +pool_create(const char *name, em_pool_t req_pool, const em_pool_cfg_t *pool_cfg); + +em_status_t +pool_delete(em_pool_t pool); + +em_pool_t +pool_find(const char *name); + +void pool_info_print_hdr(unsigned int num_pools); +void pool_info_print(em_pool_t pool); + +/** Convert pool handle to pool index */ +static inline int +pool_hdl2idx(em_pool_t pool) +{ + return (int)(uintptr_t)pool - 1; +} + +/** Convert pool index to pool handle */ +static inline em_pool_t +pool_idx2hdl(int pool_idx) +{ + return (em_pool_t)(uintptr_t)(pool_idx + 1); +} + +/** Returns pool element associated with pool handle */ +static inline mpool_elem_t * +pool_elem_get(em_pool_t pool) +{ + const int pool_idx = pool_hdl2idx(pool); + mpool_elem_t *mpool_elem; + + if (unlikely((unsigned int)pool_idx > EM_CONFIG_POOLS - 1)) + return NULL; + + mpool_elem = &em_shm->mpool_tbl.pool[pool_idx]; + + return mpool_elem; +} + +static inline int +pool_allocated(const mpool_elem_t *const mpool_elem) +{ + return !objpool_in_pool(&mpool_elem->objpool_elem); +} + +static inline int +pool_find_subpool(const mpool_elem_t *const pool_elem, uint32_t size) +{ + int subpool; + + /* Find the optimal subpool to allocate the event from */ + for (subpool = 0; subpool < pool_elem->num_subpools && + size > pool_elem->size[subpool]; subpool++) + ; + + if (unlikely(subpool >= pool_elem->num_subpools)) + return -1; + + return subpool; +} + +unsigned int +pool_count(void); + +/** + * Get the EM event-pool that an odp-pool belongs to. + * + * An EM event-pool consists of up to EM_MAX_SUBPOOLS subpools (that are + * odp-pools) - a table (em_shm->mpool_tbl.pool_odp2em[]) contains the + * mapping and is populated during em_pool_create() calls. 
+ */ +static inline em_pool_t +pool_odp2em(odp_pool_t odp_pool) +{ + /* + * 'idx' is in the range: 0 to odp_pool_max_index(), which is smaller + * than the length of the em_shm->mpool_tbl.pool_odp2em[] array + * (verified at startup in pool_init()). + */ + int idx = odp_pool_index(odp_pool); + + if (unlikely(idx < 0)) + return EM_POOL_UNDEF; + + return em_shm->mpool_tbl.pool_odp2em[idx]; +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_POOL_H_ */ diff --git a/src/em_queue.c b/src/em_queue.c index 9b8187a0..e2fed569 100644 --- a/src/em_queue.c +++ b/src/em_queue.c @@ -1,1677 +1,1686 @@ -/* - * Copyright (c) 2015-2021, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "em_include.h" - -#define EM_Q_BASENAME "EM_Q_" - -/** - * Queue create-params passed to queue_setup...() - */ -typedef struct { - const char *name; - em_queue_type_t type; - em_queue_prio_t prio; - em_atomic_group_t atomic_group; - em_queue_group_t queue_group; - const em_queue_conf_t *conf; -} queue_setup_t; - -/** - * Default queue create conf to use if not provided by the user - */ -static const em_queue_conf_t default_queue_conf = { - .flags = EM_QUEUE_FLAG_DEFAULT, - .min_events = 0, /* use EM default value */ - .conf_len = 0, /* .conf is ignored if this is 0 */ - .conf = NULL -}; - -static int -queue_init_prio_map(int minp, int maxp, int nump); -static void -queue_init_prio_legacy(int minp, int maxp); -static void -queue_init_prio_adaptive(int minp, int maxp, int nump); -static int -queue_init_prio_custom(int minp, int maxp); - -static inline int -queue_create_check_sched(const queue_setup_t *setup, const char **err_str); - -static inline em_queue_t -queue_alloc(em_queue_t queue, const char **err_str); - -static inline em_status_t -queue_free(em_queue_t queue); - -static int -queue_setup(queue_elem_t *q_elem, const queue_setup_t *setup, - const char **err_str); -static void -queue_setup_common(queue_elem_t *q_elem, const queue_setup_t *setup); -static void -queue_setup_odp_common(const queue_setup_t *setup, - odp_queue_param_t *odp_queue_param); -static int -queue_setup_scheduled(queue_elem_t *q_elem, const queue_setup_t *setup, - const char **err_str); -static int -queue_setup_unscheduled(queue_elem_t *q_elem, const queue_setup_t *setup, - const char **err_str); -static int -queue_setup_local(queue_elem_t *q_elem, const queue_setup_t *setup, - const char **err_str); -static int -queue_setup_output(queue_elem_t *q_elem, const queue_setup_t *setup, - const char **err_str); - -static inline queue_elem_t * -queue_poolelem2queue(objpool_elem_t *const queue_pool_elem) -{ - return (queue_elem_t *)((uintptr_t)queue_pool_elem - - offsetof(queue_elem_t, queue_pool_elem)); -} - -static int -read_config_file(void) -{ - const char *conf_str; - int val = 0; - int ret; - - EM_PRINT("EM-queue config:\n"); - - /* - * Option: queue.min_events_default - */ - conf_str = "queue.min_events_default"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); - return -1; - } - if (val < 0) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", - conf_str, val); - return -1; - } - /* store & print the value */ - em_shm->opt.queue.min_events_default = val; - EM_PRINT(" %s: %d\n", conf_str, val); - - /* - * Option: queue.prio_map_mode - */ - conf_str = "queue.priority.map_mode"; - ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); - return -1; - } - if (val < 0 || val > 2) { - EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", conf_str, val); - return -1; - } - em_shm->opt.queue.priority.map_mode = val; - EM_PRINT(" %s: %d\n", conf_str, val); - - if (val == 2) { /* custom map */ - conf_str = "queue.priority.custom_map"; - ret = em_libconfig_lookup_array(&em_shm->libconfig, conf_str, - em_shm->opt.queue.priority.custom_map, - EM_QUEUE_PRIO_NUM); - if (unlikely(!ret)) { - EM_LOG(EM_LOG_ERR, "Config option '%s' not found or invalid\n", conf_str); - return -1; - } - EM_PRINT(" %s: [", conf_str); - for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { - EM_PRINT("%d", 
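/*
 * Editor's note (illustrative, not part of the patch): the lookups in
 * read_config_file() correspond to an em-odp.conf section of roughly
 * this shape (libconfig syntax, values are examples only):
 *
 *	queue: {
 *		min_events_default = 4096
 *		priority: {
 *			map_mode = 2
 *			custom_map = [0, 1, 2, 3, 4, 5, 6, 7]
 *		}
 *	}
 */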
em_shm->opt.queue.priority.custom_map[i]); - if (i < (EM_QUEUE_PRIO_NUM - 1)) - EM_PRINT(","); - } - EM_PRINT("]\n"); - } - return 0; -} - -/** - * Helper: initialize a queue pool (populate pool with q_elems) - */ -static int -queue_pool_init(queue_tbl_t *const queue_tbl, - queue_pool_t *const queue_pool, - int min_qidx, int max_qidx) -{ - const int cores = em_core_count(); - const int qs_per_pool = (max_qidx - min_qidx + 1); - int qs_per_subpool = qs_per_pool / cores; - int qs_leftover = qs_per_pool % cores; - int subpool_idx = 0; - int add_cnt = 0; - int i; - - if (objpool_init(&queue_pool->objpool, cores) != 0) - return -1; - - for (i = min_qidx; i <= max_qidx; i++) { - objpool_add(&queue_pool->objpool, subpool_idx, - &queue_tbl->queue_elem[i].queue_pool_elem); - add_cnt++; - if (add_cnt == qs_per_subpool + qs_leftover) { - subpool_idx++; /* add to next subpool */ - qs_leftover = 0; /* added leftovers to subpool 0 */ - add_cnt = 0; - } - } - - return 0; -} - -/** - * Initialize the EM queues - */ -em_status_t -queue_init(queue_tbl_t *const queue_tbl, - queue_pool_t *const queue_pool, - queue_pool_t *const queue_pool_static) -{ - odp_queue_capability_t *const odp_queue_capa = - &queue_tbl->odp_queue_capability; - odp_schedule_capability_t *const odp_sched_capa = - &queue_tbl->odp_schedule_capability; - int min; - int max; - int ret; - - memset(queue_tbl, 0, sizeof(queue_tbl_t)); - memset(queue_pool, 0, sizeof(queue_pool_t)); - memset(queue_pool_static, 0, sizeof(queue_pool_t)); - env_atomic32_init(&em_shm->queue_count); - - if (read_config_file()) - return EM_ERR_LIB_FAILED; - - /* Retieve and store the ODP queue capabilities into 'queue_tbl' */ - ret = odp_queue_capability(odp_queue_capa); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "odp_queue_capability():%d failed", ret); - - /* Retieve and store the ODP schedule capabilities into 'queue_tbl' */ - ret = odp_schedule_capability(odp_sched_capa); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "odp_schedule_capability():%d failed", ret); - - RETURN_ERROR_IF(odp_queue_capa->max_queues < EM_MAX_QUEUES, - EM_ERR_TOO_LARGE, EM_ESCOPE_INIT, - "EM_MAX_QUEUES:%i > odp-max-queues:%u", - EM_MAX_QUEUES, odp_queue_capa->max_queues); - - /* Initialize the queue element table */ - for (int i = 0; i < EM_MAX_QUEUES; i++) - queue_tbl->queue_elem[i].queue = queue_idx2hdl(i); - - /* Initialize the static queue pool */ - min = queue_id2idx(EM_QUEUE_STATIC_MIN); - max = queue_id2idx(LAST_INTERNAL_QUEUE); - if (queue_pool_init(queue_tbl, queue_pool_static, min, max) != 0) - return EM_ERR_LIB_FAILED; - - /* Initialize the dynamic queue pool */ - min = queue_id2idx(FIRST_DYN_QUEUE); - max = queue_id2idx(LAST_DYN_QUEUE); - if (queue_pool_init(queue_tbl, queue_pool, min, max) != 0) - return EM_ERR_LIB_FAILED; - - /* Initialize priority mapping, adapt to values from ODP */ - min = odp_schedule_min_prio(); - max = odp_schedule_max_prio(); - em_shm->queue_prio.num_runtime = max - min + 1; - ret = queue_init_prio_map(min, max, em_shm->queue_prio.num_runtime); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "mapping odp priorities failed: %d", ret); - return EM_OK; -} - -/** - * Queue inits done during EM core local init (once at startup on each core). - * - * Initialize event storage for queues of type 'EM_QUEUE_TYPE_LOCAL'. 
- */ -em_status_t -queue_init_local(void) -{ - int prio; - int core; - char name[20]; - odp_queue_param_t param; - em_locm_t *const locm = &em_locm; - - core = em_core_id(); - - odp_queue_param_init(¶m); - param.type = ODP_QUEUE_TYPE_PLAIN; - param.enq_mode = ODP_QUEUE_OP_MT_UNSAFE; - param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; - param.order = ODP_QUEUE_ORDER_IGNORE; - param.size = 512; - - locm->local_queues.empty = 1; - - for (prio = 0; prio < EM_QUEUE_PRIO_NUM; prio++) { - snprintf(name, sizeof(name), - "local-q:c%02d:prio%d", core, prio); - name[sizeof(name) - 1] = '\0'; - - locm->local_queues.prio[prio].empty_prio = 1; - locm->local_queues.prio[prio].queue = - odp_queue_create(name, ¶m); - if (unlikely(locm->local_queues.prio[prio].queue == - ODP_QUEUE_INVALID)) - return EM_ERR_ALLOC_FAILED; - } - - memset(&locm->output_queue_track, 0, - sizeof(locm->output_queue_track)); - - return EM_OK; -} - -/** - * Queue termination done during em_term_core(). - * - * Flush & destroy event storage for queues of type 'EM_QUEUE_TYPE_LOCAL'. - */ -em_status_t -queue_term_local(void) -{ - em_event_t event; - event_hdr_t *ev_hdr; - em_status_t stat = EM_OK; - int ret; - bool esv_ena = esv_enabled(); - - /* flush all events */ - while ((ev_hdr = local_queue_dequeue()) != NULL) { - event = event_hdr_to_event(ev_hdr); - if (esv_ena) - event = evstate_em2usr(event, ev_hdr, - EVSTATE__TERM_CORE__QUEUE_LOCAL); - em_free(event); - } - - for (int prio = 0; prio < EM_QUEUE_PRIO_NUM; prio++) { - ret = odp_queue_destroy(em_locm.local_queues.prio[prio].queue); - if (unlikely(ret != 0)) - stat = EM_ERR_LIB_FAILED; - } - - return stat; -} - -/** - * Allocate a new EM queue - * - * @param queue EM queue handle if a specific EM queue is requested, - * EM_QUEUE_UNDEF if any EM queue will do. - * - * @return EM queue handle - * @retval EM_QUEUE_UNDEF on failure - */ -static inline em_queue_t -queue_alloc(em_queue_t queue, const char **err_str) -{ - queue_elem_t *queue_elem; - objpool_elem_t *queue_pool_elem; - - if (queue == EM_QUEUE_UNDEF) { - /* - * Allocate a dynamic queue, i.e. 
take next available - */ - queue_pool_elem = objpool_rem(&em_shm->queue_pool.objpool, - em_core_id()); - if (unlikely(queue_pool_elem == NULL)) { - *err_str = "queue pool element alloc failed!"; - return EM_QUEUE_UNDEF; - } - queue_elem = queue_poolelem2queue(queue_pool_elem); - } else { - /* - * Allocate a specific static-handle queue, handle given - */ - internal_queue_t iq; - - iq.queue = queue; - if (iq.queue_id < EM_QUEUE_STATIC_MIN || - iq.queue_id > LAST_INTERNAL_QUEUE) { - *err_str = "queue handle not from static range!"; - return EM_QUEUE_UNDEF; - } - - queue_elem = queue_elem_get(queue); - if (unlikely(queue_elem == NULL)) { - *err_str = "queue_elem ptr NULL!"; - return EM_QUEUE_UNDEF; - } - /* Verify that the queue is not allocated */ - if (queue_allocated(queue_elem)) { - *err_str = "queue already allocated!"; - return EM_QUEUE_UNDEF; - } - /* Remove the queue from the pool */ - int ret = objpool_rem_elem(&em_shm->queue_pool_static.objpool, - &queue_elem->queue_pool_elem); - if (unlikely(ret != 0)) { - *err_str = "static queue pool element alloc failed!"; - return EM_QUEUE_UNDEF; - } - } - - env_atomic32_inc(&em_shm->queue_count); - return queue_elem->queue; -} - -static inline em_status_t -queue_free(em_queue_t queue) -{ - queue_elem_t *const queue_elem = queue_elem_get(queue); - objpool_t *objpool; - internal_queue_t iq; - - iq.queue = queue; - - if (unlikely(queue_elem == NULL)) - return EM_ERR_BAD_ID; - - if (iq.queue_id >= EM_QUEUE_STATIC_MIN && - iq.queue_id <= LAST_INTERNAL_QUEUE) - objpool = &em_shm->queue_pool_static.objpool; - else - objpool = &em_shm->queue_pool.objpool; - - queue_elem->state = EM_QUEUE_STATE_INVALID; - - objpool_add(objpool, - queue_elem->queue_pool_elem.subpool_idx, - &queue_elem->queue_pool_elem); - - env_atomic32_dec(&em_shm->queue_count); - return EM_OK; -} - -static int -queue_create_check_sched(const queue_setup_t *setup, const char **err_str) -{ - const queue_group_elem_t *queue_group_elem = NULL; - const atomic_group_elem_t *ag_elem = NULL; - - queue_group_elem = queue_group_elem_get(setup->queue_group); - /* scheduled queues are always associated with a queue group */ - if (unlikely(queue_group_elem == NULL || !queue_group_allocated(queue_group_elem))) { - *err_str = "Invalid queue group!"; - return -1; - } - - if (setup->atomic_group != EM_ATOMIC_GROUP_UNDEF) { - ag_elem = atomic_group_elem_get(setup->atomic_group); - if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) { - *err_str = "Invalid atomic group!"; - return -1; - } - } - - if (unlikely(setup->prio >= EM_QUEUE_PRIO_NUM)) { - *err_str = "Invalid queue priority!"; - return -1; - } - return 0; -} - -static int -queue_create_check_args(const queue_setup_t *setup, const char **err_str) -{ - /* scheduled queue */ - if (setup->type == EM_QUEUE_TYPE_ATOMIC || - setup->type == EM_QUEUE_TYPE_PARALLEL || - setup->type == EM_QUEUE_TYPE_PARALLEL_ORDERED) - return queue_create_check_sched(setup, err_str); - - /* other queue types */ - switch (setup->type) { - case EM_QUEUE_TYPE_UNSCHEDULED: - /* API arg checks for unscheduled queues */ - if (unlikely(setup->prio != EM_QUEUE_PRIO_UNDEF)) { - *err_str = "Invalid priority for unsched queue!"; - return -1; - } - if (unlikely(setup->queue_group != EM_QUEUE_GROUP_UNDEF)) { - *err_str = "Queue group not used with unsched queues!"; - return -1; - } - if (unlikely(setup->atomic_group != EM_ATOMIC_GROUP_UNDEF)) { - *err_str = "Atomic group not used with unsched queues!"; - return -1; - } - break; - - case EM_QUEUE_TYPE_LOCAL: - /* API arg 
checks for local queues */ - if (unlikely(setup->queue_group != EM_QUEUE_GROUP_UNDEF)) { - *err_str = "Queue group not used with local queues!"; - return -1; - } - if (unlikely(setup->atomic_group != EM_ATOMIC_GROUP_UNDEF)) { - *err_str = "Atomic group not used with local queues!"; - return -1; - } - if (unlikely(setup->prio >= EM_QUEUE_PRIO_NUM)) { - *err_str = "Invalid queue priority!"; - return -1; - } - break; - - case EM_QUEUE_TYPE_OUTPUT: - /* API arg checks for output queues */ - if (unlikely(setup->queue_group != EM_QUEUE_GROUP_UNDEF)) { - *err_str = "Queue group not used with output queues!"; - return -1; - } - if (unlikely(setup->atomic_group != EM_ATOMIC_GROUP_UNDEF)) { - *err_str = "Atomic group not used with output queues!"; - return -1; - } - if (unlikely(setup->conf == NULL || - setup->conf->conf_len < sizeof(em_output_queue_conf_t) || - setup->conf->conf == NULL)) { - *err_str = "Invalid output queue conf"; - return -1; - } - break; - - default: - *err_str = "Unknown queue type"; - return -1; - } - - return 0; -} - -/** - * Create an EM queue: alloc, setup and add to queue group list - */ -em_queue_t -queue_create(const char *name, em_queue_type_t type, em_queue_prio_t prio, - em_queue_group_t queue_group, em_queue_t queue_req, - em_atomic_group_t atomic_group, const em_queue_conf_t *conf, - const char **err_str) -{ - int err; - - /* Use default EM queue conf if none given */ - if (conf == NULL) - conf = &default_queue_conf; - - queue_setup_t setup = {.name = name, .type = type, .prio = prio, - .atomic_group = atomic_group, - .queue_group = queue_group, .conf = conf}; - - err = queue_create_check_args(&setup, err_str); - if (err) { - /* 'err_str' set by queue_create_check_args() */ - return EM_QUEUE_UNDEF; - } - - /* - * Allocate the queue handle and obtain the corresponding queue-element - */ - const char *alloc_err_str = ""; - - em_queue_t queue = queue_alloc(queue_req, &alloc_err_str); - - if (unlikely(queue == EM_QUEUE_UNDEF)) { - *err_str = alloc_err_str; - return EM_QUEUE_UNDEF; - } - if (unlikely(queue_req != EM_QUEUE_UNDEF && queue_req != queue)) { - queue_free(queue); - *err_str = "Failed to allocate the requested queue!"; - return EM_QUEUE_UNDEF; - } - - queue_elem_t *queue_elem = queue_elem_get(queue); - - if (unlikely(!queue_elem)) { - queue_free(queue); - *err_str = "Queue elem NULL!"; - return EM_QUEUE_UNDEF; - } - - /* - * Setup/configure the queue - */ - err = queue_setup(queue_elem, &setup, err_str); - if (unlikely(err)) { - queue_free(queue); - /* 'err_str' set by queue_setup() */ - return EM_QUEUE_UNDEF; - } - - return queue; -} - -em_status_t -queue_delete(queue_elem_t *const queue_elem) -{ - queue_state_t old_state; - queue_state_t new_state; - em_status_t ret; - em_queue_t queue = queue_elem->queue; - em_queue_type_t type = queue_elem->type; - - if (unlikely(!queue_allocated(queue_elem))) - return EM_ERR_BAD_STATE; - - old_state = queue_elem->state; - new_state = EM_QUEUE_STATE_INVALID; - - if (type != EM_QUEUE_TYPE_UNSCHEDULED && - type != EM_QUEUE_TYPE_OUTPUT) { - /* verify scheduled queue state transition */ - ret = queue_state_change__check(old_state, new_state, - 0/*!is_setup*/); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_DELETE, - "EM-Q:%" PRI_QUEUE " inv. 
state change:%d=>%d", - queue, old_state, new_state); - } - - if (type != EM_QUEUE_TYPE_UNSCHEDULED && - type != EM_QUEUE_TYPE_LOCAL && - type != EM_QUEUE_TYPE_OUTPUT) { - queue_group_elem_t *const queue_group_elem = - queue_group_elem_get(queue_elem->queue_group); - - RETURN_ERROR_IF(queue_group_elem == NULL || - !queue_group_allocated(queue_group_elem), - EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_DELETE, - "Invalid queue group: %" PRI_QGRP "", - queue_elem->queue_group); - - /* Remove the queue from the queue group list */ - queue_group_rem_queue_list(queue_group_elem, queue_elem); - } - - if (type == EM_QUEUE_TYPE_OUTPUT) { - env_spinlock_t *const lock = &queue_elem->output.lock; - q_elem_output_t *const q_out = &queue_elem->output; - - env_spinlock_lock(lock); - /* Drain any remaining events from the output queue */ - output_queue_drain(queue_elem); - env_spinlock_unlock(lock); - - /* delete the fn-args storage if allocated in create */ - if (q_out->output_fn_args_event != EM_EVENT_UNDEF) { - em_free(q_out->output_fn_args_event); - q_out->output_fn_args_event = EM_EVENT_UNDEF; - } - } - - if (queue_elem->odp_queue != ODP_QUEUE_INVALID) { - int err = odp_queue_destroy(queue_elem->odp_queue); - - RETURN_ERROR_IF(err, EM_ERR_LIB_FAILED, EM_ESCOPE_QUEUE_DELETE, - "EM-Q:%" PRI_QUEUE ":odp_queue_destroy(" PRIu64 "):%d", - queue, odp_queue_to_u64(queue_elem->odp_queue), - err); - } - - queue_elem->odp_queue = ODP_QUEUE_INVALID; - - /* Zero queue name */ - em_shm->queue_tbl.name[queue_hdl2idx(queue)][0] = '\0'; - - /* Remove the queue from the atomic group it belongs to, if any */ - atomic_group_remove_queue(queue_elem); - - return queue_free(queue); -} - -/** - * Setup an allocated/created queue before use. - */ -static int -queue_setup(queue_elem_t *q_elem, const queue_setup_t *setup, - const char **err_str) -{ - int ret; - - /* Set common queue-elem fields based on setup */ - queue_setup_common(q_elem, setup); - - switch (setup->type) { - case EM_QUEUE_TYPE_ATOMIC: /* fallthrough */ - case EM_QUEUE_TYPE_PARALLEL: /* fallthrough */ - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - ret = queue_setup_scheduled(q_elem, setup, err_str); - break; - case EM_QUEUE_TYPE_UNSCHEDULED: - ret = queue_setup_unscheduled(q_elem, setup, err_str); - break; - case EM_QUEUE_TYPE_LOCAL: - ret = queue_setup_local(q_elem, setup, err_str); - break; - case EM_QUEUE_TYPE_OUTPUT: - ret = queue_setup_output(q_elem, setup, err_str); - break; - default: - *err_str = "Queue setup: unknown queue type"; - ret = -1; - break; - } - - if (unlikely(ret)) - return -1; - - env_sync_mem(); - return 0; -} - -/** - * Helper function to queue_setup() - * - * Set EM queue params common to all EM queues based on EM config - */ -static void -queue_setup_common(queue_elem_t *q_elem /*out*/, const queue_setup_t *setup) -{ - const em_queue_t queue = q_elem->queue; - char *const qname = &em_shm->queue_tbl.name[queue_hdl2idx(queue)][0]; - - /* Store queue name */ - if (setup->name) - strncpy(qname, setup->name, EM_QUEUE_NAME_LEN); - else /* default unique name: "EM_Q_" + Q-id = e.g. 
EM_Q_1234 */ - snprintf(qname, EM_QUEUE_NAME_LEN, - "%s%" PRI_QUEUE "", EM_Q_BASENAME, queue); - qname[EM_QUEUE_NAME_LEN - 1] = '\0'; - - /* Init q_elem fields based on setup params and clear the rest */ - q_elem->type = setup->type; - q_elem->priority = setup->prio; - q_elem->queue_group = setup->queue_group; - q_elem->atomic_group = setup->atomic_group; - /* q_elem->conf = not stored, configured later */ - - /* Clear the rest */ - q_elem->odp_queue = ODP_QUEUE_INVALID; - q_elem->scheduled = EM_FALSE; - q_elem->state = EM_QUEUE_STATE_INVALID; - q_elem->context = NULL; - q_elem->eo = EM_EO_UNDEF; - q_elem->eo_elem = NULL; - q_elem->eo_ctx = NULL; - q_elem->use_multi_rcv = 0; - q_elem->max_events = 0; - q_elem->receive_func = NULL; - q_elem->receive_multi_func = NULL; -} - -/** - * Helper function to queue_setup_...() - * - * Set common ODP queue params based on EM config - */ -static void -queue_setup_odp_common(const queue_setup_t *setup, - odp_queue_param_t *odp_queue_param /*out*/) -{ - /* - * Set ODP queue params according to EM queue conf flags - */ - const em_queue_conf_t *conf = setup->conf; - em_queue_flag_t flags = conf->flags & EM_QUEUE_FLAG_MASK; - - if (flags != EM_QUEUE_FLAG_DEFAULT) { - if (flags & EM_QUEUE_FLAG_NONBLOCKING_WF) - odp_queue_param->nonblocking = ODP_NONBLOCKING_WF; - else if (flags & EM_QUEUE_FLAG_NONBLOCKING_LF) - odp_queue_param->nonblocking = ODP_NONBLOCKING_LF; - - if (flags & EM_QUEUE_FLAG_ENQ_NOT_MTSAFE) - odp_queue_param->enq_mode = ODP_QUEUE_OP_MT_UNSAFE; - if (flags & EM_QUEUE_FLAG_DEQ_NOT_MTSAFE) - odp_queue_param->deq_mode = ODP_QUEUE_OP_MT_UNSAFE; - } - - /* - * Set minimum queue size if other than 'default'(0) - */ - if (conf->min_events == 0) { - /* use EM default value from config file: */ - unsigned int size = em_shm->opt.queue.min_events_default; - - if (size != 0) - odp_queue_param->size = size; - /* else: use odp default as set by odp_queue_param_init() */ - } else { - /* use user provided value: */ - odp_queue_param->size = conf->min_events; - } -} - -/** - * Create an ODP queue for the newly created EM queue - */ -static int create_odp_queue(queue_elem_t *q_elem, - const odp_queue_param_t *odp_queue_param) -{ - char odp_name[ODP_QUEUE_NAME_LEN]; - odp_queue_t odp_queue; - - (void)queue_get_name(q_elem, odp_name/*out*/, sizeof(odp_name)); - - odp_queue = odp_queue_create(odp_name, odp_queue_param); - if (unlikely(odp_queue == ODP_QUEUE_INVALID)) - return -1; - - /* Store the corresponding ODP Queue */ - q_elem->odp_queue = odp_queue; - - return 0; -} - -/** - * Helper function to queue_setup() - * - * Set EM and ODP queue params for scheduled queues - */ -static int -queue_setup_scheduled(queue_elem_t *q_elem /*in,out*/, - const queue_setup_t *setup, const char **err_str) -{ - /* validity checks done earlier for queue_group */ - queue_group_elem_t *qgrp_elem = queue_group_elem_get(setup->queue_group); - int err; - - if (unlikely(qgrp_elem == NULL)) { - *err_str = "Q-setup-sched: invalid queue group!"; - return -1; - } - - q_elem->priority = setup->prio; - q_elem->type = setup->type; - q_elem->queue_group = setup->queue_group; - q_elem->atomic_group = setup->atomic_group; - - q_elem->scheduled = EM_TRUE; - q_elem->state = EM_QUEUE_STATE_INIT; - - /* - * Set up a scheduled ODP queue for the EM scheduled queue - */ - odp_queue_param_t odp_queue_param; - odp_schedule_sync_t odp_schedule_sync; - odp_schedule_prio_t odp_prio; - - /* Init odp queue params to default values */ - odp_queue_param_init(&odp_queue_param); - /* Set common ODP queue 
params based on the EM Queue config */ - queue_setup_odp_common(setup, &odp_queue_param /*out*/); - - err = scheduled_queue_type_em2odp(setup->type, - &odp_schedule_sync /*out*/); - if (unlikely(err)) { - *err_str = "Q-setup-sched: invalid queue type!"; - return -2; - } - - err = prio_em2odp(setup->prio, &odp_prio /*out*/); - if (unlikely(err)) { - *err_str = "Q-setup-sched: invalid queue priority!"; - return -3; - } - - odp_queue_param.type = ODP_QUEUE_TYPE_SCHED; - odp_queue_param.sched.prio = odp_prio; - odp_queue_param.sched.sync = odp_schedule_sync; - odp_queue_param.sched.group = qgrp_elem->odp_sched_group; - - /* Retrieve previously stored ODP scheduler capabilities */ - const odp_schedule_capability_t *odp_sched_capa = - &em_shm->queue_tbl.odp_schedule_capability; - - /* - * Check nonblocking level against sched queue capabilities. - * Related ODP queue params set earlier in queue_setup_common(). - */ - if (odp_queue_param.nonblocking == ODP_NONBLOCKING_LF && - odp_sched_capa->lockfree_queues == ODP_SUPPORT_NO) { - *err_str = "Q-setup-sched: non-blocking, lock-free sched queues unavailable"; - return -4; - } - if (odp_queue_param.nonblocking == ODP_NONBLOCKING_WF && - odp_sched_capa->waitfree_queues == ODP_SUPPORT_NO) { - *err_str = "Q-setup-sched: non-blocking, wait-free sched queues unavailable"; - return -5; - } - if (odp_queue_param.enq_mode != ODP_QUEUE_OP_MT || - odp_queue_param.deq_mode != ODP_QUEUE_OP_MT) { - *err_str = "Q-setup-sched: invalid flag: scheduled queues must be MT-safe"; - return -6; - } - - /* - * Note: The ODP queue context points to the EM queue elem. - * The EM queue context set by the user using the API function - * em_queue_set_context() is accessed through the queue_elem_t::context - * and retrieved with em_queue_get_context() or passed by EM to the - * EO-receive function for scheduled queues. - */ - odp_queue_param.context = q_elem; - /* - * Set the context data length (in bytes) for potential prefetching. - * The ODP implementation may use this value as a hint for the number - * of context data bytes to prefetch. - */ - odp_queue_param.context_len = sizeof(*q_elem); - - err = create_odp_queue(q_elem, &odp_queue_param); - if (unlikely(err)) { - *err_str = "Q-setup-sched: scheduled odp queue creation failed!"; - return -7; - } - - /* - * Add the scheduled queue to the queue group - */ - queue_group_add_queue_list(qgrp_elem, q_elem); - - return 0; -} - -/* - * Helper function to queue_setup() - * - * Set EM and ODP queue params for unscheduled queues - */ -static int -queue_setup_unscheduled(queue_elem_t *q_elem /*in,out*/, - const queue_setup_t *setup, const char **err_str) -{ - q_elem->priority = EM_QUEUE_PRIO_UNDEF; - q_elem->type = EM_QUEUE_TYPE_UNSCHEDULED; - q_elem->queue_group = EM_QUEUE_GROUP_UNDEF; - q_elem->atomic_group = EM_ATOMIC_GROUP_UNDEF; - /* unscheduled queues are not scheduled */ - q_elem->scheduled = EM_FALSE; - q_elem->state = EM_QUEUE_STATE_UNSCHEDULED; - - /* - * Set up a plain ODP queue for the EM unscheduled queue. 
- */ - odp_queue_param_t odp_queue_param; - /* Retrieve previously stored ODP queue capabilities */ - const odp_queue_capability_t *odp_queue_capa = - &em_shm->queue_tbl.odp_queue_capability; - - /* Init odp queue params to default values */ - odp_queue_param_init(&odp_queue_param); - /* Set common ODP queue params based on the EM Queue config */ - queue_setup_odp_common(setup, &odp_queue_param); - - odp_queue_param.type = ODP_QUEUE_TYPE_PLAIN; - /* don't order events enqueued into unsched queues */ - odp_queue_param.order = ODP_QUEUE_ORDER_IGNORE; - - /* - * Check nonblocking level against plain queue capabilities. - * Related ODP queue params set earlier in queue_setup_common(). - */ - if (odp_queue_param.nonblocking == ODP_NONBLOCKING_LF && - odp_queue_capa->plain.lockfree.max_num == 0) { - *err_str = "Q-setup-unsched: non-blocking, lock-free unsched queues unavailable"; - return -1; - } - if (odp_queue_param.nonblocking == ODP_NONBLOCKING_WF && - odp_queue_capa->plain.waitfree.max_num == 0) { - *err_str = "Q-setup-unsched: non-blocking, wait-free unsched queues unavailable"; - return -2; - } - - /* - * Note: The ODP queue context points to the EM queue elem. - * The EM queue context set by the user using the API function - * em_queue_set_context() is accessed through the queue_elem_t::context - * and retrieved with em_queue_get_context(). - */ - odp_queue_param.context = q_elem; - /* - * Set the context data length (in bytes) for potential prefetching. - * The ODP implementation may use this value as a hint for the number - * of context data bytes to prefetch. - */ - odp_queue_param.context_len = sizeof(*q_elem); - - int err = create_odp_queue(q_elem, &odp_queue_param); - - if (unlikely(err)) { - *err_str = "Q-setup-unsched: plain odp queue creation failed!"; - return -3; - } - - return 0; -} - -/* - * Helper function to queue_setup() - * - * Set EM queue params for (core-)local queues - */ -static int -queue_setup_local(queue_elem_t *q_elem, const queue_setup_t *setup, - const char **err_str) -{ - (void)err_str; - - q_elem->priority = setup->prio; - q_elem->type = EM_QUEUE_TYPE_LOCAL; - q_elem->queue_group = EM_QUEUE_GROUP_UNDEF; - q_elem->atomic_group = EM_ATOMIC_GROUP_UNDEF; - /* local queues are not scheduled */ - q_elem->scheduled = EM_FALSE; - q_elem->state = EM_QUEUE_STATE_INIT; - - return 0; -} - -/* - * Helper function to queue_setup() - * - * Set EM queue params for output queues - */ -static int -queue_setup_output(queue_elem_t *q_elem, const queue_setup_t *setup, - const char **err_str) -{ - const em_queue_conf_t *qconf = setup->conf; - const em_output_queue_conf_t *output_conf = qconf->conf; - - q_elem->priority = EM_QUEUE_PRIO_UNDEF; - q_elem->type = EM_QUEUE_TYPE_OUTPUT; - q_elem->queue_group = EM_QUEUE_GROUP_UNDEF; - q_elem->atomic_group = EM_ATOMIC_GROUP_UNDEF; - /* output queues are not scheduled */ - q_elem->scheduled = EM_FALSE; - /* use unsched state for output queues */ - q_elem->state = EM_QUEUE_STATE_UNSCHEDULED; - - if (unlikely(output_conf->output_fn == NULL)) { - *err_str = "Q-setup-output: invalid output function"; - return -1; - } - - /* copy whole output conf */ - q_elem->output.output_conf = *output_conf; - q_elem->output.output_fn_args_event = EM_EVENT_UNDEF; - if (output_conf->args_len == 0) { - /* 'output_fn_args' is ignored, if 'args_len' is 0 */ - q_elem->output.output_conf.output_fn_args = NULL; - } else { - em_event_t args_event; - void *args_storage; - - /* alloc an event to copy the given fn-args into */ - args_event = 
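/*
 * Editor's note (illustrative): creating an output queue that exercises
 * this args-copying path via the public API. 'my_output_fn' and
 * 'my_args' are hypothetical user code:
 *
 *	em_output_queue_conf_t out_conf = {
 *		.output_fn = my_output_fn,
 *		.output_fn_args = &my_args,
 *		.args_len = sizeof(my_args)
 *	};
 *	em_queue_conf_t conf = {
 *		.flags = EM_QUEUE_FLAG_DEFAULT,
 *		.min_events = 0,
 *		.conf_len = sizeof(out_conf),
 *		.conf = &out_conf
 *	};
 *	em_queue_t q = em_queue_create("out-q", EM_QUEUE_TYPE_OUTPUT,
 *				       EM_QUEUE_PRIO_UNDEF,
 *				       EM_QUEUE_GROUP_UNDEF, &conf);
 */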
em_alloc(output_conf->args_len, EM_EVENT_TYPE_SW, - EM_POOL_DEFAULT); - if (unlikely(args_event == EM_EVENT_UNDEF)) { - *err_str = "Q-setup-output: alloc output_fn_args fails"; - return -2; - } - /* store the event handle for em_free() later */ - q_elem->output.output_fn_args_event = args_event; - args_storage = em_event_pointer(args_event); - memcpy(args_storage, output_conf->output_fn_args, - output_conf->args_len); - /* update the args ptr to point to the copied content */ - q_elem->output.output_conf.output_fn_args = args_storage; - } - env_spinlock_init(&q_elem->output.lock); - - /* - * Set up a plain ODP queue for EM output queue (re-)ordering. - * - * EM output-queues need an odp-queue to ensure re-ordering if - * events are sent into it from within an ordered context. - */ - odp_queue_param_t odp_queue_param; - /* Retrieve previously stored ODP queue capabilities */ - const odp_queue_capability_t *odp_queue_capa = - &em_shm->queue_tbl.odp_queue_capability; - - /* Init odp queue params to default values */ - odp_queue_param_init(&odp_queue_param); - /* Set common ODP queue params based on the EM Queue config */ - queue_setup_odp_common(setup, &odp_queue_param); - - odp_queue_param.type = ODP_QUEUE_TYPE_PLAIN; - odp_queue_param.order = ODP_QUEUE_ORDER_KEEP; - - /* check nonblocking level against plain queue capabilities */ - if (odp_queue_param.nonblocking == ODP_NONBLOCKING_LF && - odp_queue_capa->plain.lockfree.max_num == 0) { - *err_str = "Q-setup-output: non-blocking, lock-free unsched queues unavailable"; - return -3; - } - if (odp_queue_param.nonblocking == ODP_NONBLOCKING_WF && - odp_queue_capa->plain.waitfree.max_num == 0) { - *err_str = "Q-setup-output: non-blocking, wait-free unsched queues unavailable"; - return -4; - } - - /* output-queue dequeue protected by q_elem->output.lock */ - odp_queue_param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; - - /* explicitly show here that output queues should not set odp-context */ - odp_queue_param.context = NULL; - - int err = create_odp_queue(q_elem, &odp_queue_param); - - if (unlikely(err)) { - *err_str = "Q-setup-output: plain odp queue creation failed!"; - return -5; - } - - return 0; -} - -/** - * Helper func for queue_state_change() - check that state change is valid - * - * Valid state transitions: - * --------------------------------- - * | |new-state|new-state | - * |old_state|is_setup |is_teardown| - * |---------|---------|-----------| - * |INVALID | INIT | (NULL) | - * |INIT | BIND | INVALID | - * |BIND | READY | INIT | - * |READY | (NULL) | BIND | - * --------------------------------- - * State change check is made easy because the following condition is true - * for valid state transitions: abs(old-new)=1 - */ -em_status_t -queue_state_change__check(queue_state_t old_state, queue_state_t new_state, - int is_setup /* vs. is_teardown */) -{ - uint32_t state_diff; - - if (is_setup) - state_diff = new_state - old_state; - else - state_diff = old_state - new_state; - - return (state_diff == 1) ? 
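/*
 * Editor's note - worked example of the abs(old-new) == 1 rule,
 * assuming the consecutive state values INVALID(0), INIT(1), BIND(2),
 * READY(3) implied by the transition table above:
 *
 *	INIT -> BIND  (setup):    state_diff = 2 - 1 = 1 => EM_OK
 *	INIT -> READY (setup):    state_diff = 3 - 1 = 2 => EM_ERR_BAD_STATE
 *	READY -> BIND (teardown): state_diff = 3 - 2 = 1 => EM_OK
 */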
EM_OK : EM_ERR_BAD_STATE; -} - -static inline em_status_t -queue_state_set(queue_elem_t *const q_elem, queue_state_t new_state) -{ - const queue_state_t old_state = q_elem->state; - const int is_setup = (new_state == EM_QUEUE_STATE_READY); - em_status_t err; - - /* allow multiple queue_enable/disable() calls */ - if (new_state == old_state && - (new_state == EM_QUEUE_STATE_READY || - new_state == EM_QUEUE_STATE_BIND)) - return EM_OK; - - err = queue_state_change__check(old_state, new_state, is_setup); - if (unlikely(err != EM_OK)) - return err; - - q_elem->state = new_state; - return EM_OK; -} - -/** - * Change the queue state - */ -em_status_t -queue_state_change(queue_elem_t *const q_elem, queue_state_t new_state) -{ - em_status_t err = queue_state_set(q_elem, new_state); - - RETURN_ERROR_IF(err != EM_OK, err, EM_ESCOPE_QUEUE_STATE_CHANGE, - "EM-Q:%" PRI_QUEUE " inv. state: %d=>%d", - q_elem->queue, q_elem->state, new_state); - return EM_OK; -} - -/** - * Change the queue state for all queues associated with the given EO - */ -em_status_t -queue_state_change_all(eo_elem_t *const eo_elem, queue_state_t new_state) -{ - em_status_t err = EM_OK; - queue_elem_t *q_elem; - list_node_t *pos; - const list_node_t *list_node; - - /* - * Loop through all queues associated with the EO, no need for - * eo_elem-lock since this is called only on single core at the - * end of em_eo_start() - */ - env_spinlock_lock(&eo_elem->lock); - - list_for_each(&eo_elem->queue_list, pos, list_node) { - q_elem = list_node_to_queue_elem(list_node); - err = queue_state_set(q_elem, new_state); - if (unlikely(err != EM_OK)) - break; - } /* end loop */ - - env_spinlock_unlock(&eo_elem->lock); - - RETURN_ERROR_IF(err != EM_OK, err, EM_ESCOPE_QUEUE_STATE_CHANGE, - "EM-Q:%" PRI_QUEUE " inv. 
state: %d=>%d", - q_elem->queue, q_elem->state, new_state); - return EM_OK; -} - -/** - * Enable event reception of an EM queue - */ -em_status_t -queue_enable(queue_elem_t *const q_elem) -{ - em_status_t ret; - - RETURN_ERROR_IF(q_elem == NULL || !queue_allocated(q_elem), - EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_ENABLE, - "Invalid queue"); - - ret = queue_state_change(q_elem, EM_QUEUE_STATE_READY); - - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_ENABLE, - "queue_state_change()->READY fails EM-Q:%" PRI_QUEUE "", - q_elem->queue); - - return EM_OK; -} - -/** - * Enable event reception of ALL queues belonging to an EO - */ -em_status_t -queue_enable_all(eo_elem_t *const eo_elem) -{ - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), - EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_ENABLE_ALL, - "Invalid EO"); - - ret = queue_state_change_all(eo_elem, EM_QUEUE_STATE_READY); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_ENABLE_ALL, - "queue_state_change_all()->READY fails EO:%" PRI_EO "", - eo_elem->eo); - - return EM_OK; -} - -/** - * Disable event reception of an EM queue - */ -em_status_t -queue_disable(queue_elem_t *const q_elem) -{ - em_status_t ret; - - RETURN_ERROR_IF(q_elem == NULL || !queue_allocated(q_elem), - EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_DISABLE, - "Invalid queue"); - - /* Change the state of the queue */ - ret = queue_state_change(q_elem, EM_QUEUE_STATE_BIND); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_DISABLE, - "queue_state_change()->BIND fails, Q:%" PRI_QUEUE "", - q_elem->queue); - - return EM_OK; -} - -/** - * Disable event reception of ALL queues belonging to an EO - */ -em_status_t -queue_disable_all(eo_elem_t *const eo_elem) -{ - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), - EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_DISABLE_ALL, - "Invalid EO"); - - ret = queue_state_change_all(eo_elem, EM_QUEUE_STATE_BIND); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_DISABLE_ALL, - "queue_state_change_all()->BIND: EO:%" PRI_EO "", - eo_elem->eo); - - return EM_OK; -} - -em_event_t queue_dequeue(const queue_elem_t *q_elem) -{ - odp_queue_t odp_queue; - odp_event_t odp_event; - em_event_t em_event; - - odp_queue = q_elem->odp_queue; - odp_event = odp_queue_deq(odp_queue); - if (odp_event == ODP_EVENT_INVALID) - return EM_EVENT_UNDEF; - - em_event = event_odp2em(odp_event); - - if (esv_enabled()) { - event_hdr_t *ev_hdr = event_to_hdr(em_event); - - em_event = evstate_em2usr(em_event, ev_hdr, EVSTATE__DEQUEUE); - } - - return em_event; -} - -int queue_dequeue_multi(const queue_elem_t *q_elem, - em_event_t events[/*out*/], int num) -{ - odp_queue_t odp_queue; - int ret; - - /* use same output-array for dequeue: odp_events[] = events[] */ - odp_event_t *const odp_events = (odp_event_t *)events; - - odp_queue = q_elem->odp_queue; - ret = odp_queue_deq_multi(odp_queue, odp_events /*out*/, num); - if (ret <= 0) - return ret; - - /* now events[] = odp_events[], events[].evgen missing, set below: */ - if (esv_enabled()) { - event_hdr_t *ev_hdrs[ret]; - - event_to_hdr_multi(events, ev_hdrs, ret); - evstate_em2usr_multi(events, ev_hdrs, ret, - EVSTATE__DEQUEUE_MULTI); - } - - return ret; -} - -void print_queue_capa(void) -{ - const odp_queue_capability_t *queue_capa = - &em_shm->queue_tbl.odp_queue_capability; - const odp_schedule_capability_t *sched_capa = - &em_shm->queue_tbl.odp_schedule_capability; - char plain_sz[24] = "n/a"; - char plain_lf_sz[24] = "n/a"; - char plain_wf_sz[24] = "n/a"; - char sched_sz[24] = "nolimit"; - - if 
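/*
 * Editor's note (illustrative): queue_dequeue_multi() above backs the
 * public em_queue_dequeue_multi(), e.g. for draining an unscheduled
 * queue ('unsched_q' is a hypothetical handle):
 *
 *	em_event_t events[32];
 *	int num;
 *
 *	do {
 *		num = em_queue_dequeue_multi(unsched_q, events, 32);
 *		for (int i = 0; i < num; i++)
 *			em_free(events[i]);
 *	} while (num > 0);
 */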
(queue_capa->plain.max_size > 0) - snprintf(plain_sz, sizeof(plain_sz), "%u", - queue_capa->plain.max_size); - if (queue_capa->plain.lockfree.max_size > 0) - snprintf(plain_lf_sz, sizeof(plain_lf_sz), "%u", - queue_capa->plain.lockfree.max_size); - if (queue_capa->plain.waitfree.max_size > 0) - snprintf(plain_wf_sz, sizeof(plain_wf_sz), "%u", - queue_capa->plain.waitfree.max_size); - - if (sched_capa->max_queue_size > 0) - snprintf(sched_sz, sizeof(sched_sz), "%u", - sched_capa->max_queue_size); - - plain_sz[sizeof(plain_sz) - 1] = '\0'; - plain_lf_sz[sizeof(plain_lf_sz) - 1] = '\0'; - plain_wf_sz[sizeof(plain_wf_sz) - 1] = '\0'; - sched_sz[sizeof(sched_sz) - 1] = '\0'; - - EM_PRINT("ODP Queue Capabilities\n" - "----------------------\n" - " Max number of ODP queues: %u\n" - " Max number of ODP ordered locks per queue: %u\n" - " Max number of ODP scheduling groups: %u\n" - " Max number of ODP scheduling priorities: %u\n" - " PLAIN queues:\n" - " blocking: count: %6u size: %6s\n" - " nonblocking-lf: count: %6u size: %6s\n" - " nonblocking-wf: count: %6u size: %6s\n" - " SCHED queues:\n" - " blocking: count: %6u size: %6s\n" - " nonblocking-lf: %ssupported\n" - " nonblocking-wf: %ssupported\n\n", - queue_capa->max_queues, sched_capa->max_ordered_locks, - sched_capa->max_groups, sched_capa->max_prios, - queue_capa->plain.max_num, plain_sz, - queue_capa->plain.lockfree.max_num, plain_lf_sz, - queue_capa->plain.waitfree.max_num, plain_wf_sz, - sched_capa->max_queues, sched_sz, - sched_capa->lockfree_queues == ODP_SUPPORT_NO ? "not " : "", - sched_capa->waitfree_queues == ODP_SUPPORT_NO ? "not " : ""); - - EM_PRINT("EM Queues\n" - "---------\n" - " Max number of EM queues: %d (0x%x)\n" - " EM queue handle offset: %d (0x%x)\n" - " EM queue range: [%d - %d] ([0x%x - 0x%x])\n" - " static range: [%d - %d] ([0x%x - 0x%x])\n" - " internal range: [%d - %d] ([0x%x - 0x%x])\n" - " dynamic range: [%d - %d] ([0x%x - 0x%x])\n" - "\n", - EM_MAX_QUEUES, EM_MAX_QUEUES, - EM_QUEUE_RANGE_OFFSET, EM_QUEUE_RANGE_OFFSET, - EM_QUEUE_STATIC_MIN, LAST_DYN_QUEUE, - EM_QUEUE_STATIC_MIN, LAST_DYN_QUEUE, - EM_QUEUE_STATIC_MIN, EM_QUEUE_STATIC_MAX, - EM_QUEUE_STATIC_MIN, EM_QUEUE_STATIC_MAX, - FIRST_INTERNAL_QUEUE, LAST_INTERNAL_QUEUE, - FIRST_INTERNAL_QUEUE, LAST_INTERNAL_QUEUE, - FIRST_DYN_QUEUE, LAST_DYN_QUEUE, - FIRST_DYN_QUEUE, LAST_DYN_QUEUE); -} - -void print_queue_prio_info(void) -{ - #define MAXPRIOBUF 128 - char buf[MAXPRIOBUF]; - int pos = 0; - - for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { - /* comma separated list of priorities */ - int num = snprintf(&buf[pos], MAXPRIOBUF - pos, "%d%c", - em_shm->queue_prio.map[i], - i < (EM_QUEUE_PRIO_NUM - 1) ? 
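/*
 * Editor's note: with map_mode=1 (adaptive) and eight ODP priority
 * levels available, the loop above would print, for example:
 *
 *	Current queue priority map: [0,1,2,3,4,5,6,7]
 */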
',' : '\0'); - if (num < 0 || num >= (MAXPRIOBUF - pos)) - break; - pos += num; - } - - buf[MAXPRIOBUF - 1] = 0; - EM_PRINT(" Current queue priority map: [%s]\n", buf); -} - -unsigned int -queue_count(void) -{ - return env_atomic32_get(&em_shm->queue_count); -} - -size_t queue_get_name(const queue_elem_t *const q_elem, - char name[/*out*/], const size_t maxlen) -{ - em_queue_t queue = q_elem->queue; - const char *queue_name = &em_shm->queue_tbl.name[queue_hdl2idx(queue)][0]; - size_t len = strnlen(queue_name, EM_QUEUE_NAME_LEN - 1); - - if (maxlen - 1 < len) - len = maxlen - 1; - - if (len) - memcpy(name, queue_name, len); - name[len] = '\0'; - - return len; -} - -static void queue_init_prio_legacy(int minp, int maxp) -{ - /* legacy mode - match the previous simple 3-level implementation */ - - int def = odp_schedule_default_prio(); - - /* needs to be synced with queue_prio_e values. Due to enum this can't be #if */ - COMPILE_TIME_ASSERT(EM_QUEUE_PRIO_HIGHEST < EM_QUEUE_PRIO_NUM, - "queue_prio_e values / EM_QUEUE_PRIO_NUM mismatch!\n"); - - /* init both ends first */ - for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) - em_shm->queue_prio.map[i] = i < (EM_QUEUE_PRIO_NUM / 2) ? minp : maxp; - - /* then add NORMAL in the middle */ - em_shm->queue_prio.map[EM_QUEUE_PRIO_NORMAL] = def; - /* if room: widen the normal range a bit */ - if (EM_QUEUE_PRIO_NORMAL - EM_QUEUE_PRIO_LOW > 1) /* legacy 4-2 */ - em_shm->queue_prio.map[EM_QUEUE_PRIO_NORMAL - 1] = def; - if (EM_QUEUE_PRIO_HIGH - EM_QUEUE_PRIO_NORMAL > 1) /* legacy 6-4 */ - em_shm->queue_prio.map[EM_QUEUE_PRIO_NORMAL + 1] = def; -} - -static void queue_init_prio_adaptive(int minp, int maxp, int nump) -{ - double step = (double)nump / EM_QUEUE_PRIO_NUM; - double cur = (double)minp; - - /* simple linear fit to available levels */ - - for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { - em_shm->queue_prio.map[i] = (int)cur; - cur += step; - } - - /* last EM prio always highest ODP level */ - if (em_shm->queue_prio.map[EM_QUEUE_PRIO_NUM - 1] != maxp) - em_shm->queue_prio.map[EM_QUEUE_PRIO_NUM - 1] = maxp; -} - -static int queue_init_prio_custom(int minp, int maxp) -{ - for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { - em_shm->queue_prio.map[i] = minp + em_shm->opt.queue.priority.custom_map[i]; - if (em_shm->queue_prio.map[i] > maxp || em_shm->queue_prio.map[i] < minp) { - EM_PRINT("Invalid odp priority %d!\n", em_shm->queue_prio.map[i]); - return -1; - } - } - return 0; -} - -static int queue_init_prio_map(int minp, int maxp, int nump) -{ - /* EM normally uses 8 priority levels (EM_QUEUE_PRIO_NUM). 
- * These are mapped to ODP runtime values depending on selected map mode - */ - - switch (em_shm->opt.queue.priority.map_mode) { - case 0: /* legacy mode, use only 3 levels */ - queue_init_prio_legacy(minp, maxp); - break; - case 1: /* adapt to runtime (full spread) */ - queue_init_prio_adaptive(minp, maxp, nump); - break; - case 2: /** custom */ - if (queue_init_prio_custom(minp, maxp) != 0) - return -1; - break; - default: - EM_PRINT("Unknown map_mode %d!\n", em_shm->opt.queue.priority.map_mode); - return -1; - } - - EM_PRINT(" EM uses %d priorities, runtime %d (%d-%d)\n", - EM_QUEUE_PRIO_NUM, nump, minp, nump - minp - 1); - print_queue_prio_info(); - return 0; -} - -const char *queue_get_state_str(queue_state_t state) -{ - const char *str; - - switch (state) { - case EM_QUEUE_STATE_INVALID: - str = "INVALID"; - break; - case EM_QUEUE_STATE_INIT: - str = "INIT"; - break; - case EM_QUEUE_STATE_BIND: - str = "BIND"; - break; - case EM_QUEUE_STATE_READY: - str = "READY"; - break; - case EM_QUEUE_STATE_UNSCHEDULED: - str = "UNSCH"; - break; - default: - str = "UNKNOWN"; - break; - } - - return str; -} - -const char *queue_get_type_str(em_queue_type_t type) -{ - const char *type_str; - - switch (type) { - case EM_QUEUE_TYPE_UNDEF: - type_str = "UNDEF"; - break; - case EM_QUEUE_TYPE_ATOMIC: - type_str = "ATOMIC"; - break; - case EM_QUEUE_TYPE_PARALLEL: - type_str = "PARALLEL"; - break; - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - type_str = "ORDERED"; - break; - case EM_QUEUE_TYPE_UNSCHEDULED: - type_str = "UNSCH"; - break; - case EM_QUEUE_TYPE_LOCAL: - type_str = "LOCAL"; - break; - case EM_QUEUE_TYPE_OUTPUT: - type_str = "OUTPUT"; - break; - default: - type_str = "UNKNOWN"; - break; - } - - return type_str; -} - -#define QUEUE_INFO_HDR_STR \ -"Number of queues: %d\n\n" \ -"Handle Name Priority Type State Qgrp" \ -" Agrp EO Multi-rcv Max-events Ctx\n" \ -"---------------------------------------------------------------------------" \ -"----------------------------------------------------\n" \ -"%s\n" - -#define QUEUE_INFO_LEN 128 - -#define QUEUE_INFO_FMT \ -"%-10" PRI_QUEUE "%-32s%-10" PRI_QPRIO "%-10s%-9s%-10" PRI_QGRP "%-10" PRI_AGRP \ -"%-10" PRI_EO "%-11c%-12d%-3c\n" /*128 bytes per queue*/ - -void print_queue_info(void) -{ - unsigned int q_num; - const queue_elem_t *q_elem; - char q_name[EM_QUEUE_NAME_LEN]; - int len = 0; - int n_print = 0; - - em_queue_t q = em_queue_get_first(&q_num); - - /* q_num may not match the amount of queues actually returned by iterating - * using em_queue_get_next() if queues are added or removed in parallel - * by another core. Thus space for 10 extra queues is reserved. If more - * than 10 queues are added by other cores in parallel, we print only info - * of the (q_num + 10) queues. - */ - const int q_info_buf_len = (q_num + 10) * QUEUE_INFO_LEN + 1/*Terminating null byte*/; - char q_info_buf[q_info_buf_len]; - - while (q != EM_QUEUE_UNDEF) { - q_elem = queue_elem_get(q); - - if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { - q = em_queue_get_next(); - continue; - } - - queue_get_name(q_elem, q_name, EM_QUEUE_NAME_LEN - 1); - n_print = snprintf(q_info_buf + len, - q_info_buf_len - len, - QUEUE_INFO_FMT, - q, q_name, q_elem->priority, - queue_get_type_str(q_elem->type), - queue_get_state_str(q_elem->state), - q_elem->queue_group, - q_elem->atomic_group, - q_elem->eo, - q_elem->use_multi_rcv ? 'Y' : 'N', - q_elem->max_events, - q_elem->context ? 
'Y' : 'N'); - - /* Not enough space to hold more queue info */ - if (n_print >= q_info_buf_len - len) - break; - - len += n_print; - q = em_queue_get_next(); - } - - /* No queue */ - if (len == 0) { - EM_PRINT("No EM queue!\n"); - return; - } - - /* - * To prevent printing incomplete information of the last queue when - * there is not enough space to hold all queue info. - */ - q_info_buf[len] = '\0'; - EM_PRINT(QUEUE_INFO_HDR_STR, q_num, q_info_buf); -} +/* + * Copyright (c) 2015-2021, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "em_include.h" + +#define EM_Q_BASENAME "EM_Q_" + +/** + * Default queue create conf to use if not provided by the user + */ +static const em_queue_conf_t default_queue_conf = { + .flags = EM_QUEUE_FLAG_DEFAULT, + .min_events = 0, /* use EM default value */ + .conf_len = 0, /* .conf is ignored if this is 0 */ + .conf = NULL +}; + +static int +queue_init_prio_map(int minp, int maxp, int nump); +static void +queue_init_prio_legacy(int minp, int maxp); +static void +queue_init_prio_adaptive(int minp, int maxp, int nump); +static int +queue_init_prio_custom(int minp, int maxp); + +static inline int +queue_create_check_sched(const queue_setup_t *setup, const char **err_str); + +static int +queue_setup(queue_elem_t *q_elem, const queue_setup_t *setup, + const char **err_str); +static void +queue_setup_odp_common(const queue_setup_t *setup, + odp_queue_param_t *odp_queue_param); +static int +queue_setup_scheduled(queue_elem_t *q_elem, const queue_setup_t *setup, + const char **err_str); +static int +queue_setup_unscheduled(queue_elem_t *q_elem, const queue_setup_t *setup, + const char **err_str); +static int +queue_setup_local(queue_elem_t *q_elem, const queue_setup_t *setup, + const char **err_str); +static int +queue_setup_output(queue_elem_t *q_elem, const queue_setup_t *setup, + const char **err_str); + +static inline queue_elem_t * +queue_poolelem2queue(objpool_elem_t *const queue_pool_elem) +{ + return (queue_elem_t *)((uintptr_t)queue_pool_elem - + offsetof(queue_elem_t, queue_pool_elem)); +} + +static int +read_config_file(void) +{ + const char *conf_str; + int val = 0; + int ret; + + EM_PRINT("EM-queue config:\n"); + + /* + * Option: queue.min_events_default + */ + conf_str = "queue.min_events_default"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found.\n", conf_str); + return -1; + } + if (val < 0) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", + conf_str, val); + return -1; + } + /* store & print the value */ + em_shm->opt.queue.min_events_default = val; + EM_PRINT(" %s: %d\n", conf_str, val); + + /* + * Option: queue.priority.map_mode + */ + conf_str = "queue.priority.map_mode"; + ret = em_libconfig_lookup_int(&em_shm->libconfig, conf_str, &val); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found\n", conf_str); + return -1; + } + if (val < 0 || val > 2) { + EM_LOG(EM_LOG_ERR, "Bad config value '%s = %d'\n", conf_str, val); + return -1; + } + em_shm->opt.queue.priority.map_mode = val; + EM_PRINT(" %s: %d\n", conf_str, val); + + if (val == 2) { /* custom map */ + conf_str = "queue.priority.custom_map"; + ret = em_libconfig_lookup_array(&em_shm->libconfig, conf_str, + em_shm->opt.queue.priority.custom_map, + EM_QUEUE_PRIO_NUM); + if (unlikely(!ret)) { + EM_LOG(EM_LOG_ERR, "Config option '%s' not found or invalid\n", conf_str); + return -1; + } + EM_PRINT(" %s: [", conf_str); + for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { + EM_PRINT("%d", em_shm->opt.queue.priority.custom_map[i]); + if (i < (EM_QUEUE_PRIO_NUM - 1)) + EM_PRINT(","); + } + EM_PRINT("]\n"); + } + return 0; +} + +/** + * Helper: initialize a queue pool (populate pool with q_elems) + */ +static int +queue_pool_init(queue_tbl_t *const queue_tbl, + queue_pool_t *const queue_pool, + int min_qidx, int max_qidx) +{ + const int cores = em_core_count(); + const int qs_per_pool = (max_qidx - min_qidx + 1); + int qs_per_subpool = qs_per_pool / cores; + int qs_leftover = qs_per_pool % cores; + int 
subpool_idx = 0; + int add_cnt = 0; + + if (objpool_init(&queue_pool->objpool, cores) != 0) + return -1; + + for (int i = min_qidx; i <= max_qidx; i++) { + objpool_add(&queue_pool->objpool, subpool_idx, + &queue_tbl->queue_elem[i].queue_pool_elem); + add_cnt++; + if (add_cnt == qs_per_subpool + qs_leftover) { + subpool_idx++; /* add to next subpool */ + qs_leftover = 0; /* added leftovers to subpool 0 */ + add_cnt = 0; + } + } + + return 0; +} + +/** + * Initialize the EM queues + */ +em_status_t +queue_init(queue_tbl_t *const queue_tbl, + queue_pool_t *const queue_pool, + queue_pool_t *const queue_pool_static) +{ + odp_queue_capability_t *const odp_queue_capa = + &queue_tbl->odp_queue_capability; + odp_schedule_capability_t *const odp_sched_capa = + &queue_tbl->odp_schedule_capability; + int min; + int max; + int ret; + + memset(queue_tbl, 0, sizeof(queue_tbl_t)); + memset(queue_pool, 0, sizeof(queue_pool_t)); + memset(queue_pool_static, 0, sizeof(queue_pool_t)); + env_atomic32_init(&em_shm->queue_count); + + if (read_config_file()) + return EM_ERR_LIB_FAILED; + + /* Retrieve and store the ODP queue capabilities into 'queue_tbl' */ + ret = odp_queue_capability(odp_queue_capa); + RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "odp_queue_capability():%d failed", ret); + + /* Retrieve and store the ODP schedule capabilities into 'queue_tbl' */ + ret = odp_schedule_capability(odp_sched_capa); + RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "odp_schedule_capability():%d failed", ret); + + RETURN_ERROR_IF(odp_queue_capa->max_queues < EM_MAX_QUEUES, + EM_ERR_TOO_LARGE, EM_ESCOPE_INIT, + "EM_MAX_QUEUES:%i > odp-max-queues:%u", + EM_MAX_QUEUES, odp_queue_capa->max_queues); + + /* Initialize the queue element table */ + for (int i = 0; i < EM_MAX_QUEUES; i++) + queue_tbl->queue_elem[i].queue = queue_idx2hdl(i); + + /* Initialize the static queue pool */ + min = queue_id2idx(EM_QUEUE_STATIC_MIN); + max = queue_id2idx(LAST_INTERNAL_QUEUE); + if (queue_pool_init(queue_tbl, queue_pool_static, min, max) != 0) + return EM_ERR_LIB_FAILED; + + /* Initialize the dynamic queue pool */ + min = queue_id2idx(FIRST_DYN_QUEUE); + max = queue_id2idx(LAST_DYN_QUEUE); + if (queue_pool_init(queue_tbl, queue_pool, min, max) != 0) + return EM_ERR_LIB_FAILED; + + /* Initialize priority mapping, adapt to values from ODP */ + min = odp_schedule_min_prio(); + max = odp_schedule_max_prio(); + em_shm->queue_prio.num_runtime = max - min + 1; + ret = queue_init_prio_map(min, max, em_shm->queue_prio.num_runtime); + RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "mapping odp priorities failed: %d", ret); + return EM_OK; +} + +/** + * Queue inits done during EM core local init (once at startup on each core). + * + * Initialize event storage for queues of type 'EM_QUEUE_TYPE_LOCAL'. 
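+ *
+ * The per-core stashes created here take their default size from the
+ * 'queue.min_events_default' option parsed in read_config_file() above.
+ * A minimal em-odp.conf sketch of the queue options handled in this file
+ * (option names as parsed above; the values are illustrative only, not
+ * the shipped defaults):
+ *
+ *   queue: {
+ *       min_events_default = 4096
+ *       priority: {
+ *           map_mode = 2
+ *           custom_map = [0, 0, 1, 2, 3, 4, 5, 6]
+ *       }
+ *   }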
+ */ +em_status_t +queue_init_local(void) +{ + em_locm_t *const locm = &em_locm; + odp_stash_capability_t stash_capa; + odp_stash_param_t stash_param; + unsigned int num_obj = 0; + int core = em_core_id(); + char name[ODP_STASH_NAME_LEN]; + + int ret = odp_stash_capability(&stash_capa, ODP_STASH_TYPE_FIFO); + + if (ret != 0) + return EM_ERR_LIB_FAILED; + + odp_stash_param_init(&stash_param); + + stash_param.type = ODP_STASH_TYPE_FIFO; + stash_param.put_mode = ODP_STASH_OP_ST; + stash_param.get_mode = ODP_STASH_OP_ST; + + /* Stash size: use EM default queue size value from config file: */ + num_obj = em_shm->opt.queue.min_events_default; + if (num_obj != 0) + stash_param.num_obj = num_obj; + /* else: use odp default as set by odp_stash_param_init() */ + + if (stash_param.num_obj > stash_capa.max_num_obj) { + EM_LOG(EM_LOG_PRINT, + "%s(): req stash.num_obj(%" PRIu64 ") > capa.max_num_obj(%" PRIu64 ").\n" + " ==> using max value:%" PRIu64 "\n", __func__, + stash_param.num_obj, stash_capa.max_num_obj, stash_capa.max_num_obj); + stash_param.num_obj = stash_capa.max_num_obj; + } + + stash_param.obj_size = sizeof(uint64_t); + stash_param.cache_size = 0; /* No core local caching */ + + locm->local_queues.empty = 1; + + for (int prio = 0; prio < EM_QUEUE_PRIO_NUM; prio++) { + snprintf(name, sizeof(name), + "local-q:c%02d:prio%d", core, prio); + name[sizeof(name) - 1] = '\0'; + + locm->local_queues.prio[prio].empty_prio = 1; + locm->local_queues.prio[prio].stash = + odp_stash_create(name, &stash_param); + if (unlikely(locm->local_queues.prio[prio].stash == + ODP_STASH_INVALID)) + return EM_ERR_ALLOC_FAILED; + } + + memset(&locm->output_queue_track, 0, + sizeof(locm->output_queue_track)); + + return EM_OK; +} + +/** + * Queue termination done during em_term_core(). + * + * Flush & destroy event storage for queues of type 'EM_QUEUE_TYPE_LOCAL'. + */ +em_status_t +queue_term_local(void) +{ + stash_entry_t entry_tbl[EM_SCHED_MULTI_MAX_BURST]; + em_event_t ev_tbl[EM_SCHED_MULTI_MAX_BURST]; + event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; + em_status_t stat = EM_OK; + + for (;;) { + int num = next_local_queue_events(entry_tbl /*[out]*/, + EM_SCHED_MULTI_MAX_BURST); + if (num <= 0) + break; + + for (int i = 0; i < num; i++) + ev_tbl[i] = (em_event_t)(uintptr_t)entry_tbl[i].evptr; + + event_to_hdr_multi(ev_tbl, ev_hdr_tbl, num); + + if (esv_enabled()) + evstate_em2usr_multi(ev_tbl, ev_hdr_tbl, num, + EVSTATE__TERM_CORE__QUEUE_LOCAL); + em_free_multi(ev_tbl, num); + } + + for (int prio = 0; prio < EM_QUEUE_PRIO_NUM; prio++) { + int ret = odp_stash_destroy(em_locm.local_queues.prio[prio].stash); + + if (unlikely(ret != 0)) + stat = EM_ERR_LIB_FAILED; + } + + return stat; +} + +/** + * Allocate a new EM queue + * + * @param queue EM queue handle if a specific EM queue is requested, + * EM_QUEUE_UNDEF if any EM queue will do. + * + * @return EM queue handle + * @retval EM_QUEUE_UNDEF on failure + */ +em_queue_t queue_alloc(em_queue_t queue, const char **err_str) +{ + queue_elem_t *queue_elem; + objpool_elem_t *queue_pool_elem; + + if (queue == EM_QUEUE_UNDEF) { + /* + * Allocate a dynamic queue, i.e. 
take next available + */ + queue_pool_elem = objpool_rem(&em_shm->queue_pool.objpool, + em_core_id()); + if (unlikely(queue_pool_elem == NULL)) { + *err_str = "queue pool element alloc failed!"; + return EM_QUEUE_UNDEF; + } + queue_elem = queue_poolelem2queue(queue_pool_elem); + } else { + /* + * Allocate a specific static-handle queue, handle given + */ + internal_queue_t iq; + + iq.queue = queue; + if (iq.queue_id < EM_QUEUE_STATIC_MIN || + iq.queue_id > LAST_INTERNAL_QUEUE) { + *err_str = "queue handle not from static range!"; + return EM_QUEUE_UNDEF; + } + + queue_elem = queue_elem_get(queue); + if (unlikely(queue_elem == NULL)) { + *err_str = "queue_elem ptr NULL!"; + return EM_QUEUE_UNDEF; + } + /* Verify that the queue is not allocated */ + if (queue_allocated(queue_elem)) { + *err_str = "queue already allocated!"; + return EM_QUEUE_UNDEF; + } + /* Remove the queue from the pool */ + int ret = objpool_rem_elem(&em_shm->queue_pool_static.objpool, + &queue_elem->queue_pool_elem); + if (unlikely(ret != 0)) { + *err_str = "static queue pool element alloc failed!"; + return EM_QUEUE_UNDEF; + } + } + + env_atomic32_inc(&em_shm->queue_count); + return queue_elem->queue; +} + +em_status_t queue_free(em_queue_t queue) +{ + queue_elem_t *const queue_elem = queue_elem_get(queue); + objpool_t *objpool; + internal_queue_t iq; + + iq.queue = queue; + + if (unlikely(queue_elem == NULL)) + return EM_ERR_BAD_ID; + + if (iq.queue_id >= EM_QUEUE_STATIC_MIN && + iq.queue_id <= LAST_INTERNAL_QUEUE) + objpool = &em_shm->queue_pool_static.objpool; + else + objpool = &em_shm->queue_pool.objpool; + + queue_elem->state = EM_QUEUE_STATE_INVALID; + + objpool_add(objpool, + queue_elem->queue_pool_elem.subpool_idx, + &queue_elem->queue_pool_elem); + + env_atomic32_dec(&em_shm->queue_count); + return EM_OK; +} + +static int +queue_create_check_sched(const queue_setup_t *setup, const char **err_str) +{ + const queue_group_elem_t *queue_group_elem = NULL; + const atomic_group_elem_t *ag_elem = NULL; + + queue_group_elem = queue_group_elem_get(setup->queue_group); + /* scheduled queues are always associated with a queue group */ + if (unlikely(queue_group_elem == NULL || !queue_group_allocated(queue_group_elem))) { + *err_str = "Invalid queue group!"; + return -1; + } + + if (setup->atomic_group != EM_ATOMIC_GROUP_UNDEF) { + ag_elem = atomic_group_elem_get(setup->atomic_group); + if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) { + *err_str = "Invalid atomic group!"; + return -1; + } + } + + if (unlikely(setup->prio >= EM_QUEUE_PRIO_NUM)) { + *err_str = "Invalid queue priority!"; + return -1; + } + return 0; +} + +static int +queue_create_check_args(const queue_setup_t *setup, const char **err_str) +{ + /* scheduled queue */ + if (setup->type == EM_QUEUE_TYPE_ATOMIC || + setup->type == EM_QUEUE_TYPE_PARALLEL || + setup->type == EM_QUEUE_TYPE_PARALLEL_ORDERED) + return queue_create_check_sched(setup, err_str); + + /* other queue types */ + switch (setup->type) { + case EM_QUEUE_TYPE_UNSCHEDULED: + /* API arg checks for unscheduled queues */ + if (unlikely(setup->prio != EM_QUEUE_PRIO_UNDEF)) { + *err_str = "Invalid priority for unsched queue!"; + return -1; + } + if (unlikely(setup->queue_group != EM_QUEUE_GROUP_UNDEF)) { + *err_str = "Queue group not used with unsched queues!"; + return -1; + } + if (unlikely(setup->atomic_group != EM_ATOMIC_GROUP_UNDEF)) { + *err_str = "Atomic group not used with unsched queues!"; + return -1; + } + break; + + case EM_QUEUE_TYPE_LOCAL: + /* API arg checks for 
local queues */ + if (unlikely(setup->queue_group != EM_QUEUE_GROUP_UNDEF)) { + *err_str = "Queue group not used with local queues!"; + return -1; + } + if (unlikely(setup->atomic_group != EM_ATOMIC_GROUP_UNDEF)) { + *err_str = "Atomic group not used with local queues!"; + return -1; + } + if (unlikely(setup->prio >= EM_QUEUE_PRIO_NUM)) { + *err_str = "Invalid queue priority!"; + return -1; + } + break; + + case EM_QUEUE_TYPE_OUTPUT: + /* API arg checks for output queues */ + if (unlikely(setup->queue_group != EM_QUEUE_GROUP_UNDEF)) { + *err_str = "Queue group not used with output queues!"; + return -1; + } + if (unlikely(setup->atomic_group != EM_ATOMIC_GROUP_UNDEF)) { + *err_str = "Atomic group not used with output queues!"; + return -1; + } + if (unlikely(setup->conf == NULL || + setup->conf->conf_len < sizeof(em_output_queue_conf_t) || + setup->conf->conf == NULL)) { + *err_str = "Invalid output queue conf"; + return -1; + } + break; + + default: + *err_str = "Unknown queue type"; + return -1; + } + + return 0; +} + +/** + * Create an EM queue: alloc, setup and add to queue group list + */ +em_queue_t +queue_create(const char *name, em_queue_type_t type, em_queue_prio_t prio, + em_queue_group_t queue_group, em_queue_t queue_req, + em_atomic_group_t atomic_group, const em_queue_conf_t *conf, + const char **err_str) +{ + int err; + + /* Use default EM queue conf if none given */ + if (conf == NULL) + conf = &default_queue_conf; + + queue_setup_t setup = {.name = name, .type = type, .prio = prio, + .atomic_group = atomic_group, + .queue_group = queue_group, .conf = conf}; + + err = queue_create_check_args(&setup, err_str); + if (err) { + /* 'err_str' set by queue_create_check_args() */ + return EM_QUEUE_UNDEF; + } + + /* + * Allocate the queue handle and obtain the corresponding queue-element + */ + const char *alloc_err_str = ""; + + em_queue_t queue = queue_alloc(queue_req, &alloc_err_str); + + if (unlikely(queue == EM_QUEUE_UNDEF)) { + *err_str = alloc_err_str; + return EM_QUEUE_UNDEF; + } + if (unlikely(queue_req != EM_QUEUE_UNDEF && queue_req != queue)) { + queue_free(queue); + *err_str = "Failed to allocate the requested queue!"; + return EM_QUEUE_UNDEF; + } + + queue_elem_t *queue_elem = queue_elem_get(queue); + + if (unlikely(!queue_elem)) { + queue_free(queue); + *err_str = "Queue elem NULL!"; + return EM_QUEUE_UNDEF; + } + + /* + * Setup/configure the queue + */ + err = queue_setup(queue_elem, &setup, err_str); + if (unlikely(err)) { + queue_free(queue); + /* 'err_str' set by queue_setup() */ + return EM_QUEUE_UNDEF; + } + + return queue; +} + +em_status_t +queue_delete(queue_elem_t *const queue_elem) +{ + queue_state_t old_state; + queue_state_t new_state; + em_status_t ret; + em_queue_t queue = queue_elem->queue; + em_queue_type_t type = queue_elem->type; + + if (unlikely(!queue_allocated(queue_elem))) + return EM_ERR_BAD_STATE; + + old_state = queue_elem->state; + new_state = EM_QUEUE_STATE_INVALID; + + if (type != EM_QUEUE_TYPE_UNSCHEDULED && + type != EM_QUEUE_TYPE_OUTPUT) { + /* verify scheduled queue state transition */ + ret = queue_state_change__check(old_state, new_state, + 0/*!is_setup*/); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_DELETE, + "EM-Q:%" PRI_QUEUE " inv. 
state change:%d=>%d", + queue, old_state, new_state); + } + + if (type != EM_QUEUE_TYPE_UNSCHEDULED && + type != EM_QUEUE_TYPE_LOCAL && + type != EM_QUEUE_TYPE_OUTPUT) { + queue_group_elem_t *const queue_group_elem = + queue_group_elem_get(queue_elem->queue_group); + + RETURN_ERROR_IF(queue_group_elem == NULL || + !queue_group_allocated(queue_group_elem), + EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_DELETE, + "Invalid queue group: %" PRI_QGRP "", + queue_elem->queue_group); + + /* Remove the queue from the queue group list */ + queue_group_rem_queue_list(queue_group_elem, queue_elem); + } + + if (type == EM_QUEUE_TYPE_OUTPUT) { + env_spinlock_t *const lock = &queue_elem->output.lock; + q_elem_output_t *const q_out = &queue_elem->output; + + env_spinlock_lock(lock); + /* Drain any remaining events from the output queue */ + output_queue_drain(queue_elem); + env_spinlock_unlock(lock); + + /* delete the fn-args storage if allocated in create */ + if (q_out->output_fn_args_event != EM_EVENT_UNDEF) { + em_free(q_out->output_fn_args_event); + q_out->output_fn_args_event = EM_EVENT_UNDEF; + } + } + + if (queue_elem->odp_queue != ODP_QUEUE_INVALID && + !queue_elem->is_pktin) { + int err = odp_queue_destroy(queue_elem->odp_queue); + + RETURN_ERROR_IF(err, EM_ERR_LIB_FAILED, EM_ESCOPE_QUEUE_DELETE, + "EM-Q:%" PRI_QUEUE ":odp_queue_destroy(" PRIu64 "):%d", + queue, odp_queue_to_u64(queue_elem->odp_queue), + err); + } + + queue_elem->odp_queue = ODP_QUEUE_INVALID; + + /* Zero queue name */ + em_shm->queue_tbl.name[queue_hdl2idx(queue)][0] = '\0'; + + /* Remove the queue from the atomic group it belongs to, if any */ + atomic_group_remove_queue(queue_elem); + + return queue_free(queue); +} + +/** + * Setup an allocated/created queue before use. + */ +static int +queue_setup(queue_elem_t *q_elem, const queue_setup_t *setup, + const char **err_str) +{ + int ret; + + /* Set common queue-elem fields based on setup */ + queue_setup_common(q_elem, setup); + + switch (setup->type) { + case EM_QUEUE_TYPE_ATOMIC: /* fallthrough */ + case EM_QUEUE_TYPE_PARALLEL: /* fallthrough */ + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + ret = queue_setup_scheduled(q_elem, setup, err_str); + break; + case EM_QUEUE_TYPE_UNSCHEDULED: + ret = queue_setup_unscheduled(q_elem, setup, err_str); + break; + case EM_QUEUE_TYPE_LOCAL: + ret = queue_setup_local(q_elem, setup, err_str); + break; + case EM_QUEUE_TYPE_OUTPUT: + ret = queue_setup_output(q_elem, setup, err_str); + break; + default: + *err_str = "Queue setup: unknown queue type"; + ret = -1; + break; + } + + if (unlikely(ret)) + return -1; + + env_sync_mem(); + return 0; +} + +/** + * Helper function to queue_setup() + * + * Set EM queue params common to all EM queues based on EM config + */ +void queue_setup_common(queue_elem_t *q_elem /*out*/, + const queue_setup_t *setup) +{ + const em_queue_t queue = q_elem->queue; + char *const qname = &em_shm->queue_tbl.name[queue_hdl2idx(queue)][0]; + + /* checks that the odp queue context points to an EM queue elem */ + q_elem->valid_check = QUEUE_ELEM_VALID; + + /* Store queue name */ + if (setup->name) + strncpy(qname, setup->name, EM_QUEUE_NAME_LEN); + else /* default unique name: "EM_Q_" + Q-id = e.g. 
EM_Q_1234 */ + snprintf(qname, EM_QUEUE_NAME_LEN, + "%s%" PRI_QUEUE "", EM_Q_BASENAME, queue); + qname[EM_QUEUE_NAME_LEN - 1] = '\0'; + + /* Init q_elem fields based on setup params and clear the rest */ + q_elem->type = setup->type; + q_elem->priority = setup->prio; + q_elem->queue_group = setup->queue_group; + q_elem->atomic_group = setup->atomic_group; + + /* Clear the rest */ + q_elem->odp_queue = ODP_QUEUE_INVALID; + q_elem->is_pktin = false; + q_elem->scheduled = EM_FALSE; + q_elem->state = EM_QUEUE_STATE_INVALID; + q_elem->context = NULL; + q_elem->eo = EM_EO_UNDEF; + q_elem->eo_elem = NULL; + q_elem->eo_ctx = NULL; + q_elem->use_multi_rcv = 0; + q_elem->max_events = 0; + q_elem->receive_func = NULL; + q_elem->receive_multi_func = NULL; +} + +/** + * Helper function to queue_setup_...() + * + * Set common ODP queue params based on EM config + */ +static void +queue_setup_odp_common(const queue_setup_t *setup, + odp_queue_param_t *odp_queue_param /*out*/) +{ + /* + * Set ODP queue params according to EM queue conf flags + */ + const em_queue_conf_t *conf = setup->conf; + em_queue_flag_t flags = conf->flags & EM_QUEUE_FLAG_MASK; + + if (flags != EM_QUEUE_FLAG_DEFAULT) { + if (flags & EM_QUEUE_FLAG_NONBLOCKING_WF) + odp_queue_param->nonblocking = ODP_NONBLOCKING_WF; + else if (flags & EM_QUEUE_FLAG_NONBLOCKING_LF) + odp_queue_param->nonblocking = ODP_NONBLOCKING_LF; + + if (flags & EM_QUEUE_FLAG_ENQ_NOT_MTSAFE) + odp_queue_param->enq_mode = ODP_QUEUE_OP_MT_UNSAFE; + if (flags & EM_QUEUE_FLAG_DEQ_NOT_MTSAFE) + odp_queue_param->deq_mode = ODP_QUEUE_OP_MT_UNSAFE; + } + + /* + * Set minimum queue size if other than 'default'(0) + */ + if (conf->min_events == 0) { + /* use EM default value from config file: */ + unsigned int size = em_shm->opt.queue.min_events_default; + + if (size != 0) + odp_queue_param->size = size; + /* else: use odp default as set by odp_queue_param_init() */ + } else { + /* use user provided value: */ + odp_queue_param->size = conf->min_events; + } +} + +/** + * Create an ODP queue for the newly created EM queue + */ +static int create_odp_queue(queue_elem_t *q_elem, + const odp_queue_param_t *odp_queue_param) +{ + char odp_name[ODP_QUEUE_NAME_LEN]; + odp_queue_t odp_queue; + + (void)queue_get_name(q_elem, odp_name/*out*/, sizeof(odp_name)); + + odp_queue = odp_queue_create(odp_name, odp_queue_param); + if (unlikely(odp_queue == ODP_QUEUE_INVALID)) + return -1; + + /* Store the corresponding ODP Queue */ + q_elem->odp_queue = odp_queue; + + return 0; +} + +/** + * Helper function to queue_setup() + * + * Set EM and ODP queue params for scheduled queues + */ +static int +queue_setup_scheduled(queue_elem_t *q_elem /*in,out*/, + const queue_setup_t *setup, const char **err_str) +{ + /* validity checks done earlier for queue_group */ + queue_group_elem_t *qgrp_elem = queue_group_elem_get(setup->queue_group); + int err; + + if (unlikely(qgrp_elem == NULL)) { + *err_str = "Q-setup-sched: invalid queue group!"; + return -1; + } + + q_elem->priority = setup->prio; + q_elem->type = setup->type; + q_elem->queue_group = setup->queue_group; + q_elem->atomic_group = setup->atomic_group; + + q_elem->scheduled = EM_TRUE; + q_elem->state = EM_QUEUE_STATE_INIT; + + /* + * Set up a scheduled ODP queue for the EM scheduled queue + */ + odp_queue_param_t odp_queue_param; + odp_schedule_sync_t odp_schedule_sync; + odp_schedule_prio_t odp_prio; + + /* Init odp queue params to default values */ + odp_queue_param_init(&odp_queue_param); + /* Set common ODP queue params based on the EM Queue 
config */ + queue_setup_odp_common(setup, &odp_queue_param /*out*/); + + err = scheduled_queue_type_em2odp(setup->type, + &odp_schedule_sync /*out*/); + if (unlikely(err)) { + *err_str = "Q-setup-sched: invalid queue type!"; + return -2; + } + + err = prio_em2odp(setup->prio, &odp_prio /*out*/); + if (unlikely(err)) { + *err_str = "Q-setup-sched: invalid queue priority!"; + return -3; + } + + odp_queue_param.type = ODP_QUEUE_TYPE_SCHED; + odp_queue_param.sched.prio = odp_prio; + odp_queue_param.sched.sync = odp_schedule_sync; + odp_queue_param.sched.group = qgrp_elem->odp_sched_group; + + /* Retrieve previously stored ODP scheduler capabilities */ + const odp_schedule_capability_t *odp_sched_capa = + &em_shm->queue_tbl.odp_schedule_capability; + + /* + * Check nonblocking level against sched queue capabilities. + * Related ODP queue params set earlier in queue_setup_common(). + */ + if (odp_queue_param.nonblocking == ODP_NONBLOCKING_LF && + odp_sched_capa->lockfree_queues == ODP_SUPPORT_NO) { + *err_str = "Q-setup-sched: non-blocking, lock-free sched queues unavailable"; + return -4; + } + if (odp_queue_param.nonblocking == ODP_NONBLOCKING_WF && + odp_sched_capa->waitfree_queues == ODP_SUPPORT_NO) { + *err_str = "Q-setup-sched: non-blocking, wait-free sched queues unavailable"; + return -5; + } + if (odp_queue_param.enq_mode != ODP_QUEUE_OP_MT || + odp_queue_param.deq_mode != ODP_QUEUE_OP_MT) { + *err_str = "Q-setup-sched: invalid flag: scheduled queues must be MT-safe"; + return -6; + } + + /* + * Note: The ODP queue context points to the EM queue elem. + * The EM queue context set by the user using the API function + * em_queue_set_context() is accessed through the queue_elem_t::context + * and retrieved with em_queue_get_context() or passed by EM to the + * EO-receive function for scheduled queues. + */ + odp_queue_param.context = q_elem; + /* + * Set the context data length (in bytes) for potential prefetching. + * The ODP implementation may use this value as a hint for the number + * of context data bytes to prefetch. + */ + odp_queue_param.context_len = sizeof(*q_elem); + + err = create_odp_queue(q_elem, &odp_queue_param); + if (unlikely(err)) { + *err_str = "Q-setup-sched: scheduled odp queue creation failed!"; + return -7; + } + + /* + * Add the scheduled queue to the queue group + */ + queue_group_add_queue_list(qgrp_elem, q_elem); + + return 0; +} + +/* + * Helper function to queue_setup() + * + * Set EM and ODP queue params for unscheduled queues + */ +static int +queue_setup_unscheduled(queue_elem_t *q_elem /*in,out*/, + const queue_setup_t *setup, const char **err_str) +{ + q_elem->priority = EM_QUEUE_PRIO_UNDEF; + q_elem->type = EM_QUEUE_TYPE_UNSCHEDULED; + q_elem->queue_group = EM_QUEUE_GROUP_UNDEF; + q_elem->atomic_group = EM_ATOMIC_GROUP_UNDEF; + /* unscheduled queues are not scheduled */ + q_elem->scheduled = EM_FALSE; + q_elem->state = EM_QUEUE_STATE_UNSCHEDULED; + + /* + * Set up a plain ODP queue for the EM unscheduled queue. 
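+ *
+ * From the application's point of view this roughly corresponds to the
+ * usage sketch below (plain EM API as declared in the public headers;
+ * names illustrative, error handling omitted):
+ *
+ *   em_queue_t unsch_q = em_queue_create("my-unsch-Q",
+ *                                        EM_QUEUE_TYPE_UNSCHEDULED,
+ *                                        EM_QUEUE_PRIO_UNDEF,
+ *                                        EM_QUEUE_GROUP_UNDEF, NULL);
+ *   em_status_t stat = em_send(event, unsch_q);    (enqueue, any core)
+ *   em_event_t ev = em_queue_dequeue(unsch_q);     (manual dequeue)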
+ */ + odp_queue_param_t odp_queue_param; + /* Retrieve previously stored ODP queue capabilities */ + const odp_queue_capability_t *odp_queue_capa = + &em_shm->queue_tbl.odp_queue_capability; + + /* Init odp queue params to default values */ + odp_queue_param_init(&odp_queue_param); + /* Set common ODP queue params based on the EM Queue config */ + queue_setup_odp_common(setup, &odp_queue_param); + + odp_queue_param.type = ODP_QUEUE_TYPE_PLAIN; + /* don't order events enqueued into unsched queues */ + odp_queue_param.order = ODP_QUEUE_ORDER_IGNORE; + + /* + * Check nonblocking level against plain queue capabilities. + * Related ODP queue params set earlier in queue_setup_common(). + */ + if (odp_queue_param.nonblocking == ODP_NONBLOCKING_LF && + odp_queue_capa->plain.lockfree.max_num == 0) { + *err_str = "Q-setup-unsched: non-blocking, lock-free unsched queues unavailable"; + return -1; + } + if (odp_queue_param.nonblocking == ODP_NONBLOCKING_WF && + odp_queue_capa->plain.waitfree.max_num == 0) { + *err_str = "Q-setup-unsched: non-blocking, wait-free unsched queues unavailable"; + return -2; + } + + /* + * Note: The ODP queue context points to the EM queue elem. + * The EM queue context set by the user using the API function + * em_queue_set_context() is accessed through the queue_elem_t::context + * and retrieved with em_queue_get_context(). + */ + odp_queue_param.context = q_elem; + /* + * Set the context data length (in bytes) for potential prefetching. + * The ODP implementation may use this value as a hint for the number + * of context data bytes to prefetch. + */ + odp_queue_param.context_len = sizeof(*q_elem); + + int err = create_odp_queue(q_elem, &odp_queue_param); + + if (unlikely(err)) { + *err_str = "Q-setup-unsched: plain odp queue creation failed!"; + return -3; + } + + return 0; +} + +/* + * Helper function to queue_setup() + * + * Set EM queue params for (core-)local queues + */ +static int +queue_setup_local(queue_elem_t *q_elem, const queue_setup_t *setup, + const char **err_str) +{ + (void)err_str; + + q_elem->priority = setup->prio; + q_elem->type = EM_QUEUE_TYPE_LOCAL; + q_elem->queue_group = EM_QUEUE_GROUP_UNDEF; + q_elem->atomic_group = EM_ATOMIC_GROUP_UNDEF; + /* local queues are not scheduled */ + q_elem->scheduled = EM_FALSE; + q_elem->state = EM_QUEUE_STATE_INIT; + + return 0; +} + +/* + * Helper function to queue_setup() + * + * Set EM queue params for output queues + */ +static int +queue_setup_output(queue_elem_t *q_elem, const queue_setup_t *setup, + const char **err_str) +{ + const em_queue_conf_t *qconf = setup->conf; + const em_output_queue_conf_t *output_conf = qconf->conf; + + q_elem->priority = EM_QUEUE_PRIO_UNDEF; + q_elem->type = EM_QUEUE_TYPE_OUTPUT; + q_elem->queue_group = EM_QUEUE_GROUP_UNDEF; + q_elem->atomic_group = EM_ATOMIC_GROUP_UNDEF; + /* output queues are not scheduled */ + q_elem->scheduled = EM_FALSE; + /* use unsched state for output queues */ + q_elem->state = EM_QUEUE_STATE_UNSCHEDULED; + + if (unlikely(output_conf->output_fn == NULL)) { + *err_str = "Q-setup-output: invalid output function"; + return -1; + } + + /* copy whole output conf */ + q_elem->output.output_conf = *output_conf; + q_elem->output.output_fn_args_event = EM_EVENT_UNDEF; + if (output_conf->args_len == 0) { + /* 'output_fn_args' is ignored, if 'args_len' is 0 */ + q_elem->output.output_conf.output_fn_args = NULL; + } else { + em_event_t args_event; + void *args_storage; + + /* alloc an event to copy the given fn-args into */ + args_event = 
em_alloc((uint32_t)output_conf->args_len, + EM_EVENT_TYPE_SW, EM_POOL_DEFAULT); + if (unlikely(args_event == EM_EVENT_UNDEF)) { + *err_str = "Q-setup-output: alloc output_fn_args fails"; + return -2; + } + /* store the event handle for em_free() later */ + q_elem->output.output_fn_args_event = args_event; + args_storage = em_event_pointer(args_event); + memcpy(args_storage, output_conf->output_fn_args, + output_conf->args_len); + /* update the args ptr to point to the copied content */ + q_elem->output.output_conf.output_fn_args = args_storage; + } + env_spinlock_init(&q_elem->output.lock); + + /* + * Set up a plain ODP queue for EM output queue (re-)ordering. + * + * EM output-queues need an odp-queue to ensure re-ordering if + * events are sent into it from within an ordered context. + */ + odp_queue_param_t odp_queue_param; + /* Retrieve previously stored ODP queue capabilities */ + const odp_queue_capability_t *odp_queue_capa = + &em_shm->queue_tbl.odp_queue_capability; + + /* Init odp queue params to default values */ + odp_queue_param_init(&odp_queue_param); + /* Set common ODP queue params based on the EM Queue config */ + queue_setup_odp_common(setup, &odp_queue_param); + + odp_queue_param.type = ODP_QUEUE_TYPE_PLAIN; + odp_queue_param.order = ODP_QUEUE_ORDER_KEEP; + + /* check nonblocking level against plain queue capabilities */ + if (odp_queue_param.nonblocking == ODP_NONBLOCKING_LF && + odp_queue_capa->plain.lockfree.max_num == 0) { + *err_str = "Q-setup-output: non-blocking, lock-free unsched queues unavailable"; + return -3; + } + if (odp_queue_param.nonblocking == ODP_NONBLOCKING_WF && + odp_queue_capa->plain.waitfree.max_num == 0) { + *err_str = "Q-setup-output: non-blocking, wait-free unsched queues unavailable"; + return -4; + } + + /* output-queue dequeue protected by q_elem->output.lock */ + odp_queue_param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; + + /* explicitly show here that output queues should not set odp-context */ + odp_queue_param.context = NULL; + + int err = create_odp_queue(q_elem, &odp_queue_param); + + if (unlikely(err)) { + *err_str = "Q-setup-output: plain odp queue creation failed!"; + return -5; + } + + return 0; +} + +/** + * Helper func for queue_state_change() - check that state change is valid + * + * Valid state transitions: + * --------------------------------- + * | |new-state|new-state | + * |old_state|is_setup |is_teardown| + * |---------|---------|-----------| + * |INVALID | INIT | (NULL) | + * |INIT | BIND | INVALID | + * |BIND | READY | INIT | + * |READY | (NULL) | BIND | + * --------------------------------- + * State change check is made easy because the following condition is true + * for valid state transitions: abs(old-new)=1 + */ +em_status_t +queue_state_change__check(queue_state_t old_state, queue_state_t new_state, + int is_setup /* vs. is_teardown */) +{ + uint32_t state_diff; + + if (is_setup) + state_diff = new_state - old_state; + else + state_diff = old_state - new_state; + + return (state_diff == 1) ? 
EM_OK : EM_ERR_BAD_STATE; +} + +static inline em_status_t +queue_state_set(queue_elem_t *const q_elem, queue_state_t new_state) +{ + const queue_state_t old_state = q_elem->state; + const int is_setup = (new_state == EM_QUEUE_STATE_READY); + em_status_t err; + + /* allow multiple queue_enable/disable() calls */ + if (new_state == old_state && + (new_state == EM_QUEUE_STATE_READY || + new_state == EM_QUEUE_STATE_BIND)) + return EM_OK; + + err = queue_state_change__check(old_state, new_state, is_setup); + if (unlikely(err != EM_OK)) + return err; + + q_elem->state = new_state; + return EM_OK; +} + +/** + * Change the queue state + */ +em_status_t +queue_state_change(queue_elem_t *const q_elem, queue_state_t new_state) +{ + em_status_t err = queue_state_set(q_elem, new_state); + + RETURN_ERROR_IF(err != EM_OK, err, EM_ESCOPE_QUEUE_STATE_CHANGE, + "EM-Q:%" PRI_QUEUE " inv. state: %d=>%d", + q_elem->queue, q_elem->state, new_state); + return EM_OK; +} + +/** + * Change the queue state for all queues associated with the given EO + */ +em_status_t +queue_state_change_all(eo_elem_t *const eo_elem, queue_state_t new_state) +{ + em_status_t err = EM_OK; + queue_elem_t *q_elem; + list_node_t *pos; + const list_node_t *list_node; + + /* + * Loop through all queues associated with the EO, no need for + * eo_elem-lock since this is called only on single core at the + * end of em_eo_start() + */ + env_spinlock_lock(&eo_elem->lock); + + list_for_each(&eo_elem->queue_list, pos, list_node) { + q_elem = list_node_to_queue_elem(list_node); + err = queue_state_set(q_elem, new_state); + if (unlikely(err != EM_OK)) + break; + } /* end loop */ + + env_spinlock_unlock(&eo_elem->lock); + + RETURN_ERROR_IF(err != EM_OK, err, EM_ESCOPE_QUEUE_STATE_CHANGE, + "EM-Q:%" PRI_QUEUE " inv. 
state: %d=>%d", + q_elem->queue, q_elem->state, new_state); + return EM_OK; +} + +/** + * Enable event reception of an EM queue + */ +em_status_t +queue_enable(queue_elem_t *const q_elem) +{ + em_status_t ret; + + RETURN_ERROR_IF(q_elem == NULL || !queue_allocated(q_elem), + EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_ENABLE, + "Invalid queue"); + + ret = queue_state_change(q_elem, EM_QUEUE_STATE_READY); + + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_ENABLE, + "queue_state_change()->READY fails EM-Q:%" PRI_QUEUE "", + q_elem->queue); + + return EM_OK; +} + +/** + * Enable event reception of ALL queues belonging to an EO + */ +em_status_t +queue_enable_all(eo_elem_t *const eo_elem) +{ + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), + EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_ENABLE_ALL, + "Invalid EO"); + + ret = queue_state_change_all(eo_elem, EM_QUEUE_STATE_READY); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_ENABLE_ALL, + "queue_state_change_all()->READY fails EO:%" PRI_EO "", + eo_elem->eo); + + return EM_OK; +} + +/** + * Disable event reception of an EM queue + */ +em_status_t +queue_disable(queue_elem_t *const q_elem) +{ + em_status_t ret; + + RETURN_ERROR_IF(q_elem == NULL || !queue_allocated(q_elem), + EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_DISABLE, + "Invalid queue"); + + /* Change the state of the queue */ + ret = queue_state_change(q_elem, EM_QUEUE_STATE_BIND); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_DISABLE, + "queue_state_change()->BIND fails, Q:%" PRI_QUEUE "", + q_elem->queue); + + return EM_OK; +} + +/** + * Disable event reception of ALL queues belonging to an EO + */ +em_status_t +queue_disable_all(eo_elem_t *const eo_elem) +{ + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), + EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_DISABLE_ALL, + "Invalid EO"); + + ret = queue_state_change_all(eo_elem, EM_QUEUE_STATE_BIND); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_QUEUE_DISABLE_ALL, + "queue_state_change_all()->BIND: EO:%" PRI_EO "", + eo_elem->eo); + + return EM_OK; +} + +em_event_t queue_dequeue(const queue_elem_t *q_elem) +{ + odp_queue_t odp_queue; + odp_event_t odp_event; + em_event_t em_event; + + odp_queue = q_elem->odp_queue; + odp_event = odp_queue_deq(odp_queue); + if (odp_event == ODP_EVENT_INVALID) + return EM_EVENT_UNDEF; + + em_event = event_odp2em(odp_event); + + if (esv_enabled()) { + event_hdr_t *ev_hdr = event_to_hdr(em_event); + + em_event = evstate_em2usr(em_event, ev_hdr, EVSTATE__DEQUEUE); + } + + return em_event; +} + +int queue_dequeue_multi(const queue_elem_t *q_elem, + em_event_t events[/*out*/], int num) +{ + odp_queue_t odp_queue; + int ret; + + /* use same output-array for dequeue: odp_events[] = events[] */ + odp_event_t *const odp_events = (odp_event_t *)events; + + odp_queue = q_elem->odp_queue; + ret = odp_queue_deq_multi(odp_queue, odp_events /*out*/, num); + if (ret <= 0) + return ret; + + /* now events[] = odp_events[], events[].evgen missing, set below: */ + if (esv_enabled()) { + event_hdr_t *ev_hdrs[ret]; + + event_to_hdr_multi(events, ev_hdrs, ret); + evstate_em2usr_multi(events, ev_hdrs, ret, + EVSTATE__DEQUEUE_MULTI); + } + + return ret; +} + +void print_queue_capa(void) +{ + const odp_queue_capability_t *queue_capa = + &em_shm->queue_tbl.odp_queue_capability; + const odp_schedule_capability_t *sched_capa = + &em_shm->queue_tbl.odp_schedule_capability; + char plain_sz[24] = "n/a"; + char plain_lf_sz[24] = "n/a"; + char plain_wf_sz[24] = "n/a"; + char sched_sz[24] = "nolimit"; + + if 
(queue_capa->plain.max_size > 0) + snprintf(plain_sz, sizeof(plain_sz), "%u", + queue_capa->plain.max_size); + if (queue_capa->plain.lockfree.max_size > 0) + snprintf(plain_lf_sz, sizeof(plain_lf_sz), "%u", + queue_capa->plain.lockfree.max_size); + if (queue_capa->plain.waitfree.max_size > 0) + snprintf(plain_wf_sz, sizeof(plain_wf_sz), "%u", + queue_capa->plain.waitfree.max_size); + + if (sched_capa->max_queue_size > 0) + snprintf(sched_sz, sizeof(sched_sz), "%u", + sched_capa->max_queue_size); + + plain_sz[sizeof(plain_sz) - 1] = '\0'; + plain_lf_sz[sizeof(plain_lf_sz) - 1] = '\0'; + plain_wf_sz[sizeof(plain_wf_sz) - 1] = '\0'; + sched_sz[sizeof(sched_sz) - 1] = '\0'; + + EM_PRINT("ODP Queue Capabilities\n" + "----------------------\n" + " Max number of ODP queues: %u\n" + " Max number of ODP ordered locks per queue: %u\n" + " Max number of ODP scheduling groups: %u\n" + " Max number of ODP scheduling priorities: %u\n" + " PLAIN queues:\n" + " blocking: count: %6u size: %6s\n" + " nonblocking-lf: count: %6u size: %6s\n" + " nonblocking-wf: count: %6u size: %6s\n" + " SCHED queues:\n" + " blocking: count: %6u size: %6s\n" + " nonblocking-lf: %ssupported\n" + " nonblocking-wf: %ssupported\n\n", + queue_capa->max_queues, sched_capa->max_ordered_locks, + sched_capa->max_groups, sched_capa->max_prios, + queue_capa->plain.max_num, plain_sz, + queue_capa->plain.lockfree.max_num, plain_lf_sz, + queue_capa->plain.waitfree.max_num, plain_wf_sz, + sched_capa->max_queues, sched_sz, + sched_capa->lockfree_queues == ODP_SUPPORT_NO ? "not " : "", + sched_capa->waitfree_queues == ODP_SUPPORT_NO ? "not " : ""); + + EM_PRINT("EM Queues\n" + "---------\n" + " Max number of EM queues: %d (0x%x)\n" + " EM queue handle offset: %d (0x%x)\n" + " EM queue range: [%d - %d] ([0x%x - 0x%x])\n" + " static range: [%d - %d] ([0x%x - 0x%x])\n" + " internal range: [%d - %d] ([0x%x - 0x%x])\n" + " dynamic range: [%d - %d] ([0x%x - 0x%x])\n" + "\n", + EM_MAX_QUEUES, EM_MAX_QUEUES, + EM_QUEUE_RANGE_OFFSET, EM_QUEUE_RANGE_OFFSET, + EM_QUEUE_STATIC_MIN, LAST_DYN_QUEUE, + EM_QUEUE_STATIC_MIN, LAST_DYN_QUEUE, + EM_QUEUE_STATIC_MIN, EM_QUEUE_STATIC_MAX, + EM_QUEUE_STATIC_MIN, EM_QUEUE_STATIC_MAX, + FIRST_INTERNAL_QUEUE, LAST_INTERNAL_QUEUE, + FIRST_INTERNAL_QUEUE, LAST_INTERNAL_QUEUE, + FIRST_DYN_QUEUE, LAST_DYN_QUEUE, + FIRST_DYN_QUEUE, LAST_DYN_QUEUE); +} + +void print_queue_prio_info(void) +{ + #define MAXPRIOBUF 128 + char buf[MAXPRIOBUF]; + int pos = 0; + + for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { + /* comma separated list of priorities */ + int num = snprintf(&buf[pos], MAXPRIOBUF - pos, "%d%c", + em_shm->queue_prio.map[i], + i < (EM_QUEUE_PRIO_NUM - 1) ? 
',' : '\0'); + if (num < 0 || num >= (MAXPRIOBUF - pos)) + break; + pos += num; + } + + buf[MAXPRIOBUF - 1] = 0; + EM_PRINT(" Current queue priority map: [%s]\n", buf); +} + +unsigned int +queue_count(void) +{ + return env_atomic32_get(&em_shm->queue_count); +} + +size_t queue_get_name(const queue_elem_t *const q_elem, + char name[/*out*/], const size_t maxlen) +{ + em_queue_t queue = q_elem->queue; + const char *queue_name = &em_shm->queue_tbl.name[queue_hdl2idx(queue)][0]; + size_t len = strnlen(queue_name, EM_QUEUE_NAME_LEN - 1); + + if (maxlen - 1 < len) + len = maxlen - 1; + + if (len) + memcpy(name, queue_name, len); + name[len] = '\0'; + + return len; +} + +static void queue_init_prio_legacy(int minp, int maxp) +{ + /* legacy mode - match the previous simple 3-level implementation */ + + int def = odp_schedule_default_prio(); + + /* needs to be synced with queue_prio_e values. Due to enum this can't be #if */ + COMPILE_TIME_ASSERT(EM_QUEUE_PRIO_HIGHEST < EM_QUEUE_PRIO_NUM, + "queue_prio_e values / EM_QUEUE_PRIO_NUM mismatch!\n"); + + /* init both ends first */ + for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) + em_shm->queue_prio.map[i] = i < (EM_QUEUE_PRIO_NUM / 2) ? minp : maxp; + + /* then add NORMAL in the middle */ + em_shm->queue_prio.map[EM_QUEUE_PRIO_NORMAL] = def; + /* if room: widen the normal range a bit */ + if (EM_QUEUE_PRIO_NORMAL - EM_QUEUE_PRIO_LOW > 1) /* legacy 4-2 */ + em_shm->queue_prio.map[EM_QUEUE_PRIO_NORMAL - 1] = def; + if (EM_QUEUE_PRIO_HIGH - EM_QUEUE_PRIO_NORMAL > 1) /* legacy 6-4 */ + em_shm->queue_prio.map[EM_QUEUE_PRIO_NORMAL + 1] = def; +} + +static void queue_init_prio_adaptive(int minp, int maxp, int nump) +{ + double step = (double)nump / EM_QUEUE_PRIO_NUM; + double cur = (double)minp; + + /* simple linear fit to available levels */ + + for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { + em_shm->queue_prio.map[i] = (int)cur; + cur += step; + } + + /* last EM prio always highest ODP level */ + if (em_shm->queue_prio.map[EM_QUEUE_PRIO_NUM - 1] != maxp) + em_shm->queue_prio.map[EM_QUEUE_PRIO_NUM - 1] = maxp; +} + +static int queue_init_prio_custom(int minp, int maxp) +{ + for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { + em_shm->queue_prio.map[i] = minp + em_shm->opt.queue.priority.custom_map[i]; + if (em_shm->queue_prio.map[i] > maxp || em_shm->queue_prio.map[i] < minp) { + EM_PRINT("Invalid odp priority %d!\n", em_shm->queue_prio.map[i]); + return -1; + } + } + return 0; +} + +static int queue_init_prio_map(int minp, int maxp, int nump) +{ + /* EM normally uses 8 priority levels (EM_QUEUE_PRIO_NUM). 
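+ * (A worked example of the mapping described below, with illustrative
+ *  values: assuming map_mode=1 and an ODP runtime offering 16 priority
+ *  levels 0..15, queue_init_prio_adaptive() computes step = 16/8 = 2 and
+ *  yields map[] = {0, 2, 4, 6, 8, 10, 12, 15}; the last EM level is
+ *  always forced to the highest ODP priority.)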
+ * These are mapped to ODP runtime values depending on the selected map mode + */ + + switch (em_shm->opt.queue.priority.map_mode) { + case 0: /* legacy mode, use only 3 levels */ + queue_init_prio_legacy(minp, maxp); + break; + case 1: /* adapt to runtime (full spread) */ + queue_init_prio_adaptive(minp, maxp, nump); + break; + case 2: /* custom */ + if (queue_init_prio_custom(minp, maxp) != 0) + return -1; + break; + default: + EM_PRINT("Unknown map_mode %d!\n", em_shm->opt.queue.priority.map_mode); + return -1; + } + + EM_PRINT(" EM uses %d priorities, runtime %d (%d-%d)\n", + EM_QUEUE_PRIO_NUM, nump, minp, maxp); + print_queue_prio_info(); + return 0; +} + +const char *queue_get_state_str(queue_state_t state) +{ + const char *str; + + switch (state) { + case EM_QUEUE_STATE_INVALID: + str = "INVALID"; + break; + case EM_QUEUE_STATE_INIT: + str = "INIT"; + break; + case EM_QUEUE_STATE_BIND: + str = "BIND"; + break; + case EM_QUEUE_STATE_READY: + str = "READY"; + break; + case EM_QUEUE_STATE_UNSCHEDULED: + str = "UNSCH"; + break; + default: + str = "UNKNOWN"; + break; + } + + return str; +} + +const char *queue_get_type_str(em_queue_type_t type) +{ + const char *type_str; + + switch (type) { + case EM_QUEUE_TYPE_UNDEF: + type_str = "UNDEF"; + break; + case EM_QUEUE_TYPE_ATOMIC: + type_str = "ATOMIC"; + break; + case EM_QUEUE_TYPE_PARALLEL: + type_str = "PARALLEL"; + break; + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + type_str = "ORDERED"; + break; + case EM_QUEUE_TYPE_UNSCHEDULED: + type_str = "UNSCH"; + break; + case EM_QUEUE_TYPE_LOCAL: + type_str = "LOCAL"; + break; + case EM_QUEUE_TYPE_OUTPUT: + type_str = "OUTPUT"; + break; + default: + type_str = "UNKNOWN"; + break; + } + + return type_str; +} + +#define QUEUE_INFO_HDR_STR \ +"Number of queues: %d\n\n" \ +"Handle Name Priority Type State Qgrp" \ +" Agrp EO Multi-rcv Max-events Ctx\n" \ +"---------------------------------------------------------------------------" \ +"----------------------------------------------------\n" \ +"%s\n" + +#define QUEUE_INFO_LEN 128 + +#define QUEUE_INFO_FMT \ +"%-10" PRI_QUEUE "%-32s%-10" PRI_QPRIO "%-10s%-9s%-10" PRI_QGRP "%-10" PRI_AGRP \ +"%-10" PRI_EO "%-11c%-12d%-3c\n" /*128 bytes per queue*/ + +void print_queue_info(void) +{ + unsigned int q_num; + const queue_elem_t *q_elem; + char q_name[EM_QUEUE_NAME_LEN]; + int len = 0; + int n_print = 0; + + em_queue_t q = em_queue_get_first(&q_num); + + /* q_num may not match the number of queues actually returned by iterating + * using em_queue_get_next() if queues are added or removed in parallel + * by another core. Thus space for 10 extra queues is reserved. If more + * than 10 queues are added by other cores in parallel, we print only info + * for the first (q_num + 10) queues. + */ + const int q_info_buf_len = (q_num + 10) * QUEUE_INFO_LEN + 1/*Terminating null byte*/; + char q_info_buf[q_info_buf_len]; + + while (q != EM_QUEUE_UNDEF) { + q_elem = queue_elem_get(q); + + if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { + q = em_queue_get_next(); + continue; + } + + queue_get_name(q_elem, q_name, EM_QUEUE_NAME_LEN - 1); + n_print = snprintf(q_info_buf + len, + q_info_buf_len - len, + QUEUE_INFO_FMT, + q, q_name, q_elem->priority, + queue_get_type_str(q_elem->type), + queue_get_state_str(q_elem->state), + q_elem->queue_group, + q_elem->atomic_group, + q_elem->eo, + q_elem->use_multi_rcv ? 'Y' : 'N', + q_elem->max_events, + q_elem->context ? 
'Y' : 'N'); + + /* Not enough space to hold more queue info */ + if (n_print >= q_info_buf_len - len) + break; + + len += n_print; + q = em_queue_get_next(); + } + + /* No queue */ + if (len == 0) { + EM_PRINT("No EM queue!\n"); + return; + } + + /* + * To prevent printing incomplete information of the last queue when + * there is not enough space to hold all queue info. + */ + q_info_buf[len] = '\0'; + EM_PRINT(QUEUE_INFO_HDR_STR, q_num, q_info_buf); +} diff --git a/src/em_queue.h b/src/em_queue.h index 7d003fe7..5abc76ba 100644 --- a/src/em_queue.h +++ b/src/em_queue.h @@ -1,399 +1,197 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * EM internal queue functions - */ - -#ifndef EM_QUEUE_H_ -#define EM_QUEUE_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include - -#define DIFF_ABS(a, b) ((a) > (b) ? (a) - (b) : (b) - (a)) -#define SMALLEST_NBR(a, b) ((a) > (b) ? (b) : (a)) - -em_status_t -queue_init(queue_tbl_t *const queue_tbl, - queue_pool_t *const queue_pool, - queue_pool_t *const queue_pool_static); - -em_status_t -queue_init_local(void); -em_status_t -queue_term_local(void); - -em_queue_t -queue_create(const char *name, em_queue_type_t type, em_queue_prio_t prio, - em_queue_group_t queue_group, em_queue_t queue_req, - em_atomic_group_t atomic_group, const em_queue_conf_t *conf, - const char **err_str); - -em_status_t -queue_delete(queue_elem_t *const queue_elem); - -em_status_t -queue_enable(queue_elem_t *const q_elem); - -em_status_t -queue_enable_all(eo_elem_t *const eo_elem); - -em_status_t -queue_disable(queue_elem_t *const q_elem); - -em_status_t -queue_disable_all(eo_elem_t *const eo_elem); - -em_status_t -queue_state_change__check(queue_state_t old_state, queue_state_t new_state, - int is_setup /* vs. 
is_teardown */); -em_status_t -queue_state_change(queue_elem_t *const queue_elem, queue_state_t new_state); - -em_status_t -queue_state_change_all(eo_elem_t *const eo_elem, queue_state_t new_state); - -unsigned int queue_count(void); - -size_t queue_get_name(const queue_elem_t *const q_elem, - char name[/*out*/], const size_t maxlen); - -em_event_t queue_dequeue(const queue_elem_t *q_elem); -int queue_dequeue_multi(const queue_elem_t *q_elem, - em_event_t events[/*out*/], int num); - -/** Print information about all EM queues */ -void print_queue_info(void); -/** Print queue capabilities */ -void print_queue_capa(void); -void print_queue_prio_info(void); - -/** Get the string of a queue state */ -const char *queue_get_state_str(queue_state_t state); -/** Get the string of a queue type */ -const char *queue_get_type_str(em_queue_type_t type); - -/** - * Enqueue multiple events into an unscheduled queue. - * Internal func, application should use em_send_multi() instead. - */ -static inline unsigned int -queue_unsched_enqueue_multi(const em_event_t events[], int num, - const queue_elem_t *const q_elem) -{ - odp_event_t odp_events[num]; - odp_queue_t odp_queue = q_elem->odp_queue; - int ret; - - if (unlikely(EM_CHECK_LEVEL > 1 && odp_queue == ODP_QUEUE_INVALID)) - return 0; - - if (unlikely(EM_CHECK_LEVEL > 0 && - q_elem->state != EM_QUEUE_STATE_UNSCHEDULED)) - return 0; - - events_em2odp(events, odp_events, num); - - ret = odp_queue_enq_multi(odp_queue, odp_events, num); - if (unlikely(ret < 0)) - return 0; - - return ret; -} - -/** - * Enqueue en event into an unscheduled queue. - * Internal func, application should use em_send() instead. - */ -static inline em_status_t -queue_unsched_enqueue(em_event_t event, const queue_elem_t *const q_elem) -{ - odp_event_t odp_event = event_em2odp(event); - odp_queue_t odp_queue = q_elem->odp_queue; - int ret; - - if (unlikely(EM_CHECK_LEVEL > 1 && - (odp_event == ODP_EVENT_INVALID || - odp_queue == ODP_QUEUE_INVALID))) - return EM_ERR_NOT_FOUND; - - if (unlikely(EM_CHECK_LEVEL > 0 && - q_elem->state != EM_QUEUE_STATE_UNSCHEDULED)) - return EM_ERR_BAD_STATE; - - ret = odp_queue_enq(odp_queue, odp_event); - if (unlikely(EM_CHECK_LEVEL > 0 && ret != 0)) - return EM_ERR_LIB_FAILED; - - return EM_OK; -} - -/** Is the queue allocated? */ -static inline int -queue_allocated(const queue_elem_t *const queue_elem) -{ - return !objpool_in_pool(&queue_elem->queue_pool_elem); -} - -/** Convert EM queue handle to queue index */ -static inline int -queue_hdl2idx(em_queue_t queue) -{ - internal_queue_t iq = {.queue = queue}; - int queue_idx; - - queue_idx = iq.queue_id - EM_QUEUE_RANGE_OFFSET; - - return queue_idx; -} - -/** Convert queue index to EM queue handle */ -static inline em_queue_t -queue_idx2hdl(int queue_idx) -{ - internal_queue_t iq = {.queue = 0}; - - iq.queue_id = queue_idx + EM_QUEUE_RANGE_OFFSET; - iq.device_id = em_shm->conf.device_id; - - return iq.queue; -} - -/** Convert queue ID (internal_queue_t:queue_id) to queue index */ -static inline int -queue_id2idx(uint16_t queue_id) -{ - return (int)queue_id - EM_QUEUE_RANGE_OFFSET; -} - -/** Convert queue ID (internal_queue_t:queue_id) handle to EM queue handle */ -static inline em_queue_t -queue_id2hdl(uint16_t queue_id) -{ - internal_queue_t iq = {.queue = 0}; - - iq.queue_id = queue_id; - iq.device_id = em_shm->conf.device_id; - - return iq.queue; -} - -/** - * Return 'true' if the EM queue handle belongs to another EM instance. 
- * - * Sending to external queues will cause EM to call the user provided - * functions 'event_send_device' or 'event_send_device_multi' - */ -static inline bool -queue_external(em_queue_t queue) -{ - internal_queue_t iq = {.queue = queue}; - - if (unlikely(queue == EM_QUEUE_UNDEF)) - return 0; - - return iq.device_id != em_shm->conf.device_id ? true : false; -} - -/** Returns queue element associated with queued id 'queue' */ -static inline queue_elem_t * -queue_elem_get(const em_queue_t queue) -{ - int queue_idx; - internal_queue_t iq; - queue_elem_t *queue_elem; - - iq.queue = queue; - queue_idx = queue_id2idx(iq.queue_id); - - if (unlikely(iq.device_id != em_shm->conf.device_id || - (unsigned int)queue_idx > EM_MAX_QUEUES - 1)) - return NULL; - - queue_elem = &em_shm->queue_tbl.queue_elem[queue_idx]; - - return queue_elem; -} - -static inline em_queue_t -queue_current(void) -{ - const queue_elem_t *const q_elem = em_locm.current.q_elem; - - if (unlikely(q_elem == NULL)) - return EM_QUEUE_UNDEF; - - return q_elem->queue; -} - -static inline queue_elem_t * -list_node_to_queue_elem(const list_node_t *const list_node) -{ - queue_elem_t *const q_elem = (queue_elem_t *)((uintptr_t)list_node - - offsetof(queue_elem_t, queue_node)); - - return likely(list_node != NULL) ? q_elem : NULL; -} - -static inline int -prio_em2odp(em_queue_prio_t em_prio, odp_schedule_prio_t *odp_prio /*out*/) -{ - if (em_prio < EM_QUEUE_PRIO_NUM) { - *odp_prio = em_shm->queue_prio.map[em_prio]; - return 0; - } - return -1; -} - -static inline int -scheduled_queue_type_em2odp(em_queue_type_t em_queue_type, - odp_schedule_sync_t *odp_schedule_sync /* out */) -{ - switch (em_queue_type) { - case EM_QUEUE_TYPE_ATOMIC: - *odp_schedule_sync = ODP_SCHED_SYNC_ATOMIC; - return 0; - case EM_QUEUE_TYPE_PARALLEL: - *odp_schedule_sync = ODP_SCHED_SYNC_PARALLEL; - return 0; - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - *odp_schedule_sync = ODP_SCHED_SYNC_ORDERED; - return 0; - default: - return -1; - } -} - -static inline int -scheduled_queue_type_odp2em(odp_schedule_sync_t odp_schedule_sync, - em_queue_type_t *em_queue_type /* out */) -{ - switch (odp_schedule_sync) { - case ODP_SCHED_SYNC_ATOMIC: - *em_queue_type = EM_QUEUE_TYPE_ATOMIC; - return 0; - case ODP_SCHED_SYNC_PARALLEL: - *em_queue_type = EM_QUEUE_TYPE_PARALLEL; - return 0; - case ODP_SCHED_SYNC_ORDERED: - *em_queue_type = EM_QUEUE_TYPE_PARALLEL_ORDERED; - return 0; - default: - *em_queue_type = EM_QUEUE_TYPE_UNDEF; - return 0; - } -} - -static inline event_hdr_t * -local_queue_dequeue(void) -{ - em_locm_t *const locm = &em_locm; - odp_queue_t local_queue; - odp_event_t odp_event; - em_event_t event; - event_hdr_t *ev_hdr; - em_queue_prio_t prio; - int i; - - if (locm->local_queues.empty) - return NULL; - - prio = EM_QUEUE_PRIO_NUM - 1; - for (i = 0; i < EM_QUEUE_PRIO_NUM; i++) { - /* from hi to lo prio: next prio if local queue is empty */ - if (locm->local_queues.prio[prio].empty_prio) { - prio--; - continue; - } - - local_queue = locm->local_queues.prio[prio].queue; - odp_event = odp_queue_deq(local_queue); - - if (odp_event != ODP_EVENT_INVALID) { - event = event_odp2em(odp_event); /* .evgen not set */ - ev_hdr = event_to_hdr(event); - return ev_hdr; - } - - locm->local_queues.prio[prio].empty_prio = 1; - prio--; - } - - locm->local_queues.empty = 1; - return NULL; -} - -static inline int -next_local_queue_events(em_event_t ev_tbl[/*out*/], int num_events) -{ - em_locm_t *const locm = &em_locm; - - if (locm->local_queues.empty) - return 0; - - /* use same output-array: 
odp_evtbl[] = ev_tbl[] */ - odp_event_t *const odp_evtbl = (odp_event_t *)ev_tbl; - - em_queue_prio_t prio; - odp_queue_t local_queue; - int num; - - prio = EM_QUEUE_PRIO_NUM - 1; - for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { - /* from hi to lo prio: next prio if local queue is empty */ - if (locm->local_queues.prio[prio].empty_prio) { - prio--; - continue; - } - - local_queue = locm->local_queues.prio[prio].queue; - num = odp_queue_deq_multi(local_queue, odp_evtbl/*out=ev_tbl*/, - num_events); - if (num > 0) - return num; /* odp_evtbl[] = ev_tbl[], .evgen not set */ - - locm->local_queues.prio[prio].empty_prio = 1; - prio--; - } - - locm->local_queues.empty = 1; - return 0; -} - -#ifdef __cplusplus -} -#endif - -#endif /* EM_QUEUE_H_ */ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+/**
+ * @file
+ * EM internal queue functions
+ */
+
+#ifndef EM_QUEUE_H_
+#define EM_QUEUE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+
+em_status_t queue_init(queue_tbl_t *const queue_tbl,
+		       queue_pool_t *const queue_pool,
+		       queue_pool_t *const queue_pool_static);
+
+em_status_t queue_init_local(void);
+em_status_t queue_term_local(void);
+
+em_queue_t queue_alloc(em_queue_t queue, const char **err_str);
+em_status_t queue_free(em_queue_t queue);
+
+void queue_setup_common(queue_elem_t *q_elem /*out*/,
+			const queue_setup_t *setup);
+
+em_queue_t
+queue_create(const char *name, em_queue_type_t type, em_queue_prio_t prio,
+	     em_queue_group_t queue_group, em_queue_t queue_req,
+	     em_atomic_group_t atomic_group, const em_queue_conf_t *conf,
+	     const char **err_str);
+em_status_t
+queue_delete(queue_elem_t *const queue_elem);
+
+em_status_t
+queue_enable(queue_elem_t *const q_elem);
+
+em_status_t
+queue_enable_all(eo_elem_t *const eo_elem);
+
+em_status_t
+queue_disable(queue_elem_t *const q_elem);
+
+em_status_t
+queue_disable_all(eo_elem_t *const eo_elem);
+
+em_status_t
+queue_state_change__check(queue_state_t old_state, queue_state_t new_state,
+			  int is_setup /* vs. is_teardown */);
+em_status_t
+queue_state_change(queue_elem_t *const queue_elem, queue_state_t new_state);
+
+em_status_t
+queue_state_change_all(eo_elem_t *const eo_elem, queue_state_t new_state);
+
+unsigned int queue_count(void);
+
+size_t queue_get_name(const queue_elem_t *const q_elem,
+		      char name[/*out*/], const size_t maxlen);
+
+em_event_t queue_dequeue(const queue_elem_t *q_elem);
+int queue_dequeue_multi(const queue_elem_t *q_elem,
+			em_event_t events[/*out*/], int num);
+
+/** Print information about all EM queues */
+void print_queue_info(void);
+/** Print queue capabilities */
+void print_queue_capa(void);
+void print_queue_prio_info(void);
+
+/** Get the string of a queue state */
+const char *queue_get_state_str(queue_state_t state);
+/** Get the string of a queue type */
+const char *queue_get_type_str(em_queue_type_t type);
+
+/**
+ * Enqueue multiple events into an unscheduled queue.
+ * Internal func, application should use em_send_multi() instead.
+ */
+static inline unsigned int
+queue_unsched_enqueue_multi(const em_event_t events[], int num,
+			    const queue_elem_t *const q_elem)
+{
+	odp_event_t odp_events[num];
+	odp_queue_t odp_queue = q_elem->odp_queue;
+	int ret;
+
+	if (unlikely(EM_CHECK_LEVEL > 1 && odp_queue == ODP_QUEUE_INVALID))
+		return 0;
+
+	if (unlikely(EM_CHECK_LEVEL > 0 &&
+		     q_elem->state != EM_QUEUE_STATE_UNSCHEDULED))
+		return 0;
+
+	events_em2odp(events, odp_events, num);
+
+	ret = odp_queue_enq_multi(odp_queue, odp_events, num);
+	if (unlikely(ret < 0))
+		return 0;
+
+	return ret;
+}
+
+/**
+ * Enqueue an event into an unscheduled queue.
+ * Internal func, application should use em_send() instead.
+ */ +static inline em_status_t +queue_unsched_enqueue(em_event_t event, const queue_elem_t *const q_elem) +{ + odp_event_t odp_event = event_em2odp(event); + odp_queue_t odp_queue = q_elem->odp_queue; + int ret; + + if (unlikely(EM_CHECK_LEVEL > 1 && + (odp_event == ODP_EVENT_INVALID || + odp_queue == ODP_QUEUE_INVALID))) + return EM_ERR_NOT_FOUND; + + if (unlikely(EM_CHECK_LEVEL > 0 && + q_elem->state != EM_QUEUE_STATE_UNSCHEDULED)) + return EM_ERR_BAD_STATE; + + ret = odp_queue_enq(odp_queue, odp_event); + if (unlikely(EM_CHECK_LEVEL > 0 && ret != 0)) + return EM_ERR_LIB_FAILED; + + return EM_OK; +} + +static inline int +next_local_queue_events(stash_entry_t entry_tbl[/*out*/], int num_events) +{ + em_locm_t *const locm = &em_locm; + + if (locm->local_queues.empty) + return 0; + + em_queue_prio_t prio = EM_QUEUE_PRIO_NUM - 1; + + for (int i = 0; i < EM_QUEUE_PRIO_NUM; i++) { + /* from hi to lo prio: next prio if local queue is empty */ + if (locm->local_queues.prio[prio].empty_prio) { + prio--; + continue; + } + + odp_stash_t stash = locm->local_queues.prio[prio].stash; + int num = odp_stash_get_u64(stash, &entry_tbl[0].u64 /*[out]*/, + num_events); + if (num > 0) + return num; + + locm->local_queues.prio[prio].empty_prio = 1; + prio--; + } + + locm->local_queues.empty = 1; + return 0; +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_QUEUE_H_ */ diff --git a/src/em_queue_inline.h b/src/em_queue_inline.h new file mode 100644 index 00000000..0ddfe21b --- /dev/null +++ b/src/em_queue_inline.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * EM internal queue functions + */ + +#ifndef EM_QUEUE_INLINE_H_ +#define EM_QUEUE_INLINE_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +#define DIFF_ABS(a, b) ((a) > (b) ? (a) - (b) : (b) - (a)) +#define SMALLEST_NBR(a, b) ((a) > (b) ? (b) : (a)) + +/** Is the queue allocated? 
*/
+static inline int
+queue_allocated(const queue_elem_t *const queue_elem)
+{
+	return !objpool_in_pool(&queue_elem->queue_pool_elem);
+}
+
+/** Convert EM queue handle to queue index */
+static inline int
+queue_hdl2idx(em_queue_t queue)
+{
+	internal_queue_t iq = {.queue = queue};
+	int queue_idx;
+
+	queue_idx = iq.queue_id - EM_QUEUE_RANGE_OFFSET;
+
+	return queue_idx;
+}
+
+/** Convert queue index to EM queue handle */
+static inline em_queue_t
+queue_idx2hdl(int queue_idx)
+{
+	internal_queue_t iq = {.queue = 0};
+
+	iq.queue_id = queue_idx + EM_QUEUE_RANGE_OFFSET;
+	iq.device_id = em_shm->conf.device_id;
+
+	return iq.queue;
+}
+
+/** Convert queue ID (internal_queue_t:queue_id) to queue index */
+static inline int
+queue_id2idx(uint16_t queue_id)
+{
+	return (int)queue_id - EM_QUEUE_RANGE_OFFSET;
+}
+
+/** Convert queue ID (internal_queue_t:queue_id) to EM queue handle */
+static inline em_queue_t
+queue_id2hdl(uint16_t queue_id)
+{
+	internal_queue_t iq = {.queue = 0};
+
+	iq.queue_id = queue_id;
+	iq.device_id = em_shm->conf.device_id;
+
+	return iq.queue;
+}
+
+/**
+ * Return 'true' if the EM queue handle belongs to another EM instance.
+ *
+ * Sending to external queues will cause EM to call the user provided
+ * functions 'event_send_device' or 'event_send_device_multi'
+ */
+static inline bool
+queue_external(em_queue_t queue)
+{
+	internal_queue_t iq = {.queue = queue};
+
+	if (unlikely(queue == EM_QUEUE_UNDEF))
+		return 0;
+
+	return iq.device_id != em_shm->conf.device_id ? true : false;
+}
+
+/** Returns the queue element associated with queue id 'queue' */
+static inline queue_elem_t *
+queue_elem_get(const em_queue_t queue)
+{
+	int queue_idx;
+	internal_queue_t iq;
+	queue_elem_t *queue_elem;
+
+	iq.queue = queue;
+	queue_idx = queue_id2idx(iq.queue_id);
+
+	if (unlikely(iq.device_id != em_shm->conf.device_id ||
+		     (unsigned int)queue_idx > EM_MAX_QUEUES - 1))
+		return NULL;
+
+	queue_elem = &em_shm->queue_tbl.queue_elem[queue_idx];
+
+	return queue_elem;
+}
+
+static inline em_queue_t
+queue_current(void)
+{
+	const queue_elem_t *const q_elem = em_locm.current.q_elem;
+
+	if (unlikely(q_elem == NULL))
+		return EM_QUEUE_UNDEF;
+
+	return q_elem->queue;
+}
+
+static inline queue_elem_t *
+list_node_to_queue_elem(const list_node_t *const list_node)
+{
+	queue_elem_t *const q_elem = (queue_elem_t *)((uintptr_t)list_node
+			- offsetof(queue_elem_t, queue_node));
+
+	return likely(list_node != NULL) ?
q_elem : NULL; +} + +static inline int +prio_em2odp(em_queue_prio_t em_prio, odp_schedule_prio_t *odp_prio /*out*/) +{ + if (em_prio < EM_QUEUE_PRIO_NUM) { + *odp_prio = em_shm->queue_prio.map[em_prio]; + return 0; + } + return -1; +} + +static inline int +scheduled_queue_type_em2odp(em_queue_type_t em_queue_type, + odp_schedule_sync_t *odp_schedule_sync /* out */) +{ + switch (em_queue_type) { + case EM_QUEUE_TYPE_ATOMIC: + *odp_schedule_sync = ODP_SCHED_SYNC_ATOMIC; + return 0; + case EM_QUEUE_TYPE_PARALLEL: + *odp_schedule_sync = ODP_SCHED_SYNC_PARALLEL; + return 0; + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + *odp_schedule_sync = ODP_SCHED_SYNC_ORDERED; + return 0; + default: + return -1; + } +} + +static inline int +scheduled_queue_type_odp2em(odp_schedule_sync_t odp_schedule_sync, + em_queue_type_t *em_queue_type /* out */) +{ + switch (odp_schedule_sync) { + case ODP_SCHED_SYNC_ATOMIC: + *em_queue_type = EM_QUEUE_TYPE_ATOMIC; + return 0; + case ODP_SCHED_SYNC_PARALLEL: + *em_queue_type = EM_QUEUE_TYPE_PARALLEL; + return 0; + case ODP_SCHED_SYNC_ORDERED: + *em_queue_type = EM_QUEUE_TYPE_PARALLEL_ORDERED; + return 0; + default: + *em_queue_type = EM_QUEUE_TYPE_UNDEF; + return 0; + } +} + +#ifdef __cplusplus +} +#endif + +#endif /* EM_QUEUE_INLINE_H_ */ diff --git a/src/em_queue_types.h b/src/em_queue_types.h index c242b40a..871b3e7d 100644 --- a/src/em_queue_types.h +++ b/src/em_queue_types.h @@ -1,260 +1,290 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - - /** - * @file - * EM internal queue types & definitions - * - */ - -#ifndef EM_QUEUE_TYPES_H_ -#define EM_QUEUE_TYPES_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * EM internal queue ids - local part of the queue only, i.e missing the - * device-id. 
- * Note that the EM queue handle range is determined by 'EM_QUEUE_RANGE_OFFSET' - */ -#define MAX_INTERNAL_QUEUES ROUND_UP(EM_MAX_CORES + 1, 32) - -#define _FIRST_INTERNAL_QUEUE (_EM_QUEUE_STATIC_MAX + 1) -#define FIRST_INTERNAL_QUEUE ((uint16_t)_FIRST_INTERNAL_QUEUE) - -#define _LAST_INTERNAL_QUEUE (_FIRST_INTERNAL_QUEUE + MAX_INTERNAL_QUEUES - 1) -#define LAST_INTERNAL_QUEUE ((uint16_t)_LAST_INTERNAL_QUEUE) - -#define FIRST_INTERNAL_UNSCHED_QUEUE (FIRST_INTERNAL_QUEUE) -#define SHARED_INTERNAL_UNSCHED_QUEUE (LAST_INTERNAL_QUEUE) - -/* Priority for the EM-internal queues */ -#define INTERNAL_QUEUE_PRIORITY (EM_QUEUE_PRIO_HIGHEST) - -COMPILE_TIME_ASSERT(MAX_INTERNAL_QUEUES - 1 >= EM_MAX_CORES, - TOO_FEW_INTERNAL_QUEUES_ERROR); - -/* Dynamic queue ids */ -#define _FIRST_DYN_QUEUE (_LAST_INTERNAL_QUEUE + 1) -#define FIRST_DYN_QUEUE ((uint16_t)_FIRST_DYN_QUEUE) - -#define MAX_DYN_QUEUES (EM_MAX_QUEUES - \ - (_FIRST_DYN_QUEUE - EM_QUEUE_RANGE_OFFSET)) - -#define _LAST_DYN_QUEUE (_FIRST_DYN_QUEUE + MAX_DYN_QUEUES - 1) -#define LAST_DYN_QUEUE ((uint16_t)_LAST_DYN_QUEUE) - -COMPILE_TIME_ASSERT(_FIRST_DYN_QUEUE > _LAST_INTERNAL_QUEUE, - FIRST_DYN_QUEUE_ERROR); - -/* Verify that the byte order is defined for 'internal_queue_t' */ -#if \ -(__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__) && \ -(__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__) -#error __BYTE_ORDER__ not defined! -#endif - -/** - * Internal represenation of the EM queue handle - * The EM queue handle contains a 16-bit queue-id and a 16-bit device-id. - */ -typedef union { - em_queue_t queue; - struct { -#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ - uint16_t queue_id; - uint16_t device_id; -#ifdef EM_64_BIT - uint32_t unused; -#endif -#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -#ifdef EM_64_BIT - uint32_t unused; -#endif - uint16_t device_id; - uint16_t queue_id; -#endif - }; -} internal_queue_t; - -/* Assert that all EM queues can fit into the .queue_id field */ -COMPILE_TIME_ASSERT(UINT16_MAX >= EM_MAX_QUEUES, - INTERNAL_QUEUE_ID_MAX_ERROR); -/* Verify size of struct, i.e. accept no padding */ -COMPILE_TIME_ASSERT(sizeof(internal_queue_t) == sizeof(em_queue_t), - INTERNAL_QUEUE_T_SIZE_ERROR); - -/* - * Queue state - */ -typedef enum queue_state { - /** Invalid queue state, queue not created/allocated */ - EM_QUEUE_STATE_INVALID = 0, - - /* - * Scheduled queue (ATOMIC, PARALLEL, ORDERED) states: - * (keep state values consecutive: ...n-1,n,n+1...) - */ - /** Queue initialization, allocated and being set up */ - EM_QUEUE_STATE_INIT = 1, - /** Queue added/bound to an EO, but EO-start not yet complete */ - EM_QUEUE_STATE_BIND = 2, - /** Queue ready, related EO started */ - EM_QUEUE_STATE_READY = 3, - - /* - * Non-scheduled queue (UNSCHED, OUTPUT) state use the UNSCHEDULED-state. 
- */ - /* Use separete value for unscheduled queues to catch illegal usage */ - EM_QUEUE_STATE_UNSCHEDULED = 999 -} queue_state_t; - -/** - * Atomic-group queue specific part of the queue element - */ -typedef struct q_elem_atomic_group_ { - /** List node for linking queue elems belonging to an atomic group */ - list_node_t agrp_node; -} q_elem_atomic_group_t; - -/** - * Output queue specific part of the queue element - */ -typedef struct q_elem_output_ { - em_output_queue_conf_t output_conf; - /* Copied output_fn_args content of length 'args_len' stored in event */ - em_event_t output_fn_args_event; - env_spinlock_t lock; -} q_elem_output_t; - -/** - * EM queue element - */ -typedef struct queue_elem_t { - /** Queue handle */ - em_queue_t queue; - /** Associated ODP queue handle */ - odp_queue_t odp_queue; - /** Queue priority */ - em_queue_prio_t priority; - /** Atomic, parallel, ordered, unscheduled, local, output */ - em_queue_type_t type; - /** set if queue is scheduled, i.e. atomic, parallel or ordered */ - uint32_t scheduled; - /** Queue state */ - queue_state_t state; - /** Queue group handle of this queue */ - em_queue_group_t queue_group; - /** The atomic group handle (if any) of this queue */ - em_atomic_group_t atomic_group; - /** User defined queue context (can be NULL) */ - void *context; - - /** EM EO that this queue belongs to */ - em_eo_t eo; - /** Associated eo element */ - eo_elem_t *eo_elem; - /** Copy of the user defined eo context (or NULL) for performance */ - void *eo_ctx; - - int use_multi_rcv; /* true:receive_multi_func(), false:receive_func() */ - int max_events; - /** Copy of the event receive function for better performance */ - em_receive_func_t receive_func; - /** Copy of the multi-event receive function for better performance */ - em_receive_multi_func_t receive_multi_func; - - union { - q_elem_atomic_group_t agrp; - q_elem_output_t output; - }; - - /** List node for linking queue elems belonging to an EO */ - list_node_t queue_node; - /** List node for linking queue elems belonging to a queue group */ - list_node_t qgrp_node; - /** Queue pool elem for linking free queues for queue_alloc() */ - objpool_elem_t queue_pool_elem; - - /** Guarantee that size is a multiple of cache line size */ - void *end[0] ENV_CACHE_LINE_ALIGNED; -} queue_elem_t ENV_CACHE_LINE_ALIGNED; - -COMPILE_TIME_ASSERT(sizeof(queue_elem_t) % ENV_CACHE_LINE_SIZE == 0, - QUEUE_ELEM_T__SIZE_ERROR); - -/** - * EM queue element table - */ -typedef struct queue_tbl_t { - /** Queue element table */ - queue_elem_t queue_elem[EM_MAX_QUEUES] ENV_CACHE_LINE_ALIGNED; - /** ODP queue capabilities common for all queues */ - odp_queue_capability_t odp_queue_capability; - /** ODP schedule capabilities related to queues */ - odp_schedule_capability_t odp_schedule_capability; - /** Queue name table */ - char name[EM_MAX_QUEUES][EM_QUEUE_NAME_LEN] ENV_CACHE_LINE_ALIGNED; -} queue_tbl_t; - -/** - * Pool of free queues - */ -typedef struct queue_pool_t { - objpool_t objpool; -} queue_pool_t; - -/** - * Local queues, i.e. 
core-local storage for events to local queues
- */
-typedef struct local_queues_t {
-	int empty;
-	struct {
-		int empty_prio;
-		odp_queue_t queue;
-	} prio[EM_QUEUE_PRIO_NUM];
-} local_queues_t;
-
-/**
- * Track output-queues used during a dispatch round (burst)
- */
-typedef struct output_queue_track_t {
-	unsigned int idx_cnt;
-	uint16_t idx[EM_MAX_QUEUES];
-	queue_elem_t *used_queues[EM_MAX_QUEUES];
-} output_queue_track_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* EM_QUEUE_TYPES_H_ */
+/*
+ * Copyright (c) 2015, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ /**
+ * @file
+ * EM internal queue types & definitions
+ *
+ */
+
+#ifndef EM_QUEUE_TYPES_H_
+#define EM_QUEUE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * EM internal queue ids - local part of the queue only, i.e. missing the
+ * device-id.
+ * Note that the EM queue handle range is determined by 'EM_QUEUE_RANGE_OFFSET'
+ */
+#define MAX_INTERNAL_QUEUES	ROUND_UP(EM_MAX_CORES + 1, 32)
+
+#define _FIRST_INTERNAL_QUEUE	(_EM_QUEUE_STATIC_MAX + 1)
+#define FIRST_INTERNAL_QUEUE	((uint16_t)_FIRST_INTERNAL_QUEUE)
+
+#define _LAST_INTERNAL_QUEUE	(_FIRST_INTERNAL_QUEUE + MAX_INTERNAL_QUEUES - 1)
+#define LAST_INTERNAL_QUEUE	((uint16_t)_LAST_INTERNAL_QUEUE)
+
+#define FIRST_INTERNAL_UNSCHED_QUEUE	(FIRST_INTERNAL_QUEUE)
+#define SHARED_INTERNAL_UNSCHED_QUEUE	(LAST_INTERNAL_QUEUE)
+
+/* Priority for the EM-internal queues */
+#define INTERNAL_QUEUE_PRIORITY	(EM_QUEUE_PRIO_HIGHEST)
+
+COMPILE_TIME_ASSERT(MAX_INTERNAL_QUEUES - 1 >= EM_MAX_CORES,
+		    TOO_FEW_INTERNAL_QUEUES_ERROR);
+
+/* Dynamic queue ids */
+#define _FIRST_DYN_QUEUE	(_LAST_INTERNAL_QUEUE + 1)
+#define FIRST_DYN_QUEUE		((uint16_t)_FIRST_DYN_QUEUE)
+
+#define MAX_DYN_QUEUES	(EM_MAX_QUEUES - \
+			 (_FIRST_DYN_QUEUE - EM_QUEUE_RANGE_OFFSET))
+
+#define _LAST_DYN_QUEUE	(_FIRST_DYN_QUEUE + MAX_DYN_QUEUES - 1)
+#define LAST_DYN_QUEUE	((uint16_t)_LAST_DYN_QUEUE)
+
+COMPILE_TIME_ASSERT(_FIRST_DYN_QUEUE > _LAST_INTERNAL_QUEUE,
+		    FIRST_DYN_QUEUE_ERROR);
+
+#define QUEUE_ELEM_VALID ((uint32_t)0xABBACAFE)
+
+/* Verify that the byte order is defined for 'internal_queue_t' */
+#if \
+(__BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__) && \
+(__BYTE_ORDER__ != __ORDER_BIG_ENDIAN__)
+#error __BYTE_ORDER__ not defined!
+#endif
+
+/**
+ * Queue create-params passed to queue_setup...()
+ */
+typedef struct {
+	const char *name;
+	em_queue_type_t type;
+	em_queue_prio_t prio;
+	em_atomic_group_t atomic_group;
+	em_queue_group_t queue_group;
+	const em_queue_conf_t *conf;
+} queue_setup_t;
+
+/**
+ * Internal representation of the EM queue handle.
+ * The EM queue handle contains a 16-bit queue-id and a 16-bit device-id.
+ */
+typedef union {
+	em_queue_t queue;
+	struct {
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+		uint16_t queue_id;
+		uint16_t device_id;
+#ifdef EM_64_BIT
+		uint32_t unused;
+#endif
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#ifdef EM_64_BIT
+		uint32_t unused;
+#endif
+		uint16_t device_id;
+		uint16_t queue_id;
+#endif
+	};
+} internal_queue_t;
+
+/* Assert that all EM queues can fit into the .queue_id field */
+COMPILE_TIME_ASSERT(UINT16_MAX >= EM_MAX_QUEUES,
+		    INTERNAL_QUEUE_ID_MAX_ERROR);
+/* Verify size of struct, i.e. accept no padding */
+COMPILE_TIME_ASSERT(sizeof(internal_queue_t) == sizeof(em_queue_t),
+		    INTERNAL_QUEUE_T_SIZE_ERROR);
+
+/*
+ * Queue state
+ */
+typedef enum queue_state {
+	/** Invalid queue state, queue not created/allocated */
+	EM_QUEUE_STATE_INVALID = 0,
+
+	/*
+	 * Scheduled queue (ATOMIC, PARALLEL, ORDERED) states:
+	 * (keep state values consecutive: ...n-1,n,n+1...)
+	 */
+	/** Queue initialization, allocated and being set up */
+	EM_QUEUE_STATE_INIT = 1,
+	/** Queue added/bound to an EO, but EO-start not yet complete */
+	EM_QUEUE_STATE_BIND = 2,
+	/** Queue ready, related EO started */
+	EM_QUEUE_STATE_READY = 3,
+
+	/*
+	 * Non-scheduled queues (UNSCHED, OUTPUT) use the UNSCHEDULED-state.
+	 */
+	/* Use a separate value for unscheduled queues to catch illegal usage */
+	EM_QUEUE_STATE_UNSCHEDULED = 999
+} queue_state_t;
+
+/**
+ * Atomic-group queue specific part of the queue element
+ */
+typedef struct q_elem_atomic_group_ {
+	/** List node for linking queue elems belonging to an atomic group */
+	list_node_t agrp_node;
+} q_elem_atomic_group_t;
+
+/**
+ * Output queue specific part of the queue element
+ */
+typedef struct q_elem_output_ {
+	em_output_queue_conf_t output_conf;
+	/* Copied output_fn_args content of length 'args_len' stored in event */
+	em_event_t output_fn_args_event;
+	env_spinlock_t lock;
+} q_elem_output_t;
+
+/**
+ * EM queue element
+ */
+typedef struct queue_elem_t {
+	/**
+	 * Check that the content is an EM queue elem.
+	 *
+	 * EM will verify that the ODP queue context actually points to an
+	 * EM queue elem and not to something else:
+	 *   queue_elem_t *q_elem = odp_queue_context(odp_queue);
+	 *   if (!q_elem || q_elem->valid_check != QUEUE_ELEM_VALID)
+	 *           EM_ERROR(...);
+	 * Keep first.
+	 */
+	uint32_t valid_check;
+
+	/** Queue handle */
+	em_queue_t queue;
+
+	/** Associated ODP queue handle */
+	odp_queue_t odp_queue;
+	/** Is this an ODP pktin event queue (true/false)? */
+	bool is_pktin;
+
+	/** Queue priority */
+	em_queue_prio_t priority;
+	/** Atomic, parallel, ordered, unscheduled, local, output */
+	em_queue_type_t type;
+	/** set if queue is scheduled, i.e. atomic, parallel or ordered */
+	uint32_t scheduled;
+	/** Queue state */
+	queue_state_t state;
+	/** Queue group handle of this queue */
+	em_queue_group_t queue_group;
+	/** The atomic group handle (if any) of this queue */
+	em_atomic_group_t atomic_group;
+	/** User defined queue context (can be NULL) */
+	void *context;
+
+	/** EM EO that this queue belongs to */
+	em_eo_t eo;
+	/** Associated eo element */
+	eo_elem_t *eo_elem;
+	/** Copy of the user defined eo context (or NULL) for performance */
+	void *eo_ctx;
+
+	int use_multi_rcv; /* true:receive_multi_func(), false:receive_func() */
+	int max_events;
+	/** Copy of the event receive function for better performance */
+	em_receive_func_t receive_func;
+	/** Copy of the multi-event receive function for better performance */
+	em_receive_multi_func_t receive_multi_func;
+
+	union {
+		q_elem_atomic_group_t agrp;
+		q_elem_output_t output;
+	};
+
+	/** List node for linking queue elems belonging to an EO */
+	list_node_t queue_node;
+	/** List node for linking queue elems belonging to a queue group */
+	list_node_t qgrp_node;
+	/** Queue pool elem for linking free queues for queue_alloc() */
+	objpool_elem_t queue_pool_elem;
+
+	/** Guarantee that size is a multiple of cache line size */
+	void *end[0] ENV_CACHE_LINE_ALIGNED;
+} queue_elem_t ENV_CACHE_LINE_ALIGNED;
+
+COMPILE_TIME_ASSERT(sizeof(queue_elem_t) % ENV_CACHE_LINE_SIZE == 0,
+		    QUEUE_ELEM_T__SIZE_ERROR);
+
+/**
+ * EM queue element table
+ */
+typedef struct queue_tbl_t {
+	/** Queue element table */
+	queue_elem_t queue_elem[EM_MAX_QUEUES] ENV_CACHE_LINE_ALIGNED;
+	/** ODP queue capabilities common for all queues */
+	odp_queue_capability_t odp_queue_capability;
+	/** ODP schedule capabilities related to queues */
+	odp_schedule_capability_t odp_schedule_capability;
+	/** Queue name table */
+	char name[EM_MAX_QUEUES][EM_QUEUE_NAME_LEN] ENV_CACHE_LINE_ALIGNED;
+} queue_tbl_t;
+
+/**
+ * Pool of free queues
+ */
+typedef struct queue_pool_t {
+	objpool_t objpool;
+} queue_pool_t;
+
+/**
+ * Local queues, i.e.
core-local storage for events to local queues + */ +typedef struct local_queues_t { + int empty; + struct { + int empty_prio; + odp_stash_t stash; + } prio[EM_QUEUE_PRIO_NUM]; +} local_queues_t; + +/** + * Track output-queues used during a dispatch round (burst) + */ +typedef struct output_queue_track_t { + unsigned int idx_cnt; + uint16_t idx[EM_MAX_QUEUES]; + queue_elem_t *used_queues[EM_MAX_QUEUES]; +} output_queue_track_t; + +#ifdef __cplusplus +} +#endif + +#endif /* EM_QUEUE_TYPES_H_ */ diff --git a/src/event_machine_atomic_group.c b/src/event_machine_atomic_group.c index 192d8de2..dbfbfeba 100644 --- a/src/event_machine_atomic_group.c +++ b/src/event_machine_atomic_group.c @@ -1,456 +1,485 @@ -/* - * Copyright (c) 2014, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "em_include.h" - -/* per core (thread) state for em_atomic_group_get_next() */ -static ENV_LOCAL unsigned int _agrp_tbl_iter_idx; -/* Per core (thread) state of em_atomic_group_queue_get_next() */ -static ENV_LOCAL unsigned int _agrp_q_iter_idx; -static ENV_LOCAL em_atomic_group_t _agrp_q_iter_agrp; - -em_atomic_group_t -em_atomic_group_create(const char *name, em_queue_group_t queue_group) -{ - em_atomic_group_t atomic_group = EM_ATOMIC_GROUP_UNDEF; - atomic_group_elem_t *ag_elem = NULL; - const char *err_str = ""; - em_status_t error = EM_OK; - odp_queue_param_t queue_param; - unsigned int size; - - if (unlikely(invalid_qgrp(queue_group))) { - error = EM_ERR_BAD_ID; - err_str = "Invalid queue group!"; - goto error; - } - - /* New Atomic group */ - atomic_group = atomic_group_alloc(); - - if (unlikely(atomic_group == EM_ATOMIC_GROUP_UNDEF)) { - error = EM_ERR_ALLOC_FAILED; - err_str = "Atomic group allocation failed!"; - goto error; - } - - /* Initialize the atomic group */ - ag_elem = atomic_group_elem_get(atomic_group); - if (unlikely(!ag_elem)) { - error = EM_ERR_BAD_POINTER; - err_str = "Atomic group allocation failed: ag_elem NULL!"; - goto error; - } - - env_atomic32_init(&ag_elem->num_queues); - - /* Store the related queue group */ - ag_elem->queue_group = queue_group; - - if (name != NULL) { - strncpy(ag_elem->name, name, sizeof(ag_elem->name)); - ag_elem->name[sizeof(ag_elem->name) - 1] = '\0'; - } else { - ag_elem->name[0] = '\0'; - } - - odp_queue_param_init(&queue_param); - - queue_param.type = ODP_QUEUE_TYPE_PLAIN; - queue_param.enq_mode = ODP_QUEUE_OP_MT; - /* dequeueing protected by ag_elem->lock */ - queue_param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; - /* Queue size: use EM default value from config file: */ - size = em_shm->opt.queue.min_events_default; - if (size != 0) - queue_param.size = size; - /* else: use odp default as set by odp_queue_param_init() */ - - ag_elem->internal_queue.hi_prio = odp_queue_create(ag_elem->name, - &queue_param); - ag_elem->internal_queue.lo_prio = odp_queue_create(ag_elem->name, - &queue_param); - if (unlikely(ag_elem->internal_queue.hi_prio == ODP_QUEUE_INVALID || - ag_elem->internal_queue.lo_prio == ODP_QUEUE_INVALID)) - goto error; - - return atomic_group; - -error: - INTERNAL_ERROR(error, EM_ESCOPE_ATOMIC_GROUP_CREATE, err_str); - if (atomic_group != EM_ATOMIC_GROUP_UNDEF) - em_atomic_group_delete(atomic_group); - - return EM_ATOMIC_GROUP_UNDEF; -} - -/* - * Helper for em_atomic_group_delete() - * Flush the atomic group's internal queues and then destroy them. 
- */ -static int -ag_internal_queue_destroy(odp_queue_t plain_q) -{ - odp_event_t odp_deq_tbl[EM_SCHED_AG_MULTI_MAX_BURST]; - event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; - em_event_t ev_tbl[EM_SCHED_AG_MULTI_MAX_BURST]; - int ev_cnt = 0; - bool esv_ena = esv_enabled(); - - if (plain_q == ODP_QUEUE_INVALID) - return -1; - - do { - ev_cnt = odp_queue_deq_multi(plain_q, odp_deq_tbl, - EM_SCHED_AG_MULTI_MAX_BURST); - if (ev_cnt <= 0) - break; - - events_odp2em(odp_deq_tbl, ev_tbl/*out*/, ev_cnt); - - if (esv_ena) { - event_to_hdr_multi(ev_tbl, ev_hdr_tbl/*out*/, ev_cnt); - evstate_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl, - ev_cnt, EVSTATE__AG_DELETE); - } - - em_free_multi(ev_tbl, ev_cnt); - } while (ev_cnt > 0); - - return odp_queue_destroy(plain_q); -} - -em_status_t -em_atomic_group_delete(em_atomic_group_t atomic_group) -{ - atomic_group_elem_t *const ag_elem = - atomic_group_elem_get(atomic_group); - em_status_t error = EM_OK; - int err = 0; - - RETURN_ERROR_IF(ag_elem == NULL, - EM_ERR_BAD_ID, EM_ESCOPE_ATOMIC_GROUP_DELETE, - "Invalid atomic group - cannot delete!"); - - env_spinlock_lock(&ag_elem->lock); - - /* Error checks */ - err = !list_is_empty(&ag_elem->qlist_head); - err |= !atomic_group_allocated(ag_elem); - - if (unlikely(err)) { - env_spinlock_unlock(&ag_elem->lock); - return INTERNAL_ERROR(EM_ERR_BAD_STATE, - EM_ESCOPE_ATOMIC_GROUP_DELETE, - "Atomic group in bad state - cannot delete!"); - } - - /* Flush the atomic group's internal queues and destroy them */ - err = ag_internal_queue_destroy(ag_elem->internal_queue.hi_prio); - err |= ag_internal_queue_destroy(ag_elem->internal_queue.lo_prio); - - ag_elem->queue_group = EM_QUEUE_GROUP_UNDEF; - ag_elem->name[0] = '\0'; - - env_spinlock_unlock(&ag_elem->lock); - - /* Free the atomic group (elem) back into the AG-pool */ - error = atomic_group_free(atomic_group); - RETURN_ERROR_IF(error != EM_OK || err != 0, - error, EM_ESCOPE_ATOMIC_GROUP_DELETE, - "Atomic group free failed(%d)!", err); - - return EM_OK; -} - -em_queue_t -em_queue_create_ag(const char *name, em_queue_prio_t prio, - em_atomic_group_t atomic_group, const em_queue_conf_t *conf) -{ - em_queue_t queue; - queue_elem_t *q_elem; - em_queue_group_t queue_group; - atomic_group_elem_t *const ag_elem = - atomic_group_elem_get(atomic_group); - const char *err_str = ""; - - if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_CREATE_AG, - "Invalid Atomic Group:%" PRI_AGRP "", - atomic_group); - return EM_QUEUE_UNDEF; - } - - queue_group = ag_elem->queue_group; - - queue = queue_create(name, EM_QUEUE_TYPE_ATOMIC, prio, queue_group, - EM_QUEUE_UNDEF, atomic_group, conf, &err_str); - - if (unlikely(queue == EM_QUEUE_UNDEF)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_QUEUE_CREATE_AG, - "Atomic Group queue creation failed! 
(%s)", - err_str); - return EM_QUEUE_UNDEF; - } - - q_elem = queue_elem_get(queue); - /* Add queue to atomic group list */ - atomic_group_add_queue_list(ag_elem, q_elem); - - return queue; -} - -em_status_t -em_queue_create_static_ag(const char *name, em_queue_prio_t prio, - em_atomic_group_t atomic_group, em_queue_t queue, - const em_queue_conf_t *conf) -{ - em_queue_t queue_static; - queue_elem_t *q_elem; - em_queue_group_t queue_group; - atomic_group_elem_t *const ag_elem = - atomic_group_elem_get(atomic_group); - const char *err_str = ""; - - RETURN_ERROR_IF(ag_elem == NULL || !atomic_group_allocated(ag_elem), - EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_CREATE_STATIC_AG, - "Invalid Atomic Group:%" PRI_AGRP "", atomic_group); - - queue_group = ag_elem->queue_group; - - queue_static = queue_create(name, EM_QUEUE_TYPE_ATOMIC, prio, - queue_group, queue, atomic_group, conf, - &err_str); - - RETURN_ERROR_IF(queue_static == EM_QUEUE_UNDEF || - queue_static != queue, - EM_ERR_NOT_FREE, EM_ESCOPE_QUEUE_CREATE_STATIC_AG, - "Atomic Group static queue creation failed! (%s)", - err_str); - - q_elem = queue_elem_get(queue); - /* Add queue to atomic group list */ - atomic_group_add_queue_list(ag_elem, q_elem); - - return EM_OK; -} - -em_atomic_group_t -em_atomic_group_get(em_queue_t queue) -{ - const queue_elem_t *q_elem = queue_elem_get(queue); - - if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ATOMIC_GROUP_GET, - "Invalid queue:%" PRI_QUEUE "", queue); - return EM_ATOMIC_GROUP_UNDEF; - } - - return q_elem->atomic_group; -} - -size_t -em_atomic_group_get_name(em_atomic_group_t atomic_group, - char *name, size_t maxlen) -{ - const atomic_group_elem_t *ag_elem = - atomic_group_elem_get(atomic_group); - size_t len = 0; - - if (unlikely(name == NULL || maxlen == 0)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, - EM_ESCOPE_ATOMIC_GROUP_GET_NAME, - "Invalid args: name=0x%" PRIx64 ", maxlen=%zu", - name, maxlen); - return 0; - } - - if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ATOMIC_GROUP_GET_NAME, - "Invalid Atomic Group:%" PRI_AGRP "", - atomic_group); - name[0] = '\0'; - return 0; - } - - len = strnlen(ag_elem->name, sizeof(ag_elem->name) - 1); - if (maxlen - 1 < len) - len = maxlen - 1; - - memcpy(name, ag_elem->name, len); - name[len] = '\0'; - - return len; -} - -em_atomic_group_t -em_atomic_group_find(const char *name) -{ - if (name && *name) { - for (int i = 0; i < EM_MAX_ATOMIC_GROUPS; i++) { - const atomic_group_elem_t *ag_elem = - &em_shm->atomic_group_tbl.ag_elem[i]; - - if (atomic_group_allocated(ag_elem) && - !strncmp(name, ag_elem->name, - EM_ATOMIC_GROUP_NAME_LEN)) - return ag_elem->atomic_group; - } - } - return EM_ATOMIC_GROUP_UNDEF; -} - -em_atomic_group_t -em_atomic_group_get_first(unsigned int *num) -{ - const atomic_group_elem_t *const agrp_elem_tbl = - em_shm->atomic_group_tbl.ag_elem; - const atomic_group_elem_t *ag_elem = &agrp_elem_tbl[0]; - const unsigned int agrp_count = atomic_group_count(); - - _agrp_tbl_iter_idx = 0; /* reset iteration */ - - if (num) - *num = agrp_count; - - if (agrp_count == 0) { - _agrp_tbl_iter_idx = EM_MAX_ATOMIC_GROUPS; /*UNDEF=_get_next()*/ - return EM_ATOMIC_GROUP_UNDEF; - } - - /* find first */ - while (!atomic_group_allocated(ag_elem)) { - _agrp_tbl_iter_idx++; - if (_agrp_tbl_iter_idx >= EM_MAX_ATOMIC_GROUPS) - return EM_ATOMIC_GROUP_UNDEF; - ag_elem = &agrp_elem_tbl[_agrp_tbl_iter_idx]; - } - - return agrp_idx2hdl(_agrp_tbl_iter_idx); -} - 
-em_atomic_group_t -em_atomic_group_get_next(void) -{ - if (_agrp_tbl_iter_idx >= EM_MAX_ATOMIC_GROUPS - 1) - return EM_ATOMIC_GROUP_UNDEF; - - _agrp_tbl_iter_idx++; - - const atomic_group_elem_t *const agrp_elem_tbl = - em_shm->atomic_group_tbl.ag_elem; - const atomic_group_elem_t *ag_elem = &agrp_elem_tbl[_agrp_tbl_iter_idx]; - - /* find next */ - while (!atomic_group_allocated(ag_elem)) { - _agrp_tbl_iter_idx++; - if (_agrp_tbl_iter_idx >= EM_MAX_ATOMIC_GROUPS) - return EM_ATOMIC_GROUP_UNDEF; - ag_elem = &agrp_elem_tbl[_agrp_tbl_iter_idx]; - } - - return agrp_idx2hdl(_agrp_tbl_iter_idx); -} - -em_queue_t -em_atomic_group_queue_get_first(unsigned int *num, - em_atomic_group_t atomic_group) -{ - const atomic_group_elem_t *const agrp_elem = - atomic_group_elem_get(atomic_group); - - if (unlikely(agrp_elem == NULL || !atomic_group_allocated(agrp_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, - EM_ESCOPE_ATOMIC_GROUP_QUEUE_GET_FIRST, - "Invalid atomic group:%" PRI_AGRP "", - atomic_group); - if (num) - *num = 0; - return EM_QUEUE_UNDEF; - } - - const unsigned int num_queues = - env_atomic32_get(&agrp_elem->num_queues); - - if (num) - *num = num_queues; - - if (num_queues == 0) { - _agrp_q_iter_idx = EM_MAX_QUEUES; /* UNDEF = _get_next() */ - return EM_QUEUE_UNDEF; - } - - /* - * A 'agrp_elem' contains a linked list with all it's queues. That list - * might be modified while processing this iteration, so instead we just - * go through the whole queue table. - * This is potentially a slow implementation and perhaps worth - * re-thinking? - */ - const queue_elem_t *const q_elem_tbl = em_shm->queue_tbl.queue_elem; - const queue_elem_t *q_elem = &q_elem_tbl[0]; - - _agrp_q_iter_idx = 0; /* reset list */ - _agrp_q_iter_agrp = atomic_group; - - /* find first */ - while (!queue_allocated(q_elem) || - q_elem->atomic_group != _agrp_q_iter_agrp) { - _agrp_q_iter_idx++; - if (_agrp_q_iter_idx >= EM_MAX_QUEUES) - return EM_QUEUE_UNDEF; - q_elem = &q_elem_tbl[_agrp_q_iter_idx]; - } - - return queue_idx2hdl(_agrp_q_iter_idx); -} - -em_queue_t -em_atomic_group_queue_get_next(void) -{ - if (_agrp_q_iter_idx >= EM_MAX_QUEUES - 1) - return EM_QUEUE_UNDEF; - - _agrp_q_iter_idx++; - - const queue_elem_t *const q_elem_tbl = em_shm->queue_tbl.queue_elem; - const queue_elem_t *q_elem = &q_elem_tbl[_agrp_q_iter_idx]; - - /* find next */ - while (!queue_allocated(q_elem) || - q_elem->atomic_group != _agrp_q_iter_agrp) { - _agrp_q_iter_idx++; - if (_agrp_q_iter_idx >= EM_MAX_QUEUES) - return EM_QUEUE_UNDEF; - q_elem = &q_elem_tbl[_agrp_q_iter_idx]; - } - - return queue_idx2hdl(_agrp_q_iter_idx); -} +/* + * Copyright (c) 2014, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +/* per core (thread) state for em_atomic_group_get_next() */ +static ENV_LOCAL unsigned int _agrp_tbl_iter_idx; +/* Per core (thread) state of em_atomic_group_queue_get_next() */ +static ENV_LOCAL unsigned int _agrp_q_iter_idx; +static ENV_LOCAL em_atomic_group_t _agrp_q_iter_agrp; + +em_atomic_group_t +em_atomic_group_create(const char *name, em_queue_group_t queue_group) +{ + em_atomic_group_t atomic_group = EM_ATOMIC_GROUP_UNDEF; + atomic_group_elem_t *ag_elem = NULL; + const char *err_str = ""; + em_status_t error = EM_OK; + int ret = 0; + + if (unlikely(invalid_qgrp(queue_group))) { + error = EM_ERR_BAD_ID; + err_str = "Invalid queue group!"; + goto error; + } + + /* New Atomic group */ + atomic_group = atomic_group_alloc(); + + if (unlikely(atomic_group == EM_ATOMIC_GROUP_UNDEF)) { + error = EM_ERR_ALLOC_FAILED; + err_str = "Atomic group allocation failed!"; + goto error; + } + + /* Initialize the atomic group */ + ag_elem = atomic_group_elem_get(atomic_group); + if (unlikely(!ag_elem)) { + error = EM_ERR_BAD_POINTER; + err_str = "Atomic group allocation failed: ag_elem NULL!"; + goto error; + } + + env_atomic32_init(&ag_elem->num_queues); + + /* Store the related queue group */ + ag_elem->queue_group = queue_group; + + if (name != NULL) { + strncpy(ag_elem->name, name, sizeof(ag_elem->name)); + ag_elem->name[sizeof(ag_elem->name) - 1] = '\0'; + } else { + ag_elem->name[0] = '\0'; + } + + /* + * Create the AG internal stashes + */ + unsigned int num_obj = 0; + odp_stash_capability_t stash_capa; + odp_stash_param_t stash_param; + + ret = odp_stash_capability(&stash_capa, ODP_STASH_TYPE_FIFO); + if (ret != 0) { + error = EM_ERR_LIB_FAILED; + err_str = "odp_stash_capability() failed!"; + goto error; + } + + odp_stash_param_init(&stash_param); + + stash_param.type = ODP_STASH_TYPE_FIFO; + stash_param.put_mode = ODP_STASH_OP_MT; + /* 'get' protected by ag_elem->lock */ + stash_param.get_mode = ODP_STASH_OP_ST; + + /* Stash size: use EM default queue size value from config file: */ + num_obj = em_shm->opt.queue.min_events_default; + if (num_obj != 0) + stash_param.num_obj = num_obj; + /* else: use odp default as set by odp_stash_param_init() */ + + if (stash_param.num_obj > stash_capa.max_num_obj) { + EM_LOG(EM_LOG_PRINT, + "%s(): req stash.num_obj(%" PRIu64 ") > capa.max_num_obj(%" PRIu64 ").\n" + " ==> using max value:%" PRIu64 "\n", __func__, + stash_param.num_obj, stash_capa.max_num_obj, stash_capa.max_num_obj); + stash_param.num_obj = stash_capa.max_num_obj; + } + + stash_param.obj_size = sizeof(uint64_t); + stash_param.cache_size = 0; /* No core local caching */ + + ag_elem->stashes.hi_prio = odp_stash_create(ag_elem->name, &stash_param); + 
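+	/*
+	 * A second stash, created with identical params, holds the atomic
+	 * group's lo-prio events; 'get' on both stashes uses
+	 * ODP_STASH_OP_ST and is serialized by ag_elem->lock.
+	 */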
ag_elem->stashes.lo_prio = odp_stash_create(ag_elem->name, &stash_param);
+	if (unlikely(ag_elem->stashes.hi_prio == ODP_STASH_INVALID ||
+		     ag_elem->stashes.lo_prio == ODP_STASH_INVALID)) {
+		error = EM_ERR_LIB_FAILED;
+		err_str = "odp_stash_create() failed!";
+		goto error;
+	}
+
+	return atomic_group;
+
+error:
+	INTERNAL_ERROR(error, EM_ESCOPE_ATOMIC_GROUP_CREATE, err_str);
+	if (atomic_group != EM_ATOMIC_GROUP_UNDEF)
+		em_atomic_group_delete(atomic_group);
+
+	return EM_ATOMIC_GROUP_UNDEF;
+}
+
+/*
+ * Helper for em_atomic_group_delete()
+ * Flush the atomic group's internal stashes and then destroy them.
+ */
+static int
+ag_stash_destroy(odp_stash_t stash)
+{
+	stash_entry_t entry_tbl[EM_SCHED_AG_MULTI_MAX_BURST];
+	odp_event_t odp_evtbl[EM_SCHED_AG_MULTI_MAX_BURST];
+	em_event_t ev_tbl[EM_SCHED_AG_MULTI_MAX_BURST];
+	event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST];
+	int32_t cnt = 0;
+	bool esv_ena = esv_enabled();
+
+	if (stash == ODP_STASH_INVALID)
+		return -1;
+
+	do {
+		cnt = odp_stash_get_u64(stash, &entry_tbl[0].u64 /*[out]*/,
+					EM_SCHED_AG_MULTI_MAX_BURST);
+		if (cnt <= 0)
+			break;
+		for (int32_t i = 0; i < cnt; i++)
+			odp_evtbl[i] = (odp_event_t)(uintptr_t)entry_tbl[i].evptr;
+
+		events_odp2em(odp_evtbl, ev_tbl/*out*/, cnt);
+
+		if (esv_ena) {
+			event_to_hdr_multi(ev_tbl, ev_hdr_tbl/*out*/, cnt);
+			evstate_em2usr_multi(ev_tbl/*in/out*/, ev_hdr_tbl,
+					     cnt, EVSTATE__AG_DELETE);
+		}
+
+		em_free_multi(ev_tbl, cnt);
+	} while (cnt > 0);
+
+	return odp_stash_destroy(stash);
+}
+
+em_status_t
+em_atomic_group_delete(em_atomic_group_t atomic_group)
+{
+	atomic_group_elem_t *const ag_elem =
+		atomic_group_elem_get(atomic_group);
+	em_status_t error = EM_OK;
+	int err = 0;
+
+	RETURN_ERROR_IF(ag_elem == NULL,
+			EM_ERR_BAD_ID, EM_ESCOPE_ATOMIC_GROUP_DELETE,
+			"Invalid atomic group - cannot delete!");
+
+	env_spinlock_lock(&ag_elem->lock);
+
+	/* Error checks */
+	err = !list_is_empty(&ag_elem->qlist_head);
+	err |= !atomic_group_allocated(ag_elem);
+
+	if (unlikely(err)) {
+		env_spinlock_unlock(&ag_elem->lock);
+		return INTERNAL_ERROR(EM_ERR_BAD_STATE,
+				      EM_ESCOPE_ATOMIC_GROUP_DELETE,
+				      "Atomic group in bad state - cannot delete!");
+	}
+
+	/* Flush the atomic group's internal stashes and destroy them */
+	err = ag_stash_destroy(ag_elem->stashes.hi_prio);
+	err |= ag_stash_destroy(ag_elem->stashes.lo_prio);
+
+	ag_elem->queue_group = EM_QUEUE_GROUP_UNDEF;
+	ag_elem->name[0] = '\0';
+
+	env_spinlock_unlock(&ag_elem->lock);
+
+	/* Free the atomic group (elem) back into the AG-pool */
+	error = atomic_group_free(atomic_group);
+	RETURN_ERROR_IF(error != EM_OK || err != 0,
+			error, EM_ESCOPE_ATOMIC_GROUP_DELETE,
+			"Atomic group free failed(%d)!", err);
+
+	return EM_OK;
+}
+
+em_queue_t
+em_queue_create_ag(const char *name, em_queue_prio_t prio,
+		   em_atomic_group_t atomic_group, const em_queue_conf_t *conf)
+{
+	em_queue_t queue;
+	queue_elem_t *q_elem;
+	em_queue_group_t queue_group;
+	atomic_group_elem_t *const ag_elem =
+		atomic_group_elem_get(atomic_group);
+	const char *err_str = "";
+
+	if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_CREATE_AG,
+			       "Invalid Atomic Group:%" PRI_AGRP "",
+			       atomic_group);
+		return EM_QUEUE_UNDEF;
+	}
+
+	queue_group = ag_elem->queue_group;
+
+	queue = queue_create(name, EM_QUEUE_TYPE_ATOMIC, prio, queue_group,
+			     EM_QUEUE_UNDEF, atomic_group, conf, &err_str);
+
+	if (unlikely(queue == EM_QUEUE_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_QUEUE_CREATE_AG,
+			       "Atomic Group queue
+
+em_queue_t
+em_queue_create_ag(const char *name, em_queue_prio_t prio,
+		   em_atomic_group_t atomic_group, const em_queue_conf_t *conf)
+{
+	em_queue_t queue;
+	queue_elem_t *q_elem;
+	em_queue_group_t queue_group;
+	atomic_group_elem_t *const ag_elem =
+		atomic_group_elem_get(atomic_group);
+	const char *err_str = "";
+
+	if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_CREATE_AG,
+			       "Invalid Atomic Group:%" PRI_AGRP "",
+			       atomic_group);
+		return EM_QUEUE_UNDEF;
+	}
+
+	queue_group = ag_elem->queue_group;
+
+	queue = queue_create(name, EM_QUEUE_TYPE_ATOMIC, prio, queue_group,
+			     EM_QUEUE_UNDEF, atomic_group, conf, &err_str);
+
+	if (unlikely(queue == EM_QUEUE_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_QUEUE_CREATE_AG,
+			       "Atomic Group queue creation failed! (%s)",
+			       err_str);
+		return EM_QUEUE_UNDEF;
+	}
+
+	q_elem = queue_elem_get(queue);
+	/* Add queue to atomic group list */
+	atomic_group_add_queue_list(ag_elem, q_elem);
+
+	return queue;
+}
+
+em_status_t
+em_queue_create_static_ag(const char *name, em_queue_prio_t prio,
+			  em_atomic_group_t atomic_group, em_queue_t queue,
+			  const em_queue_conf_t *conf)
+{
+	em_queue_t queue_static;
+	queue_elem_t *q_elem;
+	em_queue_group_t queue_group;
+	atomic_group_elem_t *const ag_elem =
+		atomic_group_elem_get(atomic_group);
+	const char *err_str = "";
+
+	RETURN_ERROR_IF(ag_elem == NULL || !atomic_group_allocated(ag_elem),
+			EM_ERR_BAD_ID, EM_ESCOPE_QUEUE_CREATE_STATIC_AG,
+			"Invalid Atomic Group:%" PRI_AGRP "", atomic_group);
+
+	queue_group = ag_elem->queue_group;
+
+	queue_static = queue_create(name, EM_QUEUE_TYPE_ATOMIC, prio,
+				    queue_group, queue, atomic_group, conf,
+				    &err_str);
+
+	RETURN_ERROR_IF(queue_static == EM_QUEUE_UNDEF ||
+			queue_static != queue,
+			EM_ERR_NOT_FREE, EM_ESCOPE_QUEUE_CREATE_STATIC_AG,
+			"Atomic Group static queue creation failed! (%s)",
+			err_str);
+
+	q_elem = queue_elem_get(queue);
+	/* Add queue to atomic group list */
+	atomic_group_add_queue_list(ag_elem, q_elem);
+
+	return EM_OK;
+}
+
+em_atomic_group_t
+em_atomic_group_get(em_queue_t queue)
+{
+	const queue_elem_t *q_elem = queue_elem_get(queue);
+
+	if (unlikely(q_elem == NULL || !queue_allocated(q_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ATOMIC_GROUP_GET,
+			       "Invalid queue:%" PRI_QUEUE "", queue);
+		return EM_ATOMIC_GROUP_UNDEF;
+	}
+
+	return q_elem->atomic_group;
+}
+
+size_t
+em_atomic_group_get_name(em_atomic_group_t atomic_group,
+			 char *name, size_t maxlen)
+{
+	const atomic_group_elem_t *ag_elem =
+		atomic_group_elem_get(atomic_group);
+	size_t len = 0;
+
+	if (unlikely(name == NULL || maxlen == 0)) {
+		INTERNAL_ERROR(EM_ERR_BAD_POINTER,
+			       EM_ESCOPE_ATOMIC_GROUP_GET_NAME,
+			       "Invalid args: name=0x%" PRIx64 ", maxlen=%zu",
+			       name, maxlen);
+		return 0;
+	}
+
+	if (unlikely(ag_elem == NULL || !atomic_group_allocated(ag_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ATOMIC_GROUP_GET_NAME,
+			       "Invalid Atomic Group:%" PRI_AGRP "",
+			       atomic_group);
+		name[0] = '\0';
+		return 0;
+	}
+
+	len = strnlen(ag_elem->name, sizeof(ag_elem->name) - 1);
+	if (maxlen - 1 < len)
+		len = maxlen - 1;
+
+	memcpy(name, ag_elem->name, len);
+	name[len] = '\0';
+
+	return len;
+}
+
+em_atomic_group_t
+em_atomic_group_find(const char *name)
+{
+	if (name && *name) {
+		for (int i = 0; i < EM_MAX_ATOMIC_GROUPS; i++) {
+			const atomic_group_elem_t *ag_elem =
+				&em_shm->atomic_group_tbl.ag_elem[i];
+
+			if (atomic_group_allocated(ag_elem) &&
+			    !strncmp(name, ag_elem->name,
+				     EM_ATOMIC_GROUP_NAME_LEN))
+				return ag_elem->atomic_group;
+		}
+	}
+	return EM_ATOMIC_GROUP_UNDEF;
+}
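+
+/*
+ * Iteration sketch added for illustration - not part of the original file.
+ * The _get_first/_get_next iterator state below is core (thread) local,
+ * so a full iteration must start and finish on the same core.
+ */
+#if 0 /* example only */
+static void
+example_atomic_group_print_all(void)
+{
+	unsigned int num = 0;
+	em_atomic_group_t ag = em_atomic_group_get_first(&num);
+
+	while (ag != EM_ATOMIC_GROUP_UNDEF) {
+		char name[EM_ATOMIC_GROUP_NAME_LEN];
+
+		em_atomic_group_get_name(ag, name, sizeof(name));
+		EM_LOG(EM_LOG_PRINT, "AG: %s\n", name);
+		ag = em_atomic_group_get_next();
+	}
+}
+#endif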
+
+em_atomic_group_t
+em_atomic_group_get_first(unsigned int *num)
+{
+	const atomic_group_elem_t *const agrp_elem_tbl =
+		em_shm->atomic_group_tbl.ag_elem;
+	const atomic_group_elem_t *ag_elem = &agrp_elem_tbl[0];
+	const unsigned int agrp_count = atomic_group_count();
+
+	_agrp_tbl_iter_idx = 0; /* reset iteration */
+
+	if (num)
+		*num = agrp_count;
+
+	if (agrp_count == 0) {
+		_agrp_tbl_iter_idx = EM_MAX_ATOMIC_GROUPS; /*UNDEF=_get_next()*/
+		return EM_ATOMIC_GROUP_UNDEF;
+	}
+
+	/* find first */
+	while (!atomic_group_allocated(ag_elem)) {
+		_agrp_tbl_iter_idx++;
+		if (_agrp_tbl_iter_idx >= EM_MAX_ATOMIC_GROUPS)
+			return EM_ATOMIC_GROUP_UNDEF;
+		ag_elem = &agrp_elem_tbl[_agrp_tbl_iter_idx];
+	}
+
+	return agrp_idx2hdl(_agrp_tbl_iter_idx);
+}
+
+em_atomic_group_t
+em_atomic_group_get_next(void)
+{
+	if (_agrp_tbl_iter_idx >= EM_MAX_ATOMIC_GROUPS - 1)
+		return EM_ATOMIC_GROUP_UNDEF;
+
+	_agrp_tbl_iter_idx++;
+
+	const atomic_group_elem_t *const agrp_elem_tbl =
+		em_shm->atomic_group_tbl.ag_elem;
+	const atomic_group_elem_t *ag_elem = &agrp_elem_tbl[_agrp_tbl_iter_idx];
+
+	/* find next */
+	while (!atomic_group_allocated(ag_elem)) {
+		_agrp_tbl_iter_idx++;
+		if (_agrp_tbl_iter_idx >= EM_MAX_ATOMIC_GROUPS)
+			return EM_ATOMIC_GROUP_UNDEF;
+		ag_elem = &agrp_elem_tbl[_agrp_tbl_iter_idx];
+	}
+
+	return agrp_idx2hdl(_agrp_tbl_iter_idx);
+}
+
+em_queue_t
+em_atomic_group_queue_get_first(unsigned int *num,
+				em_atomic_group_t atomic_group)
+{
+	const atomic_group_elem_t *const agrp_elem =
+		atomic_group_elem_get(atomic_group);
+
+	if (unlikely(agrp_elem == NULL || !atomic_group_allocated(agrp_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID,
+			       EM_ESCOPE_ATOMIC_GROUP_QUEUE_GET_FIRST,
+			       "Invalid atomic group:%" PRI_AGRP "",
+			       atomic_group);
+		if (num)
+			*num = 0;
+		return EM_QUEUE_UNDEF;
+	}
+
+	const unsigned int num_queues =
+		env_atomic32_get(&agrp_elem->num_queues);
+
+	if (num)
+		*num = num_queues;
+
+	if (num_queues == 0) {
+		_agrp_q_iter_idx = EM_MAX_QUEUES; /* UNDEF = _get_next() */
+		return EM_QUEUE_UNDEF;
+	}
+
+	/*
+	 * An 'agrp_elem' contains a linked list with all its queues. That
+	 * list might be modified while processing this iteration, so instead
+	 * we just go through the whole queue table.
+	 * This is potentially a slow implementation and perhaps worth
+	 * re-thinking?
+	 */
+	const queue_elem_t *const q_elem_tbl = em_shm->queue_tbl.queue_elem;
+	const queue_elem_t *q_elem = &q_elem_tbl[0];
+
+	_agrp_q_iter_idx = 0; /* reset list */
+	_agrp_q_iter_agrp = atomic_group;
+
+	/* find first */
+	while (!queue_allocated(q_elem) ||
+	       q_elem->atomic_group != _agrp_q_iter_agrp) {
+		_agrp_q_iter_idx++;
+		if (_agrp_q_iter_idx >= EM_MAX_QUEUES)
+			return EM_QUEUE_UNDEF;
+		q_elem = &q_elem_tbl[_agrp_q_iter_idx];
+	}
+
+	return queue_idx2hdl(_agrp_q_iter_idx);
+}
+
+em_queue_t
+em_atomic_group_queue_get_next(void)
+{
+	if (_agrp_q_iter_idx >= EM_MAX_QUEUES - 1)
+		return EM_QUEUE_UNDEF;
+
+	_agrp_q_iter_idx++;
+
+	const queue_elem_t *const q_elem_tbl = em_shm->queue_tbl.queue_elem;
+	const queue_elem_t *q_elem = &q_elem_tbl[_agrp_q_iter_idx];
+
+	/* find next */
+	while (!queue_allocated(q_elem) ||
+	       q_elem->atomic_group != _agrp_q_iter_agrp) {
+		_agrp_q_iter_idx++;
+		if (_agrp_q_iter_idx >= EM_MAX_QUEUES)
+			return EM_QUEUE_UNDEF;
+		q_elem = &q_elem_tbl[_agrp_q_iter_idx];
+	}
+
+	return queue_idx2hdl(_agrp_q_iter_idx);
+}
diff --git a/src/event_machine_dispatcher.c b/src/event_machine_dispatcher.c
index b90e8d10..220b95b5 100644
--- a/src/event_machine_dispatcher.c
+++ b/src/event_machine_dispatcher.c
@@ -1,229 +1,227 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -/* - * em_dispatch() helper: check if the user provided callback functions - * 'input_poll' and 'output_drain' should be called in - * this dispatch round - */ -static inline bool -check_poll_drain_round(unsigned int interval, odp_time_t poll_drain_period) -{ - if (interval > 1) { - em_locm_t *const locm = &em_locm; - - locm->poll_drain_dispatch_cnt--; - if (locm->poll_drain_dispatch_cnt == 0) { - odp_time_t now = odp_time_global(); - odp_time_t period; - - period = odp_time_diff(now, locm->poll_drain_dispatch_last_run); - locm->poll_drain_dispatch_cnt = interval; - - if (odp_time_cmp(poll_drain_period, period) < 0) { - locm->poll_drain_dispatch_last_run = now; - return true; - } - } - } else { - return true; - } - return false; -} - -/* - * em_dispatch() helper: dispatch and call the user provided callback functions - * 'input_poll' and 'output_drain' - */ -static inline uint64_t -dispatch_with_userfn(uint64_t rounds, bool do_input_poll, bool do_output_drain) -{ - const bool do_forever = rounds == 0 ? true : false; - const em_input_poll_func_t input_poll = - em_shm->conf.input.input_poll_fn; - const em_output_drain_func_t output_drain = - em_shm->conf.output.output_drain_fn; - int rx_events = 0; - int dispatched_events; - int round_events; - uint64_t events = 0; - uint64_t i; - bool do_poll_drain_round; - const unsigned int poll_interval = em_shm->opt.dispatch.poll_drain_interval; - const odp_time_t poll_period = em_shm->opt.dispatch.poll_drain_interval_time; - - for (i = 0; do_forever || i < rounds;) { - dispatched_events = 0; - - do_poll_drain_round = check_poll_drain_round(poll_interval, poll_period); - - if (do_input_poll && do_poll_drain_round) - rx_events = input_poll(); - - do { - round_events = dispatch_round(); - dispatched_events += round_events; - i++; /* inc rounds */ - } while (dispatched_events < rx_events && - round_events > 0 && (do_forever || i < rounds)); - - events += dispatched_events; /* inc ret value*/ - if (do_output_drain && do_poll_drain_round) - (void)output_drain(); - } - - return events; -} - -/* - * em_dispatch() helper: dispatch without calling any user provided callbacks - */ -static inline uint64_t -dispatch_no_userfn(uint64_t rounds) -{ - const bool do_forever = rounds == 0 ? 
true : false; - uint64_t events = 0; - uint64_t i; - - if (do_forever) { - for (;/*ever*/;) - dispatch_round(); - } else { - for (i = 0; i < rounds; i++) - events += dispatch_round(); - } - - return events; -} - -uint64_t -em_dispatch(uint64_t rounds /* 0 = forever */) -{ - uint64_t events = 0; - int round_events; - - const em_locm_t *const locm = &em_locm; - const bool do_input_poll = locm->do_input_poll; - const bool do_output_drain = locm->do_output_drain; - - odp_schedule_resume(); - - if (do_input_poll || do_output_drain) - events = dispatch_with_userfn(rounds, do_input_poll, - do_output_drain); - else - events = dispatch_no_userfn(rounds); - - /* pause scheduling before exiting the dispatch loop */ - odp_schedule_pause(); - /* empty the locally pre-scheduled events (if any) */ - do { - round_events = dispatch_round(); - events += round_events; - } while (round_events > 0); - - return events; -} - -em_status_t -em_dispatch_register_enter_cb(em_dispatch_enter_func_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB, - "EM dispatch callbacks disabled"); - - hook_fn.disp_enter = func; - stat = hook_register(DISPATCH_CALLBACK_ENTER, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, - EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB, - "Dispatch callback register failed"); - - return EM_OK; -} - -em_status_t -em_dispatch_unregister_enter_cb(em_dispatch_enter_func_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB, - "EM dispatch callbacks disabled"); - - hook_fn.disp_enter = func; - stat = hook_unregister(DISPATCH_CALLBACK_ENTER, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, - EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB, - "Dispatch callback unregister failed"); - - return EM_OK; -} - -em_status_t -em_dispatch_register_exit_cb(em_dispatch_exit_func_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB, - "EM dispatch callbacks disabled"); - - hook_fn.disp_exit = func; - stat = hook_register(DISPATCH_CALLBACK_EXIT, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, - EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB, - "Dispatch callback register failed"); - return EM_OK; -} - -em_status_t -em_dispatch_unregister_exit_cb(em_dispatch_exit_func_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB, - "EM dispatch callbacks disabled"); - - hook_fn.disp_exit = func; - stat = hook_unregister(DISPATCH_CALLBACK_EXIT, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, - EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB, - "Dispatch callback unregister failed"); - return EM_OK; -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +/* + * em_dispatch() helper: check if the user provided callback functions + * 'input_poll' and 'output_drain' should be called in + * this dispatch round + */ +static inline bool +check_poll_drain_round(unsigned int interval, odp_time_t poll_drain_period) +{ + if (interval > 1) { + em_locm_t *const locm = &em_locm; + + locm->poll_drain_dispatch_cnt--; + if (locm->poll_drain_dispatch_cnt == 0) { + odp_time_t now = odp_time_global(); + odp_time_t period; + + period = odp_time_diff(now, locm->poll_drain_dispatch_last_run); + locm->poll_drain_dispatch_cnt = interval; + + if (odp_time_cmp(poll_drain_period, period) < 0) { + locm->poll_drain_dispatch_last_run = now; + return true; + } + } + } else { + return true; + } + return false; +} + +/* + * em_dispatch() helper: dispatch and call the user provided callback functions + * 'input_poll' and 'output_drain' + */ +static inline uint64_t +dispatch_with_userfn(uint64_t rounds, bool do_input_poll, bool do_output_drain) +{ + const bool do_forever = rounds == 0 ? true : false; + const em_input_poll_func_t input_poll = + em_shm->conf.input.input_poll_fn; + const em_output_drain_func_t output_drain = + em_shm->conf.output.output_drain_fn; + int rx_events = 0; + int dispatched_events; + int round_events; + uint64_t events = 0; + bool do_poll_drain_round; + const unsigned int poll_interval = em_shm->opt.dispatch.poll_drain_interval; + const odp_time_t poll_period = em_shm->opt.dispatch.poll_drain_interval_time; + + for (uint64_t i = 0; do_forever || i < rounds;) { + dispatched_events = 0; + + do_poll_drain_round = check_poll_drain_round(poll_interval, poll_period); + + if (do_input_poll && do_poll_drain_round) + rx_events = input_poll(); + + do { + round_events = dispatch_round(); + dispatched_events += round_events; + i++; /* inc rounds */ + } while (dispatched_events < rx_events && + round_events > 0 && (do_forever || i < rounds)); + + events += dispatched_events; /* inc ret value*/ + if (do_output_drain && do_poll_drain_round) + (void)output_drain(); + } + + return events; +} + +/* + * em_dispatch() helper: dispatch without calling any user provided callbacks + */ +static inline uint64_t +dispatch_no_userfn(uint64_t rounds) +{ + const bool do_forever = rounds == 0 ? 
true : false; + uint64_t events = 0; + + if (do_forever) { + for (;/*ever*/;) + dispatch_round(); + } else { + for (uint64_t i = 0; i < rounds; i++) + events += dispatch_round(); + } + + return events; +} + +uint64_t +em_dispatch(uint64_t rounds /* 0 = forever */) +{ + uint64_t events = 0; + int round_events; + + const em_locm_t *const locm = &em_locm; + const bool do_input_poll = locm->do_input_poll; + const bool do_output_drain = locm->do_output_drain; + + odp_schedule_resume(); + + if (do_input_poll || do_output_drain) + events = dispatch_with_userfn(rounds, do_input_poll, + do_output_drain); + else + events = dispatch_no_userfn(rounds); + + /* pause scheduling before exiting the dispatch loop */ + odp_schedule_pause(); + /* empty the locally pre-scheduled events (if any) */ + do { + round_events = dispatch_round(); + events += round_events; + } while (round_events > 0); + + return events; +} + +em_status_t +em_dispatch_register_enter_cb(em_dispatch_enter_func_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB, + "EM dispatch callbacks disabled"); + + hook_fn.disp_enter = func; + stat = hook_register(DISPATCH_CALLBACK_ENTER, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, + EM_ESCOPE_DISPATCH_REGISTER_ENTER_CB, + "Dispatch callback register failed"); + + return EM_OK; +} + +em_status_t +em_dispatch_unregister_enter_cb(em_dispatch_enter_func_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB, + "EM dispatch callbacks disabled"); + + hook_fn.disp_enter = func; + stat = hook_unregister(DISPATCH_CALLBACK_ENTER, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, + EM_ESCOPE_DISPATCH_UNREGISTER_ENTER_CB, + "Dispatch callback unregister failed"); + + return EM_OK; +} + +em_status_t +em_dispatch_register_exit_cb(em_dispatch_exit_func_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB, + "EM dispatch callbacks disabled"); + + hook_fn.disp_exit = func; + stat = hook_register(DISPATCH_CALLBACK_EXIT, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, + EM_ESCOPE_DISPATCH_REGISTER_EXIT_CB, + "Dispatch callback register failed"); + return EM_OK; +} + +em_status_t +em_dispatch_unregister_exit_cb(em_dispatch_exit_func_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_DISPATCH_CALLBACKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB, + "EM dispatch callbacks disabled"); + + hook_fn.disp_exit = func; + stat = hook_unregister(DISPATCH_CALLBACK_EXIT, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, + EM_ESCOPE_DISPATCH_UNREGISTER_EXIT_CB, + "Dispatch callback unregister failed"); + return EM_OK; +} diff --git a/src/event_machine_eo.c b/src/event_machine_eo.c index 6a0dd9c8..3b4e3aa5 100644 --- a/src/event_machine_eo.c +++ b/src/event_machine_eo.c @@ -1,1127 +1,1145 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -/* Per core (thread) state of em_eo_get_next() */ -static ENV_LOCAL unsigned int _eo_tbl_iter_idx; -/* Per core (thread) state of em_eo_queue_get_next() */ -static ENV_LOCAL unsigned int _eo_q_iter_idx; -static ENV_LOCAL em_eo_t _eo_q_iter_eo; - -em_eo_t -em_eo_create(const char *name, - em_start_func_t start, - em_start_local_func_t local_start, - em_stop_func_t stop, - em_stop_local_func_t local_stop, - em_receive_func_t receive, - const void *eo_ctx) -{ - em_eo_t eo; - eo_elem_t *eo_elem; - - if (unlikely(start == NULL || stop == NULL || receive == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EO_CREATE, - "Mandatory EO function pointer(s) NULL!"); - return EM_EO_UNDEF; - } - - eo = eo_alloc(); - if (unlikely(eo == EM_EO_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_CREATE, - "EO alloc failed!"); - return EM_EO_UNDEF; - } - - eo_elem = eo_elem_get(eo); - if (unlikely(eo_elem == NULL)) { - /* Fatal since eo_alloc() returned 'ok', should never happen */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), EM_ESCOPE_EO_CREATE, - "Invalid EO:%" PRI_EO "", eo); - return EM_EO_UNDEF; - } - - env_spinlock_lock(&eo_elem->lock); - - /* Store the name */ - if (name != NULL) { - strncpy(eo_elem->name, name, sizeof(eo_elem->name)); - eo_elem->name[sizeof(eo_elem->name) - 1] = '\0'; - } else { - eo_elem->name[0] = '\0'; - } - - /* EO's queue list init */ - list_init(&eo_elem->queue_list); - /* EO start: event buffering list init */ - list_init(&eo_elem->startfn_evlist); - - eo_elem->state = EM_EO_STATE_CREATED; - eo_elem->start_func = start; - eo_elem->start_local_func = local_start; - eo_elem->stop_func = stop; - eo_elem->stop_local_func = local_stop; - - eo_elem->use_multi_rcv = EM_FALSE; - eo_elem->max_events = 1; - eo_elem->receive_func = receive; - eo_elem->receive_multi_func = NULL; - - eo_elem->error_handler_func = NULL; - eo_elem->eo_ctx = (void *)(uintptr_t)eo_ctx; - eo_elem->eo = eo; - env_atomic32_init(&eo_elem->num_queues); - - env_spinlock_unlock(&eo_elem->lock); - - return eo; -} - -void em_eo_multircv_param_init(em_eo_multircv_param_t *param) -{ - if (unlikely(!param)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_EO_MULTIRCV_PARAM_INIT, - "Param pointer NULL!"); - return; - } - memset(param, 0, sizeof(em_eo_multircv_param_t)); - 
param->max_events = EM_EO_MULTIRCV_MAX_EVENTS; - param->__internal_check = EM_CHECK_INIT_CALLED; -} - -em_eo_t -em_eo_create_multircv(const char *name, const em_eo_multircv_param_t *param) -{ - em_eo_t eo; - eo_elem_t *eo_elem; - int max_events; - - if (unlikely(!param || - param->__internal_check != EM_CHECK_INIT_CALLED)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EO_CREATE_MULTIRCV, - "Invalid param ptr:\n" - "Use em_eo_multircv_param_init() before create"); - return EM_EO_UNDEF; - } - - if (unlikely(!param->start || !param->stop || !param->receive_multi)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EO_CREATE_MULTIRCV, - "Mandatory EO function pointer(s) NULL!"); - return EM_EO_UNDEF; - } - - if (unlikely(param->max_events < 0)) { - INTERNAL_ERROR(EM_ERR_TOO_SMALL, EM_ESCOPE_EO_CREATE_MULTIRCV, - "Max number of events too small:%d", - param->max_events); - return EM_EO_UNDEF; - } - max_events = param->max_events; - if (max_events == 0) /* user requests default value */ - max_events = EM_EO_MULTIRCV_MAX_EVENTS; - - eo = eo_alloc(); - if (unlikely(eo == EM_EO_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_CREATE_MULTIRCV, - "EO alloc failed!"); - return EM_EO_UNDEF; - } - - eo_elem = eo_elem_get(eo); - if (unlikely(eo_elem == NULL)) { - /* Fatal since eo_alloc() returned 'ok', should never happen */ - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), - EM_ESCOPE_EO_CREATE_MULTIRCV, - "Invalid EO:%" PRI_EO "", eo); - return EM_EO_UNDEF; - } - - env_spinlock_lock(&eo_elem->lock); - - /* Store the name */ - if (name) { - strncpy(eo_elem->name, name, sizeof(eo_elem->name)); - eo_elem->name[sizeof(eo_elem->name) - 1] = '\0'; - } else { - eo_elem->name[0] = '\0'; - } - - /* EO's queue list init */ - list_init(&eo_elem->queue_list); - /* EO start: event buffering list init */ - list_init(&eo_elem->startfn_evlist); - - eo_elem->state = EM_EO_STATE_CREATED; - eo_elem->start_func = param->start; - eo_elem->start_local_func = param->local_start; - eo_elem->stop_func = param->stop; - eo_elem->stop_local_func = param->local_stop; - - eo_elem->use_multi_rcv = EM_TRUE; - eo_elem->max_events = max_events; - eo_elem->receive_func = NULL; - eo_elem->receive_multi_func = param->receive_multi; - - eo_elem->error_handler_func = NULL; - eo_elem->eo_ctx = (void *)(uintptr_t)param->eo_ctx; - eo_elem->eo = eo; - env_atomic32_init(&eo_elem->num_queues); - - env_spinlock_unlock(&eo_elem->lock); - - return eo; -} - -em_status_t -em_eo_delete(em_eo_t eo) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t status; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID, EM_ESCOPE_EO_DELETE, - "Invalid EO:%" PRI_EO "!", eo); - - RETURN_ERROR_IF(!eo_allocated(eo_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_EO_DELETE, - "EO not allocated:%" PRI_EO "", eo); - - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED && - eo_elem->state != EM_EO_STATE_ERROR, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_DELETE, - "EO invalid state, cannot delete:%d", eo_elem->state); - - status = eo_delete_queue_all(eo_elem); - - RETURN_ERROR_IF(status != EM_OK, status, EM_ESCOPE_EO_DELETE, - "EO delete: delete queues failed!"); - - /* Free EO back into the eo-pool and mark state=EO_STATE_UNDEF */ - status = eo_free(eo); - RETURN_ERROR_IF(status != EM_OK, status, EM_ESCOPE_EO_DELETE, - "EO delete failed!"); - - return status; -} - -size_t -em_eo_get_name(em_eo_t eo, char *name, size_t maxlen) -{ - const eo_elem_t *eo_elem = eo_elem_get(eo); - - if (name == NULL || maxlen == 0) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EO_GET_NAME, - "Invalid ptr 
or maxlen (name=0x%" PRIx64 ", maxlen=%zu)", - name, maxlen); - return 0; - } - - name[0] = '\0'; - - if (unlikely(eo_elem == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_GET_NAME, - "Invalid EO id %" PRI_EO "", eo); - return 0; - } - - if (unlikely(!eo_allocated(eo_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_EO_GET_NAME, - "EO not allocated:%" PRI_EO "", eo); - return 0; - } - - return eo_get_name(eo_elem, name, maxlen); -} - -em_eo_t -em_eo_find(const char *name) -{ - if (name && *name) { - for (int i = 0; i < EM_MAX_EOS; i++) { - const eo_elem_t *eo_elem = &em_shm->eo_tbl.eo_elem[i]; - - if (eo_elem->state != EM_EO_STATE_UNDEF && - !strncmp(name, eo_elem->name, EM_EO_NAME_LEN - 1)) - return eo_elem->eo; - } - } - return EM_EO_UNDEF; -} - -/** - * @brief Helper for em_eo_add_queue/_sync() - */ -static em_status_t -eo_add_queue_escope(em_eo_t eo, em_queue_t queue, - int num_notif, const em_notif_t notif_tbl[], - em_escope_t escope) -{ eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const q_elem = queue_elem_get(queue); - em_queue_type_t q_type; - em_status_t err; - int valid; - - RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, - EM_ERR_BAD_ARG, escope, - "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), - EM_ERR_BAD_ARG, escope, - "Not allocated: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - - q_type = em_queue_get_type(queue); - valid = q_type == EM_QUEUE_TYPE_ATOMIC || - q_type == EM_QUEUE_TYPE_PARALLEL || - q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || - q_type == EM_QUEUE_TYPE_LOCAL; - RETURN_ERROR_IF(!valid, EM_ERR_BAD_CONTEXT, escope, - "Invalid queue type: %" PRI_QTYPE "", q_type); - - if (num_notif > 0) { - err = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(err != EM_OK, err, escope, - "Invalid notif cfg given!"); - } - - err = eo_add_queue(eo_elem, q_elem); - RETURN_ERROR_IF(err != EM_OK, err, escope, - "eo_add_queue(Q:%" PRI_QUEUE ") fails", queue); - - if (eo_elem->state == EM_EO_STATE_RUNNING) { - err = queue_enable(q_elem); /* otherwise enabled in eo-start */ - RETURN_ERROR_IF(err != EM_OK, err, escope, - "queue_enable(Q:%" PRI_QUEUE ") fails", queue); - } - - if (num_notif > 0) { - /* Send notifications if requested */ - err = send_notifs(num_notif, notif_tbl); - RETURN_ERROR_IF(err != EM_OK, err, escope, - "EO:%" PRI_EO " send notif fails", eo); - } - - return EM_OK; -} - -em_status_t -em_eo_add_queue(em_eo_t eo, em_queue_t queue, - int num_notif, const em_notif_t notif_tbl[]) -{ - return eo_add_queue_escope(eo, queue, num_notif, notif_tbl, - EM_ESCOPE_EO_ADD_QUEUE); -} - -em_status_t -em_eo_add_queue_sync(em_eo_t eo, em_queue_t queue) -{ - /* No sync blocking needed when adding a queue to an EO */ - return eo_add_queue_escope(eo, queue, 0, NULL, - EM_ESCOPE_EO_ADD_QUEUE_SYNC); -} - -em_status_t -em_eo_remove_queue(em_eo_t eo, em_queue_t queue, - int num_notif, const em_notif_t notif_tbl[]) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const q_elem = queue_elem_get(queue); - em_queue_type_t q_type; - em_status_t ret; - int valid; - - RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, - EM_ERR_BAD_ID, EM_ESCOPE_EO_REMOVE_QUEUE, - "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_EO_REMOVE_QUEUE, - "Not allocated: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - - q_type = em_queue_get_type(queue); - valid = q_type 
== EM_QUEUE_TYPE_ATOMIC || - q_type == EM_QUEUE_TYPE_PARALLEL || - q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || - q_type == EM_QUEUE_TYPE_LOCAL; - RETURN_ERROR_IF(!valid, EM_ERR_BAD_CONTEXT, EM_ESCOPE_EO_REMOVE_QUEUE, - "Invalid queue type: %" PRI_QTYPE "", q_type); - - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE, - "Invalid notif cfg given!"); - RETURN_ERROR_IF(eo_elem != q_elem->eo_elem, - EM_ERR_BAD_POINTER, EM_ESCOPE_EO_REMOVE_QUEUE, - "Can't remove Q:%" PRI_QUEUE ", not added to this EO", - queue); - - /* - * Disable the queue if not already done, dispatcher will drop any - * further events. Need to handle events from the queue being processed - * in an EO receive function properly still. - */ - if (q_elem->state == EM_QUEUE_STATE_READY) { - ret = queue_disable(q_elem); - - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE, - "queue_disable(Q:%" PRI_QUEUE ") fails", - queue); - } - - /* - * Request each core to run locally the eo_remove_queue_local() function - * and when all are done call eo_remove_queue_done_callback(). - * The callback will finally remove the queue from the EO when it's - * known that no core is anymore processing events from that EO/queue. - */ - return eo_remove_queue_local_req(eo_elem, q_elem, num_notif, notif_tbl); -} - -em_status_t -em_eo_remove_queue_sync(em_eo_t eo, em_queue_t queue) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const q_elem = queue_elem_get(queue); - em_queue_type_t q_type; - em_status_t ret; - int valid; - - RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, - EM_ERR_BAD_ID, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Not allocated: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); - - q_type = em_queue_get_type(queue); - valid = q_type == EM_QUEUE_TYPE_ATOMIC || - q_type == EM_QUEUE_TYPE_PARALLEL || - q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || - q_type == EM_QUEUE_TYPE_LOCAL; - RETURN_ERROR_IF(!valid, EM_ERR_BAD_CONTEXT, - EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Invalid queue type: %" PRI_QTYPE "", q_type); - - RETURN_ERROR_IF(eo_elem != q_elem->eo_elem, - EM_ERR_BAD_POINTER, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Can't remove Q:%" PRI_QUEUE ", not added to this EO", - queue); - - /* Mark that a sync-API call is in progress */ - locm->sync_api.in_progress = true; - - /* - * Disable the queue if not already done, dispatcher will drop any - * further events. Need to handle events from the queue being processed - * in an EO receive function properly still. - */ - if (q_elem->state == EM_QUEUE_STATE_READY) { - ret = queue_disable(q_elem); - - if (unlikely(ret != EM_OK)) - goto eo_remove_queue_sync_error; - } - - /* - * Request each core to run locally the eo_remove_queue_sync_local() function - * and when all are done call eo_remove_queue_sync_done_callback. - * The callback will finally remove the queue from the EO when it's - * known that no core is anymore processing events from that EO/queue. - */ - ret = eo_remove_queue_sync_local_req(eo_elem, q_elem); - if (unlikely(ret != EM_OK)) - goto eo_remove_queue_sync_error; - - /* - * Poll the core-local unscheduled control-queue for events. - * These events request the core to do a core-local operation (or nop). 
- * Poll and handle events until 'locm->sync_api.in_progress == false' - * indicating that this sync-API is 'done' on all conserned cores. - */ - while (locm->sync_api.in_progress) - poll_unsched_ctrl_queue(); - - return EM_OK; - -eo_remove_queue_sync_error: - locm->sync_api.in_progress = false; - - return INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Failure: EO:%" PRI_EO " Q:%" PRI_QUEUE "", - eo, queue); -} - -em_status_t -em_eo_remove_queue_all(em_eo_t eo, int delete_queues, - int num_notif, const em_notif_t notif_tbl[]) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL, - "Invalid EO:%" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_BAD_STATE, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL, - "Not allocated: EO:%" PRI_EO "", eo); - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL, - "Invalid notif cfg given!"); - - ret = queue_disable_all(eo_elem); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL, - "queue_disable_all() failed!"); - - /* - * Request each core to run locally the eo_remove_queue_all_local() function - * and when all are done call eo_remove_queue_all_done_callback(). - * The callback will finally remove the queue from the EO when it's - * known that no core is anymore processing events from that EO/queue. - */ - return eo_remove_queue_all_local_req(eo_elem, delete_queues, - num_notif, notif_tbl); -} - -em_status_t -em_eo_remove_queue_all_sync(em_eo_t eo, int delete_queues) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC, - "Invalid EO:%" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_BAD_STATE, - EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC, - "Not allocated: EO:%" PRI_EO "", eo); - - /* Mark that a sync-API call is in progress */ - locm->sync_api.in_progress = true; - - ret = queue_disable_all(eo_elem); - if (unlikely(ret != EM_OK)) - goto eo_remove_queue_all_sync_error; - - /* - * Request each core to run locally the eo_remove_queue_all_sync_local() function - * and when all are done call eo_remove_queue_all_sync_done_callback(). - * The callback will finally remove the queue from the EO when it's - * known that no core is anymore processing events from that EO/queue. - */ - ret = eo_remove_queue_all_sync_local_req(eo_elem, delete_queues); - if (unlikely(ret != EM_OK)) - goto eo_remove_queue_all_sync_error; - - /* - * Poll the core-local unscheduled control-queue for events. - * These events request the core to do a core-local operation (or nop). - * Poll and handle events until 'locm->sync_api.in_progress == false' - * indicating that this sync-API is 'done' on all conserned cores. 
- */ - while (locm->sync_api.in_progress) - poll_unsched_ctrl_queue(); - - return EM_OK; - -eo_remove_queue_all_sync_error: - locm->sync_api.in_progress = false; - - return INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC, - "Failure: EO:%" PRI_EO "", eo); -} - -em_status_t -em_eo_register_error_handler(em_eo_t eo, em_error_handler_t handler) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - - RETURN_ERROR_IF(eo_elem == NULL || handler == NULL, - EM_ERR_BAD_ARG, EM_ESCOPE_EO_REGISTER_ERROR_HANDLER, - "Invalid args: EO:%" PRI_EO " handler:%p", eo, handler); - RETURN_ERROR_IF(!eo_allocated(eo_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_EO_REGISTER_ERROR_HANDLER, - "EO:%" PRI_EO " not allocated", eo); - - env_spinlock_lock(&eo_elem->lock); - eo_elem->error_handler_func = handler; - env_spinlock_unlock(&eo_elem->lock); - - return EM_OK; -} - -em_status_t -em_eo_unregister_error_handler(em_eo_t eo) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG, - EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER, - "Invalid EO id %" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_BAD_STATE, - EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER, - "EO not allocated:%" PRI_EO "", eo); - - env_spinlock_lock(&eo_elem->lock); - eo_elem->error_handler_func = NULL; - env_spinlock_unlock(&eo_elem->lock); - - return EM_OK; -} - -em_status_t -em_eo_start(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf, - int num_notif, const em_notif_t notif_tbl[]) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const save_q_elem = locm->current.q_elem; - queue_elem_t tmp_q_elem; - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID, EM_ESCOPE_EO_START, - "Invalid EO id %" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_EO_START, - "EO not allocated:%" PRI_EO "", eo); - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_START, - "EO invalid state, cannot start:%d", eo_elem->state); - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_START, - "Invalid notif cfg given!"); - - eo_elem->state = EM_EO_STATE_STARTING; - /* This core is in the EO start function: buffer all sent events */ - locm->start_eo_elem = eo_elem; - /* - * Use a tmp q_elem as the 'current q_elem' to enable calling - * em_eo_current() from the EO start functions. - * Before returning, restore the original 'current q_elem' from - * 'save_q_elem'. - */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = eo; - - locm->current.q_elem = &tmp_q_elem; - /* Call the global EO start function */ - ret = eo_elem->start_func(eo_elem->eo_ctx, eo, conf); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - locm->start_eo_elem = NULL; - - /* Store the return value of the actual EO global start function */ - if (result != NULL) - *result = ret; - - if (unlikely(ret != EM_OK)) { - ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, - "EO:%" PRI_EO " start func fails:0x%08x", - eo, ret); - /* user error handler might change error from own eo-start */ - if (ret != EM_OK) - goto eo_start_error; - } - - if (eo_elem->start_local_func != NULL) { - /* - * Notifications sent when the local start functions - * have completed. 
- */ - ret = eo_start_local_req(eo_elem, num_notif, notif_tbl); - - if (unlikely(ret != EM_OK)) { - INTERNAL_ERROR(ret, EM_ESCOPE_EO_START, - "EO:%" PRI_EO " local start func fails", - eo); - /* Can't allow user err handler to change error here */ - goto eo_start_error; - } - /* - * Note: Return here, queues will be enabled after the local - * start funcs complete. - * EO state changed to 'EM_EO_STATE_RUNNING' after successful - * completion of EO local starts on all cores. - */ - return EM_OK; - } - - /* - * Enable all the EO's queues. - * Note: if local start functions are given then enable can be done only - * after they have been run on each core. - */ - ret = queue_enable_all(eo_elem); - if (unlikely(ret != EM_OK)) - goto eo_start_error; - - eo_elem->state = EM_EO_STATE_RUNNING; - - /* Send events buffered during the EO-start/local-start functions */ - eo_start_send_buffered_events(eo_elem); - - if (num_notif > 0) { - /* Send notifications if requested */ - ret = send_notifs(num_notif, notif_tbl); - - if (unlikely(ret != EM_OK)) { - ret = INTERNAL_ERROR(ret, EM_ESCOPE_EO_START, - "EO:%" PRI_EO " send notif fails", - eo); - /* user error handler might change error */ - if (ret != EM_OK) - goto eo_start_error; - } - } - - return EM_OK; - -eo_start_error: - /* roll back state to allow EO delete */ - eo_elem->state = EM_EO_STATE_ERROR; - return ret; -} - -em_status_t -em_eo_start_sync(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const save_q_elem = locm->current.q_elem; - queue_elem_t tmp_q_elem; - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID, EM_ESCOPE_EO_START_SYNC, - "Invalid EO id %" PRI_EO "", eo); - RETURN_ERROR_IF(!eo_allocated(eo_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_EO_START_SYNC, - "EO not allocated:%" PRI_EO "", eo); - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_START_SYNC, - "EO invalid state, cannot start:%d", eo_elem->state); - - eo_elem->state = EM_EO_STATE_STARTING; - /* This core is in the EO start function: buffer all sent events */ - locm->start_eo_elem = eo_elem; - /* - * Use a tmp q_elem as the 'current q_elem' to enable calling - * em_eo_current() from the EO start functions. - * Before returning, restore the original 'current q_elem' from - * 'save_q_elem'. 
- */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = eo; - locm->current.q_elem = &tmp_q_elem; - /* Call the global EO start function */ - ret = eo_elem->start_func(eo_elem->eo_ctx, eo, conf); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - locm->start_eo_elem = NULL; - - /* Store the return value of the actual EO global start function */ - if (result != NULL) - *result = ret; - - if (unlikely(ret != EM_OK)) { - ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START_SYNC, - "EO:%" PRI_EO " start func fails:0x%08x", - eo, ret); - /* user error handler might change error from own eo-start */ - if (ret != EM_OK) { - /* roll back state to allow EO delete */ - eo_elem->state = EM_EO_STATE_ERROR; - return ret; - } - } - - if (eo_elem->start_local_func != NULL) { - /* Mark that a sync-API call is in progress */ - locm->sync_api.in_progress = true; - - locm->start_eo_elem = eo_elem; - locm->current.q_elem = &tmp_q_elem; - /* Call the local start on this core */ - ret = eo_elem->start_local_func(eo_elem->eo_ctx, eo); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - locm->start_eo_elem = NULL; - - if (unlikely(ret != EM_OK)) { - INTERNAL_ERROR(ret, EM_ESCOPE_EO_START_SYNC, - "EO:%" PRI_EO " local start func fails", eo); - /* Can't allow user err handler to change error here */ - goto eo_start_sync_error; - } - - ret = eo_start_sync_local_req(eo_elem); - if (unlikely(ret != EM_OK)) { - INTERNAL_ERROR(ret, EM_ESCOPE_EO_START_SYNC, - "EO:%" PRI_EO " eo_start_sync_local_req", eo); - /* Can't allow user err handler to change error here */ - goto eo_start_sync_error; - } - - /* - * Poll the core-local unscheduled control-queue for events. - * These events request the core to do a core-local operation (or nop). - * Poll and handle events until 'locm->sync_api.in_progress == false' - * indicating that this sync-API is 'done' on all conserned cores. - */ - while (locm->sync_api.in_progress) - poll_unsched_ctrl_queue(); - - /* Send events buffered during the EO-start/local-start funcs */ - eo_start_send_buffered_events(eo_elem); - /* - * EO state changed to 'EO_STATE_RUNNING' after successful - * completion of EO local starts on all cores. - */ - return EM_OK; - } - - /* - * Enable all the EO's queues. - * Note: if local start functions are given then enable can be done only - * after they have been run on each core. - */ - ret = queue_enable_all(eo_elem); - if (unlikely(ret != EM_OK)) - goto eo_start_sync_error; - - eo_elem->state = EM_EO_STATE_RUNNING; - - /* Send events buffered during the EO-start/local-start functions */ - eo_start_send_buffered_events(eo_elem); - return EM_OK; - -eo_start_sync_error: - locm->sync_api.in_progress = false; - /* roll back state to allow EO delete */ - eo_elem->state = EM_EO_STATE_ERROR; - return ret; -} - -em_status_t -em_eo_stop(em_eo_t eo, int num_notif, const em_notif_t notif_tbl[]) -{ - eo_elem_t *const eo_elem = eo_elem_get(eo); - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), - EM_ERR_BAD_ID, EM_ESCOPE_EO_STOP, - "Invalid EO:%" PRI_EO "", eo); - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_RUNNING, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_STOP, - "EO invalid state, cannot stop:%d", eo_elem->state); - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_STOP, - "Invalid notif cfg given!"); - - eo_elem->state = EM_EO_STATE_STOPPING; - - /* - * Disable all queues. 
- * It doesn't matter if some of the queues are already disabled. - */ - queue_disable_all(eo_elem); - - /* - * Notifications sent when the local stop functions - * have completed. EO global stop called when all local stops have - * been completed. EO state changed to 'stopped' only after completing - * the EO global stop function. - */ - ret = eo_stop_local_req(eo_elem, num_notif, notif_tbl); - - if (unlikely(ret != EM_OK)) { - eo_elem->state = EM_EO_STATE_ERROR; - INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP, - "EO:%" PRI_EO " local stop func fails", eo); - /* Can't allow user err handler to change error here */ - return ret; - } - - return EM_OK; -} - -em_status_t -em_eo_stop_sync(em_eo_t eo) -{ - em_locm_t *const locm = &em_locm; - eo_elem_t *const eo_elem = eo_elem_get(eo); - queue_elem_t *const save_q_elem = locm->current.q_elem; - queue_elem_t tmp_q_elem; - em_status_t ret; - - RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), - EM_ERR_BAD_ID, EM_ESCOPE_EO_STOP_SYNC, - "Invalid EO:%" PRI_EO "", eo); - RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_RUNNING, - EM_ERR_BAD_STATE, EM_ESCOPE_EO_STOP_SYNC, - "EO invalid state, cannot stop:%d", eo_elem->state); - - /* Mark that a sync-API call is in progress */ - locm->sync_api.in_progress = true; - - eo_elem->state = EM_EO_STATE_STOPPING; - - /* - * Disable all queues. - * It doesn't matter if some of the queues are already disabled. - */ - ret = queue_disable_all(eo_elem); - if (unlikely(ret != EM_OK)) - goto eo_stop_sync_error; - - /* - * Use a tmp q_elem as the 'current q_elem' to enable calling - * em_eo_current() from the EO stop functions. - * Before returning, restore the original 'current q_elem' from - * 'save_q_elem'. - */ - memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); - tmp_q_elem.eo = eo; - - if (eo_elem->stop_local_func != NULL) { - locm->current.q_elem = &tmp_q_elem; - /* Call the local stop on this core */ - ret = eo_elem->stop_local_func(eo_elem->eo_ctx, eo_elem->eo); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - if (unlikely(ret != EM_OK)) - goto eo_stop_sync_error; - } - - /* - * Notifications sent when the local stop functions have completed. - * EO global stop called when all local stops have been completed. - * EO state changed to 'stopped' only after completing the EO global - * stop function. - */ - ret = eo_stop_sync_local_req(eo_elem); - - if (unlikely(ret != EM_OK)) { - eo_elem->state = EM_EO_STATE_ERROR; - INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_SYNC, - "EO:%" PRI_EO " local stop func fails", eo); - /* Can't allow user err handler to change error here */ - goto eo_stop_sync_error; - } - - /* - * Poll the core-local unscheduled control-queue for events. - * These events request the core to do a core-local operation (or nop). - * Poll and handle events until 'locm->sync_api.in_progress == false' - * indicating that this sync-API is 'done' on all conserned cores. - */ - while (locm->sync_api.in_progress) - poll_unsched_ctrl_queue(); - - /* Change state here to allow em_eo_delete() from EO global stop */ - eo_elem->state = EM_EO_STATE_CREATED; /* == stopped */ - - locm->current.q_elem = &tmp_q_elem; - /* - * Call the Global EO stop function now that all - * EO local stop functions are done. 
- */ - ret = eo_elem->stop_func(eo_elem->eo_ctx, eo); - /* Restore the original 'current q_elem' */ - locm->current.q_elem = save_q_elem; - - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_STOP_SYNC, - "EO:%" PRI_EO " stop-func failed", eo); - /* - * Note: the EO might not be available after this if the EO global stop - * called em_eo_delete()! - */ - return EM_OK; - -eo_stop_sync_error: - locm->sync_api.in_progress = false; - return INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_SYNC, - "Failure: EO:%" PRI_EO "", eo); -} - -em_eo_t -em_eo_current(void) -{ - return eo_current(); -} - -void * -em_eo_get_context(em_eo_t eo) -{ - const eo_elem_t *eo_elem = eo_elem_get(eo); - em_eo_state_t eo_state; - - if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_GET_CONTEXT, - "Invalid EO:%" PRI_EO "", eo); - return NULL; - } - - eo_state = eo_elem->state; - if (unlikely(eo_state < EM_EO_STATE_CREATED)) { - INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_EO_GET_CONTEXT, - "Invalid EO state: EO:%" PRI_EO " state:%d", - eo, eo_state); - return NULL; - } - - return eo_elem->eo_ctx; -} - -em_eo_state_t -em_eo_get_state(em_eo_t eo) -{ - const eo_elem_t *eo_elem = eo_elem_get(eo); - - if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_GET_STATE, - "Invalid EO:%" PRI_EO "", eo); - return EM_EO_STATE_UNDEF; - } - - return eo_elem->state; -} - -em_eo_t -em_eo_get_first(unsigned int *num) -{ - _eo_tbl_iter_idx = 0; /* reset iteration */ - const unsigned int eo_cnt = eo_count(); - - if (num) - *num = eo_cnt; - - if (eo_cnt == 0) { - _eo_tbl_iter_idx = EM_MAX_EOS; /* UNDEF = _get_next() */ - return EM_EO_UNDEF; - } - - /* find first */ - while (!eo_allocated(&em_shm->eo_tbl.eo_elem[_eo_tbl_iter_idx])) { - _eo_tbl_iter_idx++; - if (_eo_tbl_iter_idx >= EM_MAX_EOS) - return EM_EO_UNDEF; - } - - return eo_idx2hdl(_eo_tbl_iter_idx); -} - -em_eo_t -em_eo_get_next(void) -{ - if (_eo_tbl_iter_idx >= EM_MAX_EOS - 1) - return EM_EO_UNDEF; - - _eo_tbl_iter_idx++; - - /* find next */ - while (!eo_allocated(&em_shm->eo_tbl.eo_elem[_eo_tbl_iter_idx])) { - _eo_tbl_iter_idx++; - if (_eo_tbl_iter_idx >= EM_MAX_EOS) - return EM_EO_UNDEF; - } - - return eo_idx2hdl(_eo_tbl_iter_idx); -} - -em_queue_t -em_eo_queue_get_first(unsigned int *num, em_eo_t eo) -{ - const eo_elem_t *eo_elem = eo_elem_get(eo); - - if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_QUEUE_GET_FIRST, - "Invalid EO:%" PRI_EO "", eo); - if (num) - *num = 0; - return EM_QUEUE_UNDEF; - } - - const unsigned int num_queues = env_atomic32_get(&eo_elem->num_queues); - - if (num) - *num = num_queues; - - if (num_queues == 0) { - _eo_q_iter_idx = EM_MAX_QUEUES; /* UNDEF = _get_next() */ - return EM_QUEUE_UNDEF; - } - - /* - * An 'eo_elem' contains a linked list with all it's queues. That list - * might be modified while processing this iteration, so instead we just - * go through the whole queue table. - * This is potentially a slow implementation and perhaps worth - * re-thinking? 
- */ - const queue_tbl_t *const queue_tbl = &em_shm->queue_tbl; - - _eo_q_iter_idx = 0; /* reset list */ - _eo_q_iter_eo = eo; - - /* find first */ - while (!queue_allocated(&queue_tbl->queue_elem[_eo_q_iter_idx]) || - queue_tbl->queue_elem[_eo_q_iter_idx].eo != _eo_q_iter_eo) { - _eo_q_iter_idx++; - if (_eo_q_iter_idx >= EM_MAX_QUEUES) - return EM_QUEUE_UNDEF; - } - - return queue_idx2hdl(_eo_q_iter_idx); -} - -em_queue_t -em_eo_queue_get_next(void) -{ - if (_eo_q_iter_idx >= EM_MAX_QUEUES - 1) - return EM_QUEUE_UNDEF; - - _eo_q_iter_idx++; - - const queue_tbl_t *const queue_tbl = &em_shm->queue_tbl; - - /* find next */ - while (!queue_allocated(&queue_tbl->queue_elem[_eo_q_iter_idx]) || - queue_tbl->queue_elem[_eo_q_iter_idx].eo != _eo_q_iter_eo) { - _eo_q_iter_idx++; - if (_eo_q_iter_idx >= EM_MAX_QUEUES) - return EM_QUEUE_UNDEF; - } - - return queue_idx2hdl(_eo_q_iter_idx); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */
+
+#include "em_include.h"
+
+/* Per core (thread) state of em_eo_get_next() */
+static ENV_LOCAL unsigned int _eo_tbl_iter_idx;
+/* Per core (thread) state of em_eo_queue_get_next() */
+static ENV_LOCAL unsigned int _eo_q_iter_idx;
+static ENV_LOCAL em_eo_t _eo_q_iter_eo;
+
+em_eo_t
+em_eo_create(const char *name,
+	     em_start_func_t start,
+	     em_start_local_func_t local_start,
+	     em_stop_func_t stop,
+	     em_stop_local_func_t local_stop,
+	     em_receive_func_t receive,
+	     const void *eo_ctx)
+{
+	em_eo_t eo;
+	eo_elem_t *eo_elem;
+
+	if (unlikely(start == NULL || stop == NULL || receive == NULL)) {
+		INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EO_CREATE,
+			       "Mandatory EO function pointer(s) NULL!");
+		return EM_EO_UNDEF;
+	}
+
+	eo = eo_alloc();
+	if (unlikely(eo == EM_EO_UNDEF)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_CREATE,
+			       "EO alloc failed!");
+		return EM_EO_UNDEF;
+	}
+
+	eo_elem = eo_elem_get(eo);
+	if (unlikely(eo_elem == NULL)) {
+		/* Fatal since eo_alloc() returned 'ok', should never happen */
+		INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), EM_ESCOPE_EO_CREATE,
+			       "Invalid EO:%" PRI_EO "", eo);
+		return EM_EO_UNDEF;
+	}
+
+	env_spinlock_lock(&eo_elem->lock);
+
+	/* Store the name */
+	if (name != NULL) {
+		strncpy(eo_elem->name, name, sizeof(eo_elem->name));
+		eo_elem->name[sizeof(eo_elem->name) - 1] = '\0';
+	} else {
+		eo_elem->name[0] = '\0';
+	}
+
+	/* EO's queue list init */
+	list_init(&eo_elem->queue_list);
+	/* EO start: event buffering init */
+	eo_elem->stash = ODP_STASH_INVALID;
+
+	eo_elem->state = EM_EO_STATE_CREATED;
+	eo_elem->start_func = start;
+	eo_elem->start_local_func = local_start;
+	eo_elem->stop_func = stop;
+	eo_elem->stop_local_func = local_stop;
+
+	eo_elem->use_multi_rcv = EM_FALSE;
+	eo_elem->max_events = 1;
+	eo_elem->receive_func = receive;
+	eo_elem->receive_multi_func = NULL;
+
+	eo_elem->error_handler_func = NULL;
+	eo_elem->eo_ctx = (void *)(uintptr_t)eo_ctx;
+	eo_elem->eo = eo;
+	env_atomic32_init(&eo_elem->num_queues);
+
+	env_spinlock_unlock(&eo_elem->lock);
+
+	return eo;
+}
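+
+/*
+ * Usage sketch added for illustration - not part of the original file.
+ * A minimal single-event-receive EO; all names below are placeholders
+ * and the handlers only satisfy the mandatory function pointers.
+ */
+#if 0 /* example only */
+static em_status_t
+ex_start(void *eo_ctx, em_eo_t eo, const em_eo_conf_t *conf)
+{
+	(void)eo_ctx; (void)eo; (void)conf;
+	return EM_OK;
+}
+
+static em_status_t
+ex_stop(void *eo_ctx, em_eo_t eo)
+{
+	(void)eo_ctx; (void)eo;
+	return EM_OK;
+}
+
+static void
+ex_receive(void *eo_ctx, em_event_t event, em_event_type_t type,
+	   em_queue_t queue, void *q_ctx)
+{
+	(void)eo_ctx; (void)type; (void)queue; (void)q_ctx;
+	em_free(event); /* consume the event */
+}
+
+static em_eo_t
+ex_eo_create(void)
+{
+	/* no local start/stop functions, no EO context */
+	return em_eo_create("ex-eo", ex_start, NULL, ex_stop, NULL,
+			    ex_receive, NULL);
+}
+#endif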
eo_elem_get(eo); + if (unlikely(eo_elem == NULL)) { + /* Fatal since eo_alloc() returned 'ok', should never happen */ + INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_ID), + EM_ESCOPE_EO_CREATE_MULTIRCV, + "Invalid EO:%" PRI_EO "", eo); + return EM_EO_UNDEF; + } + + env_spinlock_lock(&eo_elem->lock); + + /* Store the name */ + if (name) { + strncpy(eo_elem->name, name, sizeof(eo_elem->name)); + eo_elem->name[sizeof(eo_elem->name) - 1] = '\0'; + } else { + eo_elem->name[0] = '\0'; + } + + /* EO's queue list init */ + list_init(&eo_elem->queue_list); + /* EO start: event buffering init */ + eo_elem->stash = ODP_STASH_INVALID; + + eo_elem->state = EM_EO_STATE_CREATED; + eo_elem->start_func = param->start; + eo_elem->start_local_func = param->local_start; + eo_elem->stop_func = param->stop; + eo_elem->stop_local_func = param->local_stop; + + eo_elem->use_multi_rcv = EM_TRUE; + eo_elem->max_events = max_events; + eo_elem->receive_func = NULL; + eo_elem->receive_multi_func = param->receive_multi; + + eo_elem->error_handler_func = NULL; + eo_elem->eo_ctx = (void *)(uintptr_t)param->eo_ctx; + eo_elem->eo = eo; + env_atomic32_init(&eo_elem->num_queues); + + env_spinlock_unlock(&eo_elem->lock); + + return eo; +} + +em_status_t +em_eo_delete(em_eo_t eo) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + em_status_t status; + + RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID, EM_ESCOPE_EO_DELETE, + "Invalid EO:%" PRI_EO "!", eo); + + RETURN_ERROR_IF(!eo_allocated(eo_elem), + EM_ERR_BAD_STATE, EM_ESCOPE_EO_DELETE, + "EO not allocated:%" PRI_EO "", eo); + + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED && + eo_elem->state != EM_EO_STATE_ERROR, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_DELETE, + "EO invalid state, cannot delete:%d", eo_elem->state); + + status = eo_delete_queue_all(eo_elem); + + RETURN_ERROR_IF(status != EM_OK, status, EM_ESCOPE_EO_DELETE, + "EO delete: delete queues failed!"); + + /* Free EO back into the eo-pool and mark state=EO_STATE_UNDEF */ + status = eo_free(eo); + RETURN_ERROR_IF(status != EM_OK, status, EM_ESCOPE_EO_DELETE, + "EO delete failed!"); + + return status; +} + +size_t +em_eo_get_name(em_eo_t eo, char *name, size_t maxlen) +{ + const eo_elem_t *eo_elem = eo_elem_get(eo); + + if (name == NULL || maxlen == 0) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EO_GET_NAME, + "Invalid ptr or maxlen (name=0x%" PRIx64 ", maxlen=%zu)", + name, maxlen); + return 0; + } + + name[0] = '\0'; + + if (unlikely(eo_elem == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_GET_NAME, + "Invalid EO id %" PRI_EO "", eo); + return 0; + } + + if (unlikely(!eo_allocated(eo_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_EO_GET_NAME, + "EO not allocated:%" PRI_EO "", eo); + return 0; + } + + return eo_get_name(eo_elem, name, maxlen); +} + +em_eo_t +em_eo_find(const char *name) +{ + if (name && *name) { + for (int i = 0; i < EM_MAX_EOS; i++) { + const eo_elem_t *eo_elem = &em_shm->eo_tbl.eo_elem[i]; + + if (eo_elem->state != EM_EO_STATE_UNDEF && + !strncmp(name, eo_elem->name, EM_EO_NAME_LEN - 1)) + return eo_elem->eo; + } + } + return EM_EO_UNDEF; +} + +/** + * @brief Helper for em_eo_add_queue/_sync() + */ +static em_status_t +eo_add_queue_escope(em_eo_t eo, em_queue_t queue, + int num_notif, const em_notif_t notif_tbl[], + em_escope_t escope) +{ eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const q_elem = queue_elem_get(queue); + em_queue_type_t q_type; + em_status_t err; + int valid; + + RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, + EM_ERR_BAD_ARG, escope, + "Invalid args: 
EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), + EM_ERR_BAD_ARG, escope, + "Not allocated: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + + q_type = em_queue_get_type(queue); + valid = q_type == EM_QUEUE_TYPE_ATOMIC || + q_type == EM_QUEUE_TYPE_PARALLEL || + q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || + q_type == EM_QUEUE_TYPE_LOCAL; + RETURN_ERROR_IF(!valid, EM_ERR_BAD_CONTEXT, escope, + "Invalid queue type: %" PRI_QTYPE "", q_type); + + if (num_notif > 0) { + err = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(err != EM_OK, err, escope, + "Invalid notif cfg given!"); + } + + err = eo_add_queue(eo_elem, q_elem); + RETURN_ERROR_IF(err != EM_OK, err, escope, + "eo_add_queue(Q:%" PRI_QUEUE ") fails", queue); + + if (eo_elem->state == EM_EO_STATE_RUNNING) { + err = queue_enable(q_elem); /* otherwise enabled in eo-start */ + RETURN_ERROR_IF(err != EM_OK, err, escope, + "queue_enable(Q:%" PRI_QUEUE ") fails", queue); + } + + if (num_notif > 0) { + /* Send notifications if requested */ + err = send_notifs(num_notif, notif_tbl); + RETURN_ERROR_IF(err != EM_OK, err, escope, + "EO:%" PRI_EO " send notif fails", eo); + } + + return EM_OK; +} + +em_status_t +em_eo_add_queue(em_eo_t eo, em_queue_t queue, + int num_notif, const em_notif_t notif_tbl[]) +{ + return eo_add_queue_escope(eo, queue, num_notif, notif_tbl, + EM_ESCOPE_EO_ADD_QUEUE); +} + +em_status_t +em_eo_add_queue_sync(em_eo_t eo, em_queue_t queue) +{ + /* No sync blocking needed when adding a queue to an EO */ + return eo_add_queue_escope(eo, queue, 0, NULL, + EM_ESCOPE_EO_ADD_QUEUE_SYNC); +} + +em_status_t +em_eo_remove_queue(em_eo_t eo, em_queue_t queue, + int num_notif, const em_notif_t notif_tbl[]) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const q_elem = queue_elem_get(queue); + em_queue_type_t q_type; + em_status_t ret; + int valid; + + RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL, + EM_ERR_BAD_ID, EM_ESCOPE_EO_REMOVE_QUEUE, + "Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem), + EM_ERR_BAD_STATE, EM_ESCOPE_EO_REMOVE_QUEUE, + "Not allocated: EO:%" PRI_EO " Q:%" PRI_QUEUE "", + eo, queue); + + q_type = em_queue_get_type(queue); + valid = q_type == EM_QUEUE_TYPE_ATOMIC || + q_type == EM_QUEUE_TYPE_PARALLEL || + q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED || + q_type == EM_QUEUE_TYPE_LOCAL; + RETURN_ERROR_IF(!valid, EM_ERR_BAD_CONTEXT, EM_ESCOPE_EO_REMOVE_QUEUE, + "Invalid queue type: %" PRI_QTYPE "", q_type); + + ret = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE, + "Invalid notif cfg given!"); + RETURN_ERROR_IF(eo_elem != q_elem->eo_elem, + EM_ERR_BAD_POINTER, EM_ESCOPE_EO_REMOVE_QUEUE, + "Can't remove Q:%" PRI_QUEUE ", not added to this EO", + queue); + + /* + * Disable the queue if not already done, dispatcher will drop any + * further events. Need to handle events from the queue being processed + * in an EO receive function properly still. + */ + if (q_elem->state == EM_QUEUE_STATE_READY) { + ret = queue_disable(q_elem); + + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE, + "queue_disable(Q:%" PRI_QUEUE ") fails", + queue); + } + + /* + * Request each core to run locally the eo_remove_queue_local() function + * and when all are done call eo_remove_queue_done_callback(). 
+	 * The callback will finally remove the queue from the EO when it's
+	 * known that no core is anymore processing events from that EO/queue.
+	 */
+	return eo_remove_queue_local_req(eo_elem, q_elem, num_notif, notif_tbl);
+}
+
+em_status_t
+em_eo_remove_queue_sync(em_eo_t eo, em_queue_t queue)
+{
+	em_locm_t *const locm = &em_locm;
+	eo_elem_t *const eo_elem = eo_elem_get(eo);
+	queue_elem_t *const q_elem = queue_elem_get(queue);
+	em_queue_type_t q_type;
+	em_status_t ret;
+	int valid;
+
+	RETURN_ERROR_IF(eo_elem == NULL || q_elem == NULL,
+			EM_ERR_BAD_ID, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC,
+			"Invalid args: EO:%" PRI_EO " Q:%" PRI_QUEUE "",
+			eo, queue);
+	RETURN_ERROR_IF(!eo_allocated(eo_elem) || !queue_allocated(q_elem),
+			EM_ERR_BAD_STATE, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC,
+			"Not allocated: EO:%" PRI_EO " Q:%" PRI_QUEUE "",
+			eo, queue);
+
+	q_type = em_queue_get_type(queue);
+	valid = q_type == EM_QUEUE_TYPE_ATOMIC ||
+		q_type == EM_QUEUE_TYPE_PARALLEL ||
+		q_type == EM_QUEUE_TYPE_PARALLEL_ORDERED ||
+		q_type == EM_QUEUE_TYPE_LOCAL;
+	RETURN_ERROR_IF(!valid, EM_ERR_BAD_CONTEXT,
+			EM_ESCOPE_EO_REMOVE_QUEUE_SYNC,
+			"Invalid queue type: %" PRI_QTYPE "", q_type);
+
+	RETURN_ERROR_IF(eo_elem != q_elem->eo_elem,
+			EM_ERR_BAD_POINTER, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC,
+			"Can't remove Q:%" PRI_QUEUE ", not added to this EO",
+			queue);
+
+	/* Mark that a sync-API call is in progress */
+	locm->sync_api.in_progress = true;
+
+	/*
+	 * Disable the queue if not already done, dispatcher will drop any
+	 * further events. Need to handle events from the queue being processed
+	 * in an EO receive function properly still.
+	 */
+	if (q_elem->state == EM_QUEUE_STATE_READY) {
+		ret = queue_disable(q_elem);
+
+		if (unlikely(ret != EM_OK))
+			goto eo_remove_queue_sync_error;
+	}
+
+	/*
+	 * Request each core to run locally the eo_remove_queue_sync_local() function
+	 * and when all are done call eo_remove_queue_sync_done_callback().
+	 * The callback will finally remove the queue from the EO when it's
+	 * known that no core is anymore processing events from that EO/queue.
+	 */
+	ret = eo_remove_queue_sync_local_req(eo_elem, q_elem);
+	if (unlikely(ret != EM_OK))
+		goto eo_remove_queue_sync_error;
+
+	/*
+	 * Poll the core-local unscheduled control-queue for events.
+	 * These events request the core to do a core-local operation (or nop).
+	 * Poll and handle events until 'locm->sync_api.in_progress == false'
+	 * indicating that this sync-API is 'done' on all concerned cores.
+	 */
+	while (locm->sync_api.in_progress)
+		poll_unsched_ctrl_queue();
+
+	return EM_OK;
+
+eo_remove_queue_sync_error:
+	locm->sync_api.in_progress = false;
+
+	return INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_SYNC,
+			      "Failure: EO:%" PRI_EO " Q:%" PRI_QUEUE "",
+			      eo, queue);
+}
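+
+/*
+ * Illustrative usage sketch, not part of the EM sources ('my_eo' and
+ * 'my_queue' are hypothetical handles set up earlier by the application):
+ *
+ *	em_status_t stat = em_eo_remove_queue_sync(my_eo, my_queue);
+ *
+ *	if (stat == EM_OK)
+ *		stat = em_queue_delete(my_queue);
+ *
+ * The sync-variant returns only when no core is processing events from
+ * 'my_queue' anymore, after which it is safe to delete the queue.
+ */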
+
+em_status_t
+em_eo_remove_queue_all(em_eo_t eo, int delete_queues,
+		       int num_notif, const em_notif_t notif_tbl[])
+{
+	eo_elem_t *const eo_elem = eo_elem_get(eo);
+	em_status_t ret;
+
+	RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID,
+			EM_ESCOPE_EO_REMOVE_QUEUE_ALL,
+			"Invalid EO:%" PRI_EO "", eo);
+	RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_BAD_STATE,
+			EM_ESCOPE_EO_REMOVE_QUEUE_ALL,
+			"Not allocated: EO:%" PRI_EO "", eo);
+	ret = check_notif_tbl(num_notif, notif_tbl);
+	RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL,
+			"Invalid notif cfg given!");
+
+	ret = queue_disable_all(eo_elem);
+	RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL,
+			"queue_disable_all() failed!");
+
+	/*
+	 * Request each core to run locally the eo_remove_queue_all_local() function
+	 * and when all are done call eo_remove_queue_all_done_callback().
+	 * The callback will finally remove the queues from the EO when it's
+	 * known that no core is anymore processing events from that EO/queue.
+	 */
+	return eo_remove_queue_all_local_req(eo_elem, delete_queues,
+					     num_notif, notif_tbl);
+}
+
+em_status_t
+em_eo_remove_queue_all_sync(em_eo_t eo, int delete_queues)
+{
+	em_locm_t *const locm = &em_locm;
+	eo_elem_t *const eo_elem = eo_elem_get(eo);
+	em_status_t ret;
+
+	RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID,
+			EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC,
+			"Invalid EO:%" PRI_EO "", eo);
+	RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_BAD_STATE,
+			EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC,
+			"Not allocated: EO:%" PRI_EO "", eo);
+
+	/* Mark that a sync-API call is in progress */
+	locm->sync_api.in_progress = true;
+
+	ret = queue_disable_all(eo_elem);
+	if (unlikely(ret != EM_OK))
+		goto eo_remove_queue_all_sync_error;
+
+	/*
+	 * Request each core to run locally the eo_remove_queue_all_sync_local() function
+	 * and when all are done call eo_remove_queue_all_sync_done_callback().
+	 * The callback will finally remove the queues from the EO when it's
+	 * known that no core is anymore processing events from that EO/queue.
+	 */
+	ret = eo_remove_queue_all_sync_local_req(eo_elem, delete_queues);
+	if (unlikely(ret != EM_OK))
+		goto eo_remove_queue_all_sync_error;
+
+	/*
+	 * Poll the core-local unscheduled control-queue for events.
+	 * These events request the core to do a core-local operation (or nop).
+	 * Poll and handle events until 'locm->sync_api.in_progress == false'
+	 * indicating that this sync-API is 'done' on all concerned cores.
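+	 * (Completion requires the other EM cores to keep running the
+	 * dispatch loop so that they can serve these core-local requests.)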
+	 */
+	while (locm->sync_api.in_progress)
+		poll_unsched_ctrl_queue();
+
+	return EM_OK;
+
+eo_remove_queue_all_sync_error:
+	locm->sync_api.in_progress = false;
+
+	return INTERNAL_ERROR(ret, EM_ESCOPE_EO_REMOVE_QUEUE_ALL_SYNC,
+			      "Failure: EO:%" PRI_EO "", eo);
+}
+
+em_status_t
+em_eo_register_error_handler(em_eo_t eo, em_error_handler_t handler)
+{
+	eo_elem_t *const eo_elem = eo_elem_get(eo);
+
+	RETURN_ERROR_IF(eo_elem == NULL || handler == NULL,
+			EM_ERR_BAD_ARG, EM_ESCOPE_EO_REGISTER_ERROR_HANDLER,
+			"Invalid args: EO:%" PRI_EO " handler:%p", eo, handler);
+	RETURN_ERROR_IF(!eo_allocated(eo_elem),
+			EM_ERR_BAD_STATE, EM_ESCOPE_EO_REGISTER_ERROR_HANDLER,
+			"EO:%" PRI_EO " not allocated", eo);
+
+	env_spinlock_lock(&eo_elem->lock);
+	eo_elem->error_handler_func = handler;
+	env_spinlock_unlock(&eo_elem->lock);
+
+	return EM_OK;
+}
+
+em_status_t
+em_eo_unregister_error_handler(em_eo_t eo)
+{
+	eo_elem_t *const eo_elem = eo_elem_get(eo);
+
+	RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ARG,
+			EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER,
+			"Invalid EO id %" PRI_EO "", eo);
+	RETURN_ERROR_IF(!eo_allocated(eo_elem), EM_ERR_BAD_STATE,
+			EM_ESCOPE_EO_UNREGISTER_ERROR_HANDLER,
+			"EO not allocated:%" PRI_EO "", eo);
+
+	env_spinlock_lock(&eo_elem->lock);
+	eo_elem->error_handler_func = NULL;
+	env_spinlock_unlock(&eo_elem->lock);
+
+	return EM_OK;
+}
+
+em_status_t
+em_eo_start(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf,
+	    int num_notif, const em_notif_t notif_tbl[])
+{
+	em_locm_t *const locm = &em_locm;
+	eo_elem_t *const eo_elem = eo_elem_get(eo);
+	queue_elem_t *const save_q_elem = locm->current.q_elem;
+	queue_elem_t tmp_q_elem;
+	em_status_t ret;
+
+	RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID, EM_ESCOPE_EO_START,
+			"Invalid EO id %" PRI_EO "", eo);
+	RETURN_ERROR_IF(!eo_allocated(eo_elem),
+			EM_ERR_BAD_STATE, EM_ESCOPE_EO_START,
+			"EO not allocated:%" PRI_EO "", eo);
+	RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED,
+			EM_ERR_BAD_STATE, EM_ESCOPE_EO_START,
+			"EO invalid state, cannot start:%d", eo_elem->state);
+	ret = check_notif_tbl(num_notif, notif_tbl);
+	RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_START,
+			"Invalid notif cfg given!");
+
+	eo_elem->state = EM_EO_STATE_STARTING;
+
+	/* Create a stash to buffer events sent during EO-start */
+	eo_elem->stash = eo_start_stash_create(eo_elem->name);
+	if (unlikely(eo_elem->stash == ODP_STASH_INVALID)) {
+		ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START,
+				     "EO:%" PRI_EO " start stash creation fails", eo);
+		goto eo_start_error;
+	}
+	/* This core is in the EO start function: buffer all sent events */
+	locm->start_eo_elem = eo_elem;
+	/*
+	 * Use a tmp q_elem as the 'current q_elem' to enable calling
+	 * em_eo_current() from the EO start functions.
+	 * Before returning, restore the original 'current q_elem' from
+	 * 'save_q_elem'.
+ */ + memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); + tmp_q_elem.eo = eo; + + locm->current.q_elem = &tmp_q_elem; + /* Call the global EO start function */ + ret = eo_elem->start_func(eo_elem->eo_ctx, eo, conf); + /* Restore the original 'current q_elem' */ + locm->current.q_elem = save_q_elem; + locm->start_eo_elem = NULL; + + /* Store the return value of the actual EO global start function */ + if (result != NULL) + *result = ret; + + if (unlikely(ret != EM_OK)) { + ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " start func fails:0x%08x", + eo, ret); + /* user error handler might change error from own eo-start */ + if (ret != EM_OK) + goto eo_start_error; + } + + if (eo_elem->start_local_func != NULL) { + /* + * Notifications sent when the local start functions + * have completed. + */ + ret = eo_start_local_req(eo_elem, num_notif, notif_tbl); + + if (unlikely(ret != EM_OK)) { + INTERNAL_ERROR(ret, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " local start func fails", + eo); + /* Can't allow user err handler to change error here */ + goto eo_start_error; + } + /* + * Note: Return here, queues will be enabled after the local + * start funcs complete. + * EO state changed to 'EM_EO_STATE_RUNNING' after successful + * completion of EO local starts on all cores. + */ + return EM_OK; + } + + /* + * Enable all the EO's queues. + * Note: if local start functions are given then enable can be done only + * after they have been run on each core. + */ + ret = queue_enable_all(eo_elem); + if (unlikely(ret != EM_OK)) + goto eo_start_error; + + eo_elem->state = EM_EO_STATE_RUNNING; + + /* Send events buffered during the EO-start/local-start functions */ + eo_start_send_buffered_events(eo_elem); + + if (num_notif > 0) { + /* Send notifications if requested */ + ret = send_notifs(num_notif, notif_tbl); + + if (unlikely(ret != EM_OK)) { + ret = INTERNAL_ERROR(ret, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " send notif fails", + eo); + /* user error handler might change error */ + if (ret != EM_OK) + goto eo_start_error; + } + } + + return EM_OK; + +eo_start_error: + /* roll back state to allow EO delete */ + eo_elem->state = EM_EO_STATE_ERROR; + return ret; +} + +em_status_t +em_eo_start_sync(em_eo_t eo, em_status_t *result, const em_eo_conf_t *conf) +{ + em_locm_t *const locm = &em_locm; + eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const save_q_elem = locm->current.q_elem; + queue_elem_t tmp_q_elem; + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL, EM_ERR_BAD_ID, EM_ESCOPE_EO_START_SYNC, + "Invalid EO id %" PRI_EO "", eo); + RETURN_ERROR_IF(!eo_allocated(eo_elem), + EM_ERR_BAD_STATE, EM_ESCOPE_EO_START_SYNC, + "EO not allocated:%" PRI_EO "", eo); + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_CREATED, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_START_SYNC, + "EO invalid state, cannot start:%d", eo_elem->state); + + eo_elem->state = EM_EO_STATE_STARTING; + + /* Create a stash to buffer events sent during EO-start */ + eo_elem->stash = eo_start_stash_create(eo_elem->name); + if (unlikely(eo_elem->stash == ODP_STASH_INVALID)) { + ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START, + "EO:%" PRI_EO " start stash creation fails", eo); + /* roll back state to allow EO delete */ + eo_elem->state = EM_EO_STATE_ERROR; + return ret; + } + /* This core is in the EO start function: buffer all sent events */ + locm->start_eo_elem = eo_elem; + /* + * Use a tmp q_elem as the 'current q_elem' to enable calling + * em_eo_current() from the EO start functions. 
+	 * Before returning, restore the original 'current q_elem' from
+	 * 'save_q_elem'.
+	 */
+	memset(&tmp_q_elem, 0, sizeof(tmp_q_elem));
+	tmp_q_elem.eo = eo;
+	locm->current.q_elem = &tmp_q_elem;
+	/* Call the global EO start function */
+	ret = eo_elem->start_func(eo_elem->eo_ctx, eo, conf);
+	/* Restore the original 'current q_elem' */
+	locm->current.q_elem = save_q_elem;
+	locm->start_eo_elem = NULL;
+
+	/* Store the return value of the actual EO global start function */
+	if (result != NULL)
+		*result = ret;
+
+	if (unlikely(ret != EM_OK)) {
+		ret = INTERNAL_ERROR(EM_ERR, EM_ESCOPE_EO_START_SYNC,
+				     "EO:%" PRI_EO " start func fails:0x%08x",
+				     eo, ret);
+		/* user error handler might change error from own eo-start */
+		if (ret != EM_OK) {
+			/* roll back state to allow EO delete */
+			eo_elem->state = EM_EO_STATE_ERROR;
+			return ret;
+		}
+	}
+
+	if (eo_elem->start_local_func != NULL) {
+		/* Mark that a sync-API call is in progress */
+		locm->sync_api.in_progress = true;
+
+		locm->start_eo_elem = eo_elem;
+		locm->current.q_elem = &tmp_q_elem;
+		/* Call the local start on this core */
+		ret = eo_elem->start_local_func(eo_elem->eo_ctx, eo);
+		/* Restore the original 'current q_elem' */
+		locm->current.q_elem = save_q_elem;
+		locm->start_eo_elem = NULL;
+
+		if (unlikely(ret != EM_OK)) {
+			INTERNAL_ERROR(ret, EM_ESCOPE_EO_START_SYNC,
+				       "EO:%" PRI_EO " local start func fails", eo);
+			/* Can't allow user err handler to change error here */
+			goto eo_start_sync_error;
+		}
+
+		ret = eo_start_sync_local_req(eo_elem);
+		if (unlikely(ret != EM_OK)) {
+			INTERNAL_ERROR(ret, EM_ESCOPE_EO_START_SYNC,
+				       "EO:%" PRI_EO " eo_start_sync_local_req", eo);
+			/* Can't allow user err handler to change error here */
+			goto eo_start_sync_error;
+		}
+
+		/*
+		 * Poll the core-local unscheduled control-queue for events.
+		 * These events request the core to do a core-local operation (or nop).
+		 * Poll and handle events until 'locm->sync_api.in_progress == false'
+		 * indicating that this sync-API is 'done' on all concerned cores.
+		 */
+		while (locm->sync_api.in_progress)
+			poll_unsched_ctrl_queue();
+
+		/* Send events buffered during the EO-start/local-start funcs */
+		eo_start_send_buffered_events(eo_elem);
+		/*
+		 * EO state changed to 'EO_STATE_RUNNING' after successful
+		 * completion of EO local starts on all cores.
+		 */
+		return EM_OK;
+	}
+
+	/*
+	 * Enable all the EO's queues.
+	 * Note: if local start functions are given then enable can be done only
+	 * after they have been run on each core.
+ */ + ret = queue_enable_all(eo_elem); + if (unlikely(ret != EM_OK)) + goto eo_start_sync_error; + + eo_elem->state = EM_EO_STATE_RUNNING; + + /* Send events buffered during the EO-start/local-start functions */ + eo_start_send_buffered_events(eo_elem); + return EM_OK; + +eo_start_sync_error: + locm->sync_api.in_progress = false; + /* roll back state to allow EO delete */ + eo_elem->state = EM_EO_STATE_ERROR; + return ret; +} + +em_status_t +em_eo_stop(em_eo_t eo, int num_notif, const em_notif_t notif_tbl[]) +{ + eo_elem_t *const eo_elem = eo_elem_get(eo); + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), + EM_ERR_BAD_ID, EM_ESCOPE_EO_STOP, + "Invalid EO:%" PRI_EO "", eo); + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_RUNNING, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_STOP, + "EO invalid state, cannot stop:%d", eo_elem->state); + ret = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_STOP, + "Invalid notif cfg given!"); + + eo_elem->state = EM_EO_STATE_STOPPING; + + /* + * Disable all queues. + * It doesn't matter if some of the queues are already disabled. + */ + queue_disable_all(eo_elem); + + /* + * Notifications sent when the local stop functions + * have completed. EO global stop called when all local stops have + * been completed. EO state changed to 'stopped' only after completing + * the EO global stop function. + */ + ret = eo_stop_local_req(eo_elem, num_notif, notif_tbl); + + if (unlikely(ret != EM_OK)) { + eo_elem->state = EM_EO_STATE_ERROR; + INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP, + "EO:%" PRI_EO " local stop func fails", eo); + /* Can't allow user err handler to change error here */ + return ret; + } + + return EM_OK; +} + +em_status_t +em_eo_stop_sync(em_eo_t eo) +{ + em_locm_t *const locm = &em_locm; + eo_elem_t *const eo_elem = eo_elem_get(eo); + queue_elem_t *const save_q_elem = locm->current.q_elem; + queue_elem_t tmp_q_elem; + em_status_t ret; + + RETURN_ERROR_IF(eo_elem == NULL || !eo_allocated(eo_elem), + EM_ERR_BAD_ID, EM_ESCOPE_EO_STOP_SYNC, + "Invalid EO:%" PRI_EO "", eo); + RETURN_ERROR_IF(eo_elem->state != EM_EO_STATE_RUNNING, + EM_ERR_BAD_STATE, EM_ESCOPE_EO_STOP_SYNC, + "EO invalid state, cannot stop:%d", eo_elem->state); + + /* Mark that a sync-API call is in progress */ + locm->sync_api.in_progress = true; + + eo_elem->state = EM_EO_STATE_STOPPING; + + /* + * Disable all queues. + * It doesn't matter if some of the queues are already disabled. + */ + ret = queue_disable_all(eo_elem); + if (unlikely(ret != EM_OK)) + goto eo_stop_sync_error; + + /* + * Use a tmp q_elem as the 'current q_elem' to enable calling + * em_eo_current() from the EO stop functions. + * Before returning, restore the original 'current q_elem' from + * 'save_q_elem'. + */ + memset(&tmp_q_elem, 0, sizeof(tmp_q_elem)); + tmp_q_elem.eo = eo; + + if (eo_elem->stop_local_func != NULL) { + locm->current.q_elem = &tmp_q_elem; + /* Call the local stop on this core */ + ret = eo_elem->stop_local_func(eo_elem->eo_ctx, eo_elem->eo); + /* Restore the original 'current q_elem' */ + locm->current.q_elem = save_q_elem; + if (unlikely(ret != EM_OK)) + goto eo_stop_sync_error; + } + + /* + * Notifications sent when the local stop functions have completed. + * EO global stop called when all local stops have been completed. + * EO state changed to 'stopped' only after completing the EO global + * stop function. 
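+	 * (This also makes it safe to call em_eo_delete() from within the
+	 * EO's global stop function, see the note further below.)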
+	 */
+	ret = eo_stop_sync_local_req(eo_elem);
+
+	if (unlikely(ret != EM_OK)) {
+		eo_elem->state = EM_EO_STATE_ERROR;
+		INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_SYNC,
+			       "EO:%" PRI_EO " local stop func fails", eo);
+		/* Can't allow user err handler to change error here */
+		goto eo_stop_sync_error;
+	}
+
+	/*
+	 * Poll the core-local unscheduled control-queue for events.
+	 * These events request the core to do a core-local operation (or nop).
+	 * Poll and handle events until 'locm->sync_api.in_progress == false'
+	 * indicating that this sync-API is 'done' on all concerned cores.
+	 */
+	while (locm->sync_api.in_progress)
+		poll_unsched_ctrl_queue();
+
+	/* Change state here to allow em_eo_delete() from EO global stop */
+	eo_elem->state = EM_EO_STATE_CREATED; /* == stopped */
+
+	locm->current.q_elem = &tmp_q_elem;
+	/*
+	 * Call the Global EO stop function now that all
+	 * EO local stop functions are done.
+	 */
+	ret = eo_elem->stop_func(eo_elem->eo_ctx, eo);
+	/* Restore the original 'current q_elem' */
+	locm->current.q_elem = save_q_elem;
+
+	RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EO_STOP_SYNC,
+			"EO:%" PRI_EO " stop-func failed", eo);
+	/*
+	 * Note: the EO might not be available after this if the EO global stop
+	 * called em_eo_delete()!
+	 */
+	return EM_OK;
+
+eo_stop_sync_error:
+	locm->sync_api.in_progress = false;
+	return INTERNAL_ERROR(ret, EM_ESCOPE_EO_STOP_SYNC,
+			      "Failure: EO:%" PRI_EO "", eo);
+}
+
+em_eo_t
+em_eo_current(void)
+{
+	return eo_current();
+}
+
+void *
+em_eo_get_context(em_eo_t eo)
+{
+	const eo_elem_t *eo_elem = eo_elem_get(eo);
+	em_eo_state_t eo_state;
+
+	if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_GET_CONTEXT,
+			       "Invalid EO:%" PRI_EO "", eo);
+		return NULL;
+	}
+
+	eo_state = eo_elem->state;
+	if (unlikely(eo_state < EM_EO_STATE_CREATED)) {
+		INTERNAL_ERROR(EM_ERR_BAD_STATE, EM_ESCOPE_EO_GET_CONTEXT,
+			       "Invalid EO state: EO:%" PRI_EO " state:%d",
+			       eo, eo_state);
+		return NULL;
+	}
+
+	return eo_elem->eo_ctx;
+}
+
+em_eo_state_t
+em_eo_get_state(em_eo_t eo)
+{
+	const eo_elem_t *eo_elem = eo_elem_get(eo);
+
+	if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_GET_STATE,
+			       "Invalid EO:%" PRI_EO "", eo);
+		return EM_EO_STATE_UNDEF;
+	}
+
+	return eo_elem->state;
+}
+
+em_eo_t
+em_eo_get_first(unsigned int *num)
+{
+	_eo_tbl_iter_idx = 0; /* reset iteration */
+	const unsigned int eo_cnt = eo_count();
+
+	if (num)
+		*num = eo_cnt;
+
+	if (eo_cnt == 0) {
+		_eo_tbl_iter_idx = EM_MAX_EOS; /* UNDEF = _get_next() */
+		return EM_EO_UNDEF;
+	}
+
+	/* find first */
+	while (!eo_allocated(&em_shm->eo_tbl.eo_elem[_eo_tbl_iter_idx])) {
+		_eo_tbl_iter_idx++;
+		if (_eo_tbl_iter_idx >= EM_MAX_EOS)
+			return EM_EO_UNDEF;
+	}
+
+	return eo_idx2hdl(_eo_tbl_iter_idx);
+}
+
+em_eo_t
+em_eo_get_next(void)
+{
+	if (_eo_tbl_iter_idx >= EM_MAX_EOS - 1)
+		return EM_EO_UNDEF;
+
+	_eo_tbl_iter_idx++;
+
+	/* find next */
+	while (!eo_allocated(&em_shm->eo_tbl.eo_elem[_eo_tbl_iter_idx])) {
+		_eo_tbl_iter_idx++;
+		if (_eo_tbl_iter_idx >= EM_MAX_EOS)
+			return EM_EO_UNDEF;
+	}
+
+	return eo_idx2hdl(_eo_tbl_iter_idx);
+}
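+
+/*
+ * Illustrative iteration sketch, not part of the EM sources: walk through
+ * all created EOs on one core:
+ *
+ *	unsigned int num = 0;
+ *	em_eo_t eo = em_eo_get_first(&num);
+ *
+ *	while (eo != EM_EO_UNDEF) {
+ *		// e.g. inspect the EO with em_eo_get_state(eo)
+ *		eo = em_eo_get_next();
+ *	}
+ *
+ * The iterator index is ENV_LOCAL (per core), so a walk must run from
+ * start to finish on the same core without starting a second walk.
+ */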
+
+em_queue_t
+em_eo_queue_get_first(unsigned int *num, em_eo_t eo)
+{
+	const eo_elem_t *eo_elem = eo_elem_get(eo);
+
+	if (unlikely(eo_elem == NULL || !eo_allocated(eo_elem))) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EO_QUEUE_GET_FIRST,
+			       "Invalid EO:%" PRI_EO "", eo);
+		if (num)
+			*num = 0;
+		return EM_QUEUE_UNDEF;
+	}
+
+	const unsigned int num_queues = env_atomic32_get(&eo_elem->num_queues);
+
+	if (num)
+		*num = num_queues;
+
+	if (num_queues == 0) {
+		_eo_q_iter_idx = EM_MAX_QUEUES; /* UNDEF = _get_next() */
+		return EM_QUEUE_UNDEF;
+	}
+
+	/*
+	 * An 'eo_elem' contains a linked list with all its queues. That list
+	 * might be modified while processing this iteration, so instead we just
+	 * go through the whole queue table.
+	 * This is potentially a slow implementation and perhaps worth
+	 * re-thinking?
+	 */
+	const queue_tbl_t *const queue_tbl = &em_shm->queue_tbl;
+
+	_eo_q_iter_idx = 0; /* reset list */
+	_eo_q_iter_eo = eo;
+
+	/* find first */
+	while (!queue_allocated(&queue_tbl->queue_elem[_eo_q_iter_idx]) ||
+	       queue_tbl->queue_elem[_eo_q_iter_idx].eo != _eo_q_iter_eo) {
+		_eo_q_iter_idx++;
+		if (_eo_q_iter_idx >= EM_MAX_QUEUES)
+			return EM_QUEUE_UNDEF;
+	}
+
+	return queue_idx2hdl(_eo_q_iter_idx);
+}
+
+em_queue_t
+em_eo_queue_get_next(void)
+{
+	if (_eo_q_iter_idx >= EM_MAX_QUEUES - 1)
+		return EM_QUEUE_UNDEF;
+
+	_eo_q_iter_idx++;
+
+	const queue_tbl_t *const queue_tbl = &em_shm->queue_tbl;
+
+	/* find next */
+	while (!queue_allocated(&queue_tbl->queue_elem[_eo_q_iter_idx]) ||
+	       queue_tbl->queue_elem[_eo_q_iter_idx].eo != _eo_q_iter_eo) {
+		_eo_q_iter_idx++;
+		if (_eo_q_iter_idx >= EM_MAX_QUEUES)
+			return EM_QUEUE_UNDEF;
+	}
+
+	return queue_idx2hdl(_eo_q_iter_idx);
+}
diff --git a/src/event_machine_event.c b/src/event_machine_event.c
index f938c7b8..04dfb1d7 100644
--- a/src/event_machine_event.c
+++ b/src/event_machine_event.c
@@ -1,1142 +1,1494 @@
-/*
- * Copyright (c) 2015, Nokia Solutions and Networks
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *   * Redistributions of source code must retain the above copyright
- *     notice, this list of conditions and the following disclaimer.
- *   * Redistributions in binary form must reproduce the above copyright
- *     notice, this list of conditions and the following disclaimer in the
- *     documentation and/or other materials provided with the distribution.
- *   * Neither the name of the copyright holder nor the names of its
- *     contributors may be used to endorse or promote products derived
- *     from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ - -#include "em_include.h" - -em_event_t -em_alloc(size_t size, em_event_type_t type, em_pool_t pool) -{ - mpool_elem_t *pool_elem; - - pool_elem = pool_elem_get(pool); - if (unlikely(size == 0 || - pool_elem == NULL || !pool_allocated(pool_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ALLOC, - "Invalid args: size:%zu type:%u pool:%" PRI_POOL "", - size, type, pool); - return EM_EVENT_UNDEF; - } - - /* - * EM event pools created with type=SW can not support pkt events. - */ - if (unlikely(pool_elem->event_type == EM_EVENT_TYPE_SW && - em_get_type_major(type) == EM_EVENT_TYPE_PACKET)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, - "EM-pool:%s(%" PRI_POOL "):\n" - "Invalid event type:0x%" PRIx32 " for buf", - pool_elem->name, pool_elem->em_pool, type); - return EM_EVENT_UNDEF; - } - - event_hdr_t *ev_hdr = event_alloc(pool_elem, size, type); - - if (unlikely(!ev_hdr)) { - em_status_t err = - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_ALLOC, - "EM-pool:'%s': sz:%zu type:0x%x pool:%" PRI_POOL "", - pool_elem->name, size, type, pool); - if (EM_CHECK_LEVEL > 1 && err != EM_OK && - em_shm->opt.pool.statistics_enable) { - em_pool_info_print(pool); - } - return EM_EVENT_UNDEF; - } - - em_event_t event = ev_hdr->event; - - /* Update event ESV state for alloc */ - if (esv_enabled()) - event = evstate_alloc(event, ev_hdr); - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_alloc(&event, 1, 1, size, type, pool); - - return event; -} - -int -em_alloc_multi(em_event_t events[/*out*/], int num, - size_t size, em_event_type_t type, em_pool_t pool) -{ - if (unlikely(num <= 0)) { - if (num < 0) - INTERNAL_ERROR(EM_ERR_TOO_SMALL, EM_ESCOPE_ALLOC_MULTI, - "Invalid arg: num:%d", num); - return 0; - } - - mpool_elem_t *const pool_elem = pool_elem_get(pool); - int ret; - - if (unlikely(size == 0 || - pool_elem == NULL || !pool_allocated(pool_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ALLOC_MULTI, - "Invalid args: size:%zu type:%u pool:%" PRI_POOL "", - size, type, pool); - return 0; - } - - if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) { - /* - * EM event pools created with type=PKT can support SW events - * as well as pkt events. - */ - ret = event_alloc_pkt_multi(events, num, pool_elem, size, type); - } else { /* pool_elem->event_type == EM_EVENT_TYPE_SW */ - /* - * EM event pools created with type=SW can not support - * pkt events. 
- */ - if (unlikely(em_get_type_major(type) == EM_EVENT_TYPE_PACKET)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC_MULTI, - "EM-pool:%s(%" PRI_POOL "): Invalid event type:%u for buf", - pool_elem->name, pool, type); - return 0; - } - - ret = event_alloc_buf_multi(events, num, pool_elem, size, type); - } - - if (unlikely(ret != num)) { - em_status_t err = - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_ALLOC_MULTI, - "Requested num:%d events, allocated:%d\n" - "EM-pool:'%s': sz:%zu type:0x%x pool:%" PRI_POOL "", - num, ret, - pool_elem->name, size, type, pool); - if (EM_CHECK_LEVEL > 1 && err != EM_OK && - em_shm->opt.pool.statistics_enable) { - em_pool_info_print(pool); - } - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_alloc(events, ret, num, size, type, pool); - - return ret; -} - -void -em_free(em_event_t event) -{ - odp_event_t odp_event; - - if (unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE, - "event undefined!"); - return; - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_free(&event, 1); - - if (esv_enabled()) { - event_hdr_t *const ev_hdr = event_to_hdr(event); - - evstate_free(event, ev_hdr, EVSTATE__FREE); - } - - odp_event = event_em2odp(event); - odp_event_free(odp_event); -} - -void em_free_multi(const em_event_t events[], int num) -{ - if (unlikely(!events || num < 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE_MULTI, - "Inv.args: events[]:%p num:%d", events, num); - return; - } - if (unlikely(num == 0)) - return; - - if (EM_CHECK_LEVEL > 1) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE_MULTI, - "events[%d] undefined!", i); - return; - } - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_free(events, num); - - odp_event_t odp_events[num]; - - if (esv_enabled()) { - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - evstate_free_multi(events, ev_hdrs, num, EVSTATE__FREE_MULTI); - } - - events_em2odp(events, odp_events/*out*/, num); - odp_event_free_multi(odp_events, num); -} - -em_status_t -em_send(em_event_t event, em_queue_t queue) -{ - const bool is_external = queue_external(queue); - queue_elem_t *q_elem = NULL; - event_hdr_t *ev_hdr; - int num_sent; - em_status_t stat; - - /* - * Check all args. - */ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF, - EM_ERR_BAD_ID, EM_ESCOPE_SEND, "Invalid event"); - - ev_hdr = event_to_hdr(event); - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - - if (!is_external) { - /* queue belongs to this EM instance */ - q_elem = queue_elem_get(queue); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !q_elem, - EM_ERR_BAD_ID, EM_ESCOPE_SEND, - "Invalid queue:%" PRI_QUEUE "", queue); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !queue_allocated(q_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_SEND, - "Invalid queue:%" PRI_QUEUE "", queue); - } - - /* Buffer events from EO-start sent to scheduled queues */ - if (unlikely(!is_external && - q_elem->scheduled && em_locm.start_eo_elem)) { - /* - * em_send() called from within an EO-start function: - * all events sent to scheduled queues will be buffered - * and sent when the EO-start operation completes. - */ - num_sent = eo_start_buffer_events(&event, 1, queue, - EM_EVENT_GROUP_UNDEF); - stat = num_sent == 1 ? 
EM_OK : EM_ERR_OPERATION_FAILED; - if (EM_CHECK_LEVEL == 0) - return stat; - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_SEND, - "send from EO-start failed"); - return EM_OK; - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_send(&event, 1, queue, EM_EVENT_GROUP_UNDEF); - - if (esv_enabled()) - evstate_usr2em(event, ev_hdr, EVSTATE__SEND); - - if (is_external) { - /* - * Send out of EM to another device via event-chaining and a - * user-provided function 'event_send_device()' - */ - stat = send_chaining(event, ev_hdr, queue); - if (EM_CHECK_LEVEL == 0) - return stat; - if (unlikely(stat != EM_OK)) { - stat = INTERNAL_ERROR(stat, EM_ESCOPE_SEND, - "send_chaining failed: Q:%" PRI_QUEUE "", - queue); - goto send_err; - } - return EM_OK; - } - - /* - * Normal send to a queue on this device - */ - switch (q_elem->type) { - case EM_QUEUE_TYPE_ATOMIC: - case EM_QUEUE_TYPE_PARALLEL: - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - stat = send_event(event, q_elem); - break; - case EM_QUEUE_TYPE_UNSCHEDULED: - stat = queue_unsched_enqueue(event, q_elem); - break; - case EM_QUEUE_TYPE_LOCAL: - stat = send_local(event, ev_hdr, q_elem); - break; - case EM_QUEUE_TYPE_OUTPUT: - stat = send_output(event, ev_hdr, q_elem); - break; - default: - stat = EM_ERR_NOT_FOUND; - break; - } - - if (EM_CHECK_LEVEL == 0) - return stat; - - if (unlikely(stat != EM_OK)) { - stat = - INTERNAL_ERROR(stat, EM_ESCOPE_SEND, - "send failed: Q:%" PRI_QUEUE " type:%" PRI_QTYPE "", - queue, q_elem->type); - goto send_err; - } - - return EM_OK; - -send_err: - if (esv_enabled()) - evstate_usr2em_revert(event, ev_hdr, EVSTATE__SEND__FAIL); - return stat; -} - -/* - * em_send_group_multi() helper: check function arguments - */ -static inline em_status_t -send_multi_check_args(const em_event_t events[], int num, em_queue_t queue, - bool *is_external__out /*out if EM_OK*/, - queue_elem_t **q_elem__out /*out if EM_OK*/) -{ - const bool is_external = queue_external(queue); - queue_elem_t *q_elem = NULL; - int i; - - if (EM_CHECK_LEVEL > 0 && unlikely(!events || num <= 0)) - return EM_ERR_BAD_ARG; - - if (EM_CHECK_LEVEL > 2) { - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) - return EM_ERR_BAD_POINTER; - } - - if (!is_external) { - /* queue belongs to this EM instance */ - q_elem = queue_elem_get(queue); - - if (EM_CHECK_LEVEL > 0 && unlikely(!q_elem)) - return EM_ERR_BAD_ARG; - if (EM_CHECK_LEVEL > 1 && unlikely(!queue_allocated(q_elem))) - return EM_ERR_BAD_STATE; - } - - *is_external__out = is_external; - *q_elem__out = q_elem; /* NULL if is_external */ - return EM_OK; -} - -int -em_send_multi(const em_event_t events[], int num, em_queue_t queue) -{ - bool is_external = false; /* set by check_args */ - queue_elem_t *q_elem = NULL; /* set by check_args */ - int num_sent; - int i; - - /* - * Check all args. - */ - em_status_t err = - send_multi_check_args(events, num, queue, - /*out if EM_OK:*/ &is_external, &q_elem); - if (unlikely(err != EM_OK)) { - INTERNAL_ERROR(err, EM_ESCOPE_SEND_MULTI, - "Invalid args: events:%p num:%d Q:%" PRI_QUEUE "", - events, num, queue); - return 0; - } - - /* Buffer events from EO-start sent to scheduled queues */ - if (unlikely(!is_external && - q_elem->scheduled && em_locm.start_eo_elem)) { - /* - * em_send_multi() called from within an EO-start function: - * all events sent to scheduled queues will be buffered - * and sent when the EO-start operation completes. 
- */ - num_sent = eo_start_buffer_events(events, num, queue, - EM_EVENT_GROUP_UNDEF); - if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_MULTI, - "send-multi EO-start: req:%d, sent:%d", - num, num_sent); - return num_sent; - } - - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - for (i = 0; i < num; i++) - ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF; - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_send(events, num, queue, EM_EVENT_GROUP_UNDEF); - - if (esv_enabled()) - evstate_usr2em_multi(events, ev_hdrs, num, EVSTATE__SEND_MULTI); - - if (is_external) { - /* - * Send out of EM to another device via event-chaining and a - * user-provided function 'event_send_device_multi()' - */ - num_sent = send_chaining_multi(events, ev_hdrs, num, queue); - if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { - INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, - EM_ESCOPE_SEND_MULTI, - "send_chaining_multi: req:%d, sent:%d", - num, num_sent); - goto send_multi_err; - } - return num_sent; - } - - /* - * Normal send to a queue on this device - */ - switch (q_elem->type) { - case EM_QUEUE_TYPE_ATOMIC: - case EM_QUEUE_TYPE_PARALLEL: - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - num_sent = send_event_multi(events, num, q_elem); - break; - case EM_QUEUE_TYPE_UNSCHEDULED: - num_sent = queue_unsched_enqueue_multi(events, num, q_elem); - break; - case EM_QUEUE_TYPE_LOCAL: - num_sent = send_local_multi(events, ev_hdrs, num, q_elem); - break; - case EM_QUEUE_TYPE_OUTPUT: - num_sent = send_output_multi(events, ev_hdrs, num, q_elem); - break; - default: - num_sent = 0; - break; - } - - if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_MULTI, - "send-multi failed: req:%d, sent:%d", - num, num_sent); - goto send_multi_err; - } - - return num_sent; - -send_multi_err: - if (esv_enabled()) { - evstate_usr2em_revert_multi(&events[num_sent], &ev_hdrs[num_sent], - num - num_sent, - EVSTATE__SEND_MULTI__FAIL); - } - return num_sent; -} - -void * -em_event_pointer(em_event_t event) -{ - if (unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_POINTER, - "event undefined!"); - return NULL; - } - - void *ev_ptr = event_pointer(event); - - if (unlikely(!ev_ptr)) - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_POINTER, - "Event pointer NULL (unrecognized event type)"); - - return ev_ptr; -} - -size_t -em_event_get_size(em_event_t event) -{ - odp_event_t odp_event; - odp_event_type_t odp_etype; - - if (unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GET_SIZE, - "event undefined!"); - return 0; - } - - odp_event = event_em2odp(event); - odp_etype = odp_event_type(odp_event); - - if (odp_etype == ODP_EVENT_PACKET) { - odp_packet_t odp_pkt = odp_packet_from_event(odp_event); - - return odp_packet_seg_len(odp_pkt); - } else if (odp_etype == ODP_EVENT_BUFFER) { - const event_hdr_t *ev_hdr = event_to_hdr(event); - - return ev_hdr->event_size; - } - - INTERNAL_ERROR(EM_ERR_NOT_FOUND, EM_ESCOPE_EVENT_GET_SIZE, - "Unexpected odp event type:%u", odp_etype); - return 0; -} - -em_pool_t em_event_get_pool(em_event_t event) -{ - if (unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_POOL, - "event undefined!"); - return EM_POOL_UNDEF; - } - - odp_event_t odp_event = event_em2odp(event); - odp_event_type_t type = odp_event_type(odp_event); - odp_pool_t odp_pool = ODP_POOL_INVALID; - - if (type == ODP_EVENT_PACKET) 
{ - odp_packet_t pkt = odp_packet_from_event(odp_event); - - odp_pool = odp_packet_pool(pkt); - } else if (type == ODP_EVENT_BUFFER) { - odp_buffer_t buf = odp_buffer_from_event(odp_event); - - odp_pool = odp_buffer_pool(buf); - } - - if (unlikely(odp_pool == ODP_POOL_INVALID)) - return EM_POOL_UNDEF; - - em_pool_t pool = pool_odp2em(odp_pool); - - /* - * Don't report an error if 'pool == EM_POOL_UNDEF' since that might - * happen if the event is input from pktio that is using external - * (to EM) odp pools. - */ - return pool; -} - -em_status_t -em_event_set_type(em_event_t event, em_event_type_t newtype) -{ - event_hdr_t *ev_hdr; - - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(event == EM_EVENT_UNDEF, EM_ERR_BAD_ID, - EM_ESCOPE_EVENT_SET_TYPE, "event undefined!") - - ev_hdr = event_to_hdr(event); - - if (EM_CHECK_LEVEL > 0) - RETURN_ERROR_IF(ev_hdr == NULL, EM_ERR_BAD_POINTER, - EM_ESCOPE_EVENT_SET_TYPE, "ev_hdr == NULL"); - - ev_hdr->event_type = newtype; - - return EM_OK; -} - -em_event_type_t -em_event_get_type(em_event_t event) -{ - const event_hdr_t *ev_hdr; - - if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GET_TYPE, - "event undefined!"); - return EM_EVENT_TYPE_UNDEF; - } - - ev_hdr = event_to_hdr(event); - - if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_GET_TYPE, - "ev_hdr == NULL"); - return EM_EVENT_TYPE_UNDEF; - } - - return ev_hdr->event_type; -} - -int em_event_get_type_multi(const em_event_t events[], int num, - em_event_type_t types[/*out:num*/]) -{ - int i; - - /* Check all args */ - if (EM_CHECK_LEVEL > 0) { - if (unlikely(!events || num < 0 || !types)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_GET_TYPE_MULTI, - "Inv.args: events:%p num:%d types:%p", - events, num, types); - return 0; - } - if (unlikely(!num)) - return 0; - } - - if (EM_CHECK_LEVEL > 1) { - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, - EM_ESCOPE_EVENT_GET_TYPE_MULTI, - "events[%d] undefined!", i); - return 0; - } - } - - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - - for (i = 0; i < num; i++) - types[i] = ev_hdrs[i]->event_type; - - return num; -} - -int em_event_same_type_multi(const em_event_t events[], int num, - em_event_type_t *same_type /*out*/) -{ - /* Check all args */ - if (EM_CHECK_LEVEL > 0) { - if (unlikely(!events || num < 0 || !same_type)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_SAME_TYPE_MULTI, - "Inv.args: events:%p num:%d same_type:%p", - events, num, same_type); - return 0; - } - if (unlikely(!num)) - return 0; - } - - if (EM_CHECK_LEVEL > 1) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, - EM_ESCOPE_EVENT_SAME_TYPE_MULTI, - "events[%d] undefined!", i); - return 0; - } - } - - const em_event_type_t type = event_to_hdr(events[0])->event_type; - int same = 1; - - for (; same < num && type == event_to_hdr(events[same])->event_type; - same++) - ; - - *same_type = type; - return same; -} - -em_status_t em_event_mark_send(em_event_t event, em_queue_t queue) -{ - if (!esv_enabled()) - return EM_OK; - - const queue_elem_t *const q_elem = queue_elem_get(queue); - - /* Check all args */ - if (EM_CHECK_LEVEL >= 1) - RETURN_ERROR_IF(event == EM_EVENT_UNDEF || q_elem == NULL, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_SEND, - "Inv.args: event:%" PRI_EVENT " 
Q:%" PRI_QUEUE "", - event, queue); - if (EM_CHECK_LEVEL >= 1) - RETURN_ERROR_IF(!queue_allocated(q_elem) || !q_elem->scheduled, - EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_MARK_SEND, - "Inv.queue:%" PRI_QUEUE " type:%" PRI_QTYPE "", - queue, q_elem->type); - - event_hdr_t *ev_hdr = event_to_hdr(event); - - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - evstate_usr2em(event, ev_hdr, EVSTATE__MARK_SEND); - - /* - * Data memory barrier, we are bypassing em_send(), odp_queue_enq() - * and need to guarantee memory sync before the event ends up into an - * EM queue again. - */ - odp_mb_full(); - - return EM_OK; -} - -em_status_t em_event_unmark_send(em_event_t event) -{ - if (!esv_enabled()) - return EM_OK; - - /* Check all args */ - if (EM_CHECK_LEVEL >= 1) - RETURN_ERROR_IF(event == EM_EVENT_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_SEND, - "Inv.args: event:%" PRI_EVENT "", event); - - event_hdr_t *ev_hdr = event_to_hdr(event); - - evstate_unmark_send(event, ev_hdr); - - return EM_OK; -} - -void em_event_mark_free(em_event_t event) -{ - if (!esv_enabled()) - return; - - if (unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE, - "Event undefined!"); - return; - } - - event_hdr_t *const ev_hdr = event_to_hdr(event); - - evstate_free(event, ev_hdr, EVSTATE__MARK_FREE); -} - -void em_event_unmark_free(em_event_t event) -{ - if (!esv_enabled()) - return; - - if (unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE, - "Event undefined!"); - return; - } - - event_hdr_t *const ev_hdr = event_to_hdr(event); - - evstate_unmark_free(event, ev_hdr); -} - -void em_event_mark_free_multi(const em_event_t events[], int num) -{ - if (!esv_enabled()) - return; - - if (unlikely(!events || num < 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE_MULTI, - "Inv.args: events[]:%p num:%d", events, num); - return; - } - if (unlikely(num == 0)) - return; - - if (EM_CHECK_LEVEL > 1) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_MARK_FREE_MULTI, - "events[%d] undefined!", i); - return; - } - } - - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - evstate_free_multi(events, ev_hdrs, num, EVSTATE__MARK_FREE_MULTI); -} - -void em_event_unmark_free_multi(const em_event_t events[], int num) -{ - if (!esv_enabled()) - return; - - if (unlikely(!events || num < 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, - "Inv.args: events[]:%p num:%d", events, num); - return; - } - if (unlikely(num == 0)) - return; - - if (EM_CHECK_LEVEL > 1) { - int i; - - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, - EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, - "events[%d] undefined!", i); - return; - } - } - - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - evstate_unmark_free_multi(events, ev_hdrs, num); -} - -em_event_t em_event_clone(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/) -{ - const mpool_elem_t *pool_elem = pool_elem_get(pool); - - /* Check all args */ - if (EM_CHECK_LEVEL >= 1 && - unlikely(event == EM_EVENT_UNDEF || - (pool != EM_POOL_UNDEF && - (pool_elem == NULL || !pool_allocated(pool_elem))))) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_CLONE, - "Inv.args: event:%" PRI_EVENT " pool:%" PRI_POOL "", - event, pool); - return EM_EVENT_UNDEF; - } - - odp_event_t odp_event = 
event_em2odp(event); - odp_event_type_t odp_evtype = odp_event_type(odp_event); - odp_pool_t odp_pool = ODP_POOL_INVALID; - odp_packet_t pkt = ODP_PACKET_INVALID; - odp_buffer_t buf = ODP_BUFFER_INVALID; - - if (unlikely(odp_evtype != ODP_EVENT_PACKET && - odp_evtype != ODP_EVENT_BUFFER)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_CLONE, - "Inv. odp-event-type:%d", odp_evtype); - return EM_EVENT_UNDEF; - } - - /* Obtain the event-hdr, event-size and the pool to use */ - const event_hdr_t *ev_hdr; - size_t size; - em_event_type_t type; - em_pool_t em_pool = pool; - em_event_t clone_event; /* return value */ - - if (odp_evtype == ODP_EVENT_PACKET) { - pkt = odp_packet_from_event(odp_event); - ev_hdr = odp_packet_user_area(pkt); - size = odp_packet_seg_len(pkt); - if (pool == EM_POOL_UNDEF) { - odp_pool = odp_packet_pool(pkt); - em_pool = pool_odp2em(odp_pool); - } - } else /* ODP_EVENT_BUFFER */ { - buf = odp_buffer_from_event(odp_event); - ev_hdr = odp_buffer_addr(buf); - size = ev_hdr->event_size; - if (pool == EM_POOL_UNDEF) { - odp_pool = odp_buffer_pool(buf); - em_pool = pool_odp2em(odp_pool); - } - } - - /* No EM-pool found */ - if (em_pool == EM_POOL_UNDEF) { - if (unlikely(odp_evtype == ODP_EVENT_BUFFER)) { - INTERNAL_ERROR(EM_ERR_NOT_FOUND, EM_ESCOPE_EVENT_CLONE, - "No suitable event-pool found"); - return EM_EVENT_UNDEF; - } - /* odp_evtype == ODP_EVENT_PACKET: - * Not an EM-pool, e.g. event from external pktio odp-pool. - * Allocate and clone pkt via ODP directly. - */ - clone_event = pkt_clone_odp(pkt, odp_pool); - if (unlikely(clone_event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_EVENT_CLONE, - "Cloning from ext odp-pool:%" PRIu64 " failed", - odp_pool_to_u64(odp_pool)); - } - return clone_event; - } - - /* - * Clone the event from an EM-pool: - */ - pool_elem = pool_elem_get(em_pool); - type = ev_hdr->event_type; - - /* EM event pools created with type=SW can not support pkt events */ - if (unlikely(pool_elem->event_type == EM_EVENT_TYPE_SW && - em_get_type_major(type) == EM_EVENT_TYPE_PACKET)) { - INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_EVENT_CLONE, - "EM-pool:%s(%" PRI_POOL "):\n" - "Invalid event type:0x%" PRIx32 " for buf", - pool_elem->name, em_pool, type); - return EM_EVENT_UNDEF; - } - - event_hdr_t *clone_hdr = event_alloc(pool_elem, size, type); - - if (unlikely(!clone_hdr)) { - em_status_t err = - INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_EVENT_CLONE, - "EM-pool:'%s': sz:%zu type:0x%x pool:%" PRI_POOL "", - pool_elem->name, size, type, em_pool); - if (EM_CHECK_LEVEL > 1 && err != EM_OK && - em_shm->opt.pool.statistics_enable) - em_pool_info_print(em_pool); - return EM_EVENT_UNDEF; - } - - clone_event = clone_hdr->event; - /* Update clone_event ESV state for the clone-alloc */ - if (esv_enabled()) - clone_event = evstate_clone(clone_event, clone_hdr); - - /* Call the 'alloc' API hook function also for event-clone */ - if (EM_API_HOOKS_ENABLE) - call_api_hooks_alloc(&clone_event, 1, 1, size, type, pool); - - /* Copy event payload from the parent event into the clone event */ - const void *src = event_pointer(event); - void *dst = event_pointer(clone_event); - - memcpy(dst, src, size); - - return clone_event; -} - -static int event_uarea_init(em_event_t event, event_hdr_t **ev_hdr/*out*/) -{ - odp_event_t odp_event = event_em2odp(event); - odp_event_type_t odp_evtype = odp_event_type(odp_event); - odp_pool_t odp_pool = ODP_POOL_INVALID; - odp_packet_t odp_pkt; - odp_buffer_t odp_buf; - event_hdr_t *hdr; - bool is_init; 
- - switch (odp_evtype) { - case ODP_EVENT_PACKET: - odp_pkt = odp_packet_from_event(odp_event); - hdr = odp_packet_user_area(odp_pkt); - is_init = hdr->user_area.isinit; - if (!is_init) - odp_pool = odp_packet_pool(odp_pkt); - break; - case ODP_EVENT_BUFFER: - odp_buf = odp_buffer_from_event(odp_event); - hdr = odp_buffer_addr(odp_buf); - is_init = hdr->user_area.isinit; - if (!is_init) - odp_pool = odp_buffer_pool(odp_buf); - break; - default: - return -1; - } - - *ev_hdr = hdr; - - if (!is_init) { - /* - * Event user area metadata is not initialized in - * the event header - initialize it: - */ - hdr->user_area.all = 0; /* user_area.{} = all zero (.sizes=0) */ - hdr->user_area.isinit = 1; - - em_pool_t pool = pool_odp2em(odp_pool); - - if (pool == EM_POOL_UNDEF) - return 0; /* ext ODP pool: OK, no user area, sz=0 */ - - /* Event from an EM event pool, can init event user area */ - const mpool_elem_t *pool_elem = pool_elem_get(pool); - - if (unlikely(!pool_elem)) - return -2; /* invalid pool_elem */ - - hdr->user_area.req_size = pool_elem->user_area.req_size; - hdr->user_area.pad_size = pool_elem->user_area.pad_size; - } - - return 0; -} - -void *em_event_uarea_get(em_event_t event, size_t *size /*out, if given*/) -{ - /* Check args */ - if (EM_CHECK_LEVEL >= 1 && - unlikely(event == EM_EVENT_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_GET, - "Inv.arg: event undef"); - goto no_uarea; - } - - event_hdr_t *ev_hdr = NULL; - int err = event_uarea_init(event, &ev_hdr/*out*/); - - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_EVENT_UAREA_GET, - "Cannot init event user area: %d", err); - goto no_uarea; - } - - if (ev_hdr->user_area.req_size == 0) - goto no_uarea; - - /* - * Event has user area configured, return pointer and size - */ - void *uarea_ptr = (void *)((uintptr_t)ev_hdr + sizeof(event_hdr_t)); - - if (size) - *size = ev_hdr->user_area.req_size; - - return uarea_ptr; - -no_uarea: - if (size) - *size = 0; - return NULL; -} - -em_status_t em_event_uarea_id_set(em_event_t event, uint16_t id) -{ - /* Check args */ - if (EM_CHECK_LEVEL >= 1) - RETURN_ERROR_IF(event == EM_EVENT_UNDEF, - EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_ID_SET, - "Inv.arg: event undef"); - - event_hdr_t *ev_hdr = NULL; - int err = event_uarea_init(event, &ev_hdr/*out*/); - - RETURN_ERROR_IF(err, EM_ERR_OPERATION_FAILED, - EM_ESCOPE_EVENT_UAREA_ID_SET, - "Cannot init event user area: %d", err); - - ev_hdr->user_area.id = id; - ev_hdr->user_area.isset_id = 1; - - return EM_OK; -} - -em_status_t em_event_uarea_id_get(em_event_t event, bool *isset /*out*/, - uint16_t *id /*out*/) -{ - bool id_set = false; - em_status_t status = EM_OK; - - /* Check args, either 'isset' or 'id' ptrs must be provided (or both) */ - if (EM_CHECK_LEVEL >= 1 && - (event == EM_EVENT_UNDEF || !(id || isset))) { - status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_ID_GET, - "Inv.args: event:%" PRI_EVENT " isset:%p id:%p", - event, isset, id); - goto id_isset; - } - - event_hdr_t *ev_hdr = NULL; - int err = event_uarea_init(event, &ev_hdr/*out*/); - - if (unlikely(err)) { - status = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, - EM_ESCOPE_EVENT_UAREA_ID_GET, - "Cannot init event user area: %d", err); - goto id_isset; - } - - if (ev_hdr->user_area.isset_id) { - /* user-area-id has been set */ - id_set = true; - if (id) - *id = ev_hdr->user_area.id; /*out*/ - } - -id_isset: - if (isset) - *isset = id_set; /*out*/ - return status; -} - -em_status_t em_event_uarea_info(em_event_t event, - 
em_event_uarea_info_t *uarea_info /*out*/) -{ - em_status_t status = EM_ERROR; - - /* Check args */ - if (EM_CHECK_LEVEL >= 1 && - unlikely(event == EM_EVENT_UNDEF || !uarea_info)) { - status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_INFO, - "Inv.args: event:%" PRI_EVENT " uarea_info:%p", - event, uarea_info); - goto err_uarea; - } - - event_hdr_t *ev_hdr = NULL; - int err = event_uarea_init(event, &ev_hdr/*out*/); - - if (unlikely(err)) { - status = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, - EM_ESCOPE_EVENT_UAREA_INFO, - "Cannot init event user area: %d", err); - goto err_uarea; - } - - if (ev_hdr->user_area.req_size == 0) { - uarea_info->uarea = NULL; - uarea_info->size = 0; - } else { - uarea_info->uarea = (void *)((uintptr_t)ev_hdr + - sizeof(event_hdr_t)); - uarea_info->size = ev_hdr->user_area.req_size; - } - - if (ev_hdr->user_area.isset_id) { - uarea_info->id.isset = true; - uarea_info->id.value = ev_hdr->user_area.id; - } else { - uarea_info->id.isset = false; - uarea_info->id.value = 0; - } - - return EM_OK; - -err_uarea: - if (uarea_info) { - uarea_info->uarea = NULL; - uarea_info->size = 0; - uarea_info->id.isset = false; - uarea_info->id.value = 0; - } - return status; -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "em_include.h" + +em_event_t em_alloc(uint32_t size, em_event_type_t type, em_pool_t pool) +{ + mpool_elem_t *pool_elem; + + pool_elem = pool_elem_get(pool); + if (unlikely(size == 0 || + pool_elem == NULL || !pool_allocated(pool_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ALLOC, + "Invalid args: size:%u type:%u pool:%" PRI_POOL "", + size, type, pool); + return EM_EVENT_UNDEF; + } + + em_event_type_t major_type = em_event_type_major(type); + + /* + * EM event pools created with type=SW can not support pkt events. 
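
In practice the requested event type must be compatible with the pool type: pools created with type=PKT also serve SW events, but not vice versa. A sketch of the pairing rules (hedged: 'pkt_pool' and 'buf_pool' are assumed em_pool_create() handles, sizes illustrative):

	em_event_t pkt_ev = em_alloc(1518, EM_EVENT_TYPE_PACKET, pkt_pool); /* ok */
	em_event_t sw_ev = em_alloc(256, EM_EVENT_TYPE_SW, pkt_pool);       /* ok: a PKT pool serves SW events too */
	em_event_t bad_ev = em_alloc(256, EM_EVENT_TYPE_PACKET, buf_pool);  /* rejected: returns EM_EVENT_UNDEF */
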
+ */ + if (unlikely(pool_elem->event_type == EM_EVENT_TYPE_SW && + major_type == EM_EVENT_TYPE_PACKET)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, + "EM-pool:%s(%" PRI_POOL "):\n" + "Invalid event type:0x%x for buf", + pool_elem->name, pool_elem->em_pool, type); + return EM_EVENT_UNDEF; + } + if (unlikely(pool_elem->event_type == EM_EVENT_TYPE_VECTOR && + major_type != EM_EVENT_TYPE_VECTOR)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC, + "EM-pool:%s(%" PRI_POOL "):\n" + "Invalid event type:0x%x for vector", + pool_elem->name, pool_elem->em_pool, type); + return EM_EVENT_UNDEF; + } + + event_hdr_t *ev_hdr = event_alloc(pool_elem, size, type); + + if (unlikely(!ev_hdr)) { + em_status_t err = + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_ALLOC, + "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", + pool_elem->name, size, type, pool); + if (EM_CHECK_LEVEL > 1 && err != EM_OK && + em_shm->opt.pool.statistics_enable) { + em_pool_info_print(pool); + } + return EM_EVENT_UNDEF; + } + + em_event_t event = ev_hdr->event; + + /* Update event ESV state for alloc */ + if (esv_enabled()) + event = evstate_alloc(event, ev_hdr); + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_alloc(&event, 1, 1, size, type, pool); + + return event; +} + +int em_alloc_multi(em_event_t events[/*out*/], int num, + uint32_t size, em_event_type_t type, em_pool_t pool) +{ + if (unlikely(num <= 0)) { + if (num < 0) + INTERNAL_ERROR(EM_ERR_TOO_SMALL, EM_ESCOPE_ALLOC_MULTI, + "Invalid arg: num:%d", num); + return 0; + } + + mpool_elem_t *const pool_elem = pool_elem_get(pool); + int ret = 0; + + if (unlikely(size == 0 || + pool_elem == NULL || !pool_allocated(pool_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_ALLOC_MULTI, + "Invalid args: size:%u type:%u pool:%" PRI_POOL "", + size, type, pool); + return 0; + } + + if (pool_elem->event_type == EM_EVENT_TYPE_PACKET) { + /* + * EM event pools created with type=PKT can support SW events + * as well as pkt events. + */ + ret = event_alloc_pkt_multi(events, num, pool_elem, size, type); + } else if (pool_elem->event_type == EM_EVENT_TYPE_SW) { + /* + * EM event pools created with type=SW can not support + * pkt events. + */ + if (unlikely(em_event_type_major(type) == EM_EVENT_TYPE_PACKET)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC_MULTI, + "EM-pool:%s(%" PRI_POOL "): Invalid event type:0x%x for buf", + pool_elem->name, pool, type); + return 0; + } + + ret = event_alloc_buf_multi(events, num, pool_elem, size, type); + } else if (pool_elem->event_type == EM_EVENT_TYPE_VECTOR) { + if (unlikely(em_event_type_major(type) != EM_EVENT_TYPE_VECTOR)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_ALLOC_MULTI, + "EM-pool:%s(%" PRI_POOL "): Inv.
event type:0x%x for vector", + pool_elem->name, pool, type); + return 0; + } + ret = event_alloc_vector_multi(events, num, pool_elem, size, type); + } + + if (unlikely(ret != num)) { + em_status_t err = + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_ALLOC_MULTI, + "Requested num:%d events, allocated:%d\n" + "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", + num, ret, + pool_elem->name, size, type, pool); + if (EM_CHECK_LEVEL > 1 && err != EM_OK && + em_shm->opt.pool.statistics_enable) { + em_pool_info_print(pool); + } + } + + if (EM_API_HOOKS_ENABLE && ret > 0) + call_api_hooks_alloc(events, ret, num, size, type, pool); + + return ret; +} + +/** + * @brief Helper to check if the event is a vector + * + * @param vector_event Event handle + * @return true the event is a vector + * @return false the event is NOT a vector + */ +static inline bool is_vector_type(em_event_t vector_event) +{ + const event_hdr_t *ev_hdr = event_to_hdr(vector_event); + em_event_type_t etype = em_event_type_major(ev_hdr->event_type); + odp_event_t odp_event = event_em2odp(vector_event); + odp_event_type_t odp_etype = odp_event_type(odp_event); + + if (etype != EM_EVENT_TYPE_VECTOR || + odp_etype != ODP_EVENT_PACKET_VECTOR) + return false; + + return true; +} + +/** + * @brief Helper to check if the event is a vector, if not report an error + * + * @param vector_event Event handle + * @param escope Error scope to use if reporting an error + * @return true the event is a vector + * @return false the event is NOT a vector, reports an error + */ +static inline bool is_vector_type_or_error(em_event_t vector_event, + em_escope_t escope) +{ + bool is_vec = is_vector_type(vector_event); + + if (likely(is_vec)) + return true; + + INTERNAL_ERROR(EM_ERR_BAD_ID, escope, "Event not a vector"); + return false; +} + +/** + * @brief Handle ESV state for 'em_free' for the event-table of a vector event + * + * @param event Vector event handle + */ +static void event_vector_prepare_free_full(em_event_t event, const uint16_t api_op) +{ + /* em_free() frees the vector as well as all the events it contains */ + em_event_t *ev_tbl; + uint32_t sz = event_vector_tbl(event, &ev_tbl); + + if (sz) { + event_hdr_t *ev_hdrs[sz]; + + event_to_hdr_multi(ev_tbl, ev_hdrs, sz); + evstate_free_multi(ev_tbl, ev_hdrs, sz, api_op); + + /* drop ESV generation from event handles */ + (void)events_em2pkt_inplace(ev_tbl, sz); + } +} + +void em_free(em_event_t event) +{ + if (unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE, + "event undefined!"); + return; + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_free(&event, 1); + + if (esv_enabled()) { + event_hdr_t *const ev_hdr = event_to_hdr(event); + + evstate_free(event, ev_hdr, EVSTATE__FREE); + + if (is_vector_type(event)) + event_vector_prepare_free_full(event, EVSTATE__FREE); + } + + odp_event_t odp_event = event_em2odp(event); + + odp_event_free(odp_event); +} + +void em_free_multi(const em_event_t events[], int num) +{ + if (unlikely(!events || num < 0)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE_MULTI, + "Inv.args: events[]:%p num:%d", events, num); + return; + } + if (unlikely(num == 0)) + return; + + if (EM_CHECK_LEVEL > 1) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_FREE_MULTI, + "events[%d] undefined!", i); + return; + } + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_free(events, num); + + odp_event_t odp_events[num]; + + if (esv_enabled()) { + 
event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + evstate_free_multi(events, ev_hdrs, num, EVSTATE__FREE_MULTI); + + for (int i = 0; i < num; i++) { + if (is_vector_type(events[i])) + event_vector_prepare_free_full(events[i], EVSTATE__FREE_MULTI); + } + } + + events_em2odp(events, odp_events/*out*/, num); + odp_event_free_multi(odp_events, num); +} + +em_status_t em_send(em_event_t event, em_queue_t queue) +{ + const bool is_external = queue_external(queue); + queue_elem_t *q_elem = NULL; + event_hdr_t *ev_hdr; + int num_sent; + em_status_t stat; + + /* + * Check all args. + */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF, + EM_ERR_BAD_ID, EM_ESCOPE_SEND, "Invalid event"); + + ev_hdr = event_to_hdr(event); + + /* avoid unnecessary writing 'undef' in case event is a ref */ + if (ev_hdr->egrp != EM_EVENT_GROUP_UNDEF) + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + + if (!is_external) { + /* queue belongs to this EM instance */ + q_elem = queue_elem_get(queue); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !q_elem, + EM_ERR_BAD_ID, EM_ESCOPE_SEND, + "Invalid queue:%" PRI_QUEUE "", queue); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !queue_allocated(q_elem), + EM_ERR_BAD_STATE, EM_ESCOPE_SEND, + "Invalid queue:%" PRI_QUEUE "", queue); + } + + /* Buffer events from EO-start sent to scheduled queues */ + if (unlikely(!is_external && + q_elem->scheduled && em_locm.start_eo_elem)) { + /* + * em_send() called from within an EO-start function: + * all events sent to scheduled queues will be buffered + * and sent when the EO-start operation completes. + */ + if (esv_enabled()) + evstate_usr2em(event, ev_hdr, EVSTATE__SEND); + + num_sent = eo_start_buffer_events(&event, 1, queue); + stat = num_sent == 1 ? EM_OK : EM_ERR_OPERATION_FAILED; + if (EM_CHECK_LEVEL == 0) + return stat; + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_SEND, + "send from EO-start failed"); + return EM_OK; + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_send(&event, 1, queue, EM_EVENT_GROUP_UNDEF); + + if (is_external || q_elem->type == EM_QUEUE_TYPE_OUTPUT) { + /* + * Send out of EM, either + * - via event-chaining and a user-provided function + * 'event_send_device()' to another device + * OR + * - via an EM output-queue and a user provided function of type + * em_output_func_t + */ + if (is_external) /* EMC/BIP */ + stat = send_chaining(event, queue); + else /* EM output queue */ + stat = send_output(event, q_elem); + + if (EM_CHECK_LEVEL == 0) + return stat; + + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_SEND, + "send out-of-EM via %s failed: Q:%" PRI_QUEUE "", + is_external ? 
"event-chaining" : "output queue", + queue); + return EM_OK; + } + + /* + * Normal send to a queue on this device + */ + if (esv_enabled()) + evstate_usr2em(event, ev_hdr, EVSTATE__SEND); + + switch (q_elem->type) { + case EM_QUEUE_TYPE_ATOMIC: + case EM_QUEUE_TYPE_PARALLEL: + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + stat = send_event(event, q_elem); + break; + case EM_QUEUE_TYPE_UNSCHEDULED: + stat = queue_unsched_enqueue(event, q_elem); + break; + case EM_QUEUE_TYPE_LOCAL: + stat = send_local(event, q_elem); + break; + default: + stat = EM_ERR_NOT_FOUND; + break; + } + + if (EM_CHECK_LEVEL == 0) + return stat; + + if (unlikely(stat != EM_OK)) { + stat = INTERNAL_ERROR(stat, EM_ESCOPE_SEND, + "send failed: Q:%" PRI_QUEUE " type:%" PRI_QTYPE "", + queue, q_elem->type); + if (esv_enabled()) + evstate_usr2em_revert(event, ev_hdr, + EVSTATE__SEND__FAIL); + return stat; + } + + return EM_OK; +} + +/* + * em_send_group_multi() helper: check function arguments + */ +static inline em_status_t +send_multi_check_args(const em_event_t events[], int num, em_queue_t queue, + bool *is_external__out /*out if EM_OK*/, + queue_elem_t **q_elem__out /*out if EM_OK*/) +{ + const bool is_external = queue_external(queue); + queue_elem_t *q_elem = NULL; + int i; + + if (EM_CHECK_LEVEL > 0 && unlikely(!events || num <= 0)) + return EM_ERR_BAD_ARG; + + if (EM_CHECK_LEVEL > 2) { + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) + return EM_ERR_BAD_POINTER; + } + + if (!is_external) { + /* queue belongs to this EM instance */ + q_elem = queue_elem_get(queue); + + if (EM_CHECK_LEVEL > 0 && unlikely(!q_elem)) + return EM_ERR_BAD_ARG; + if (EM_CHECK_LEVEL > 1 && unlikely(!queue_allocated(q_elem))) + return EM_ERR_BAD_STATE; + } + + *is_external__out = is_external; + *q_elem__out = q_elem; /* NULL if is_external */ + return EM_OK; +} + +int em_send_multi(const em_event_t events[], int num, em_queue_t queue) +{ + bool is_external = false; /* set by check_args */ + queue_elem_t *q_elem = NULL; /* set by check_args */ + int num_sent; + + /* + * Check all args. + */ + em_status_t err = + send_multi_check_args(events, num, queue, + /*out if EM_OK:*/ &is_external, &q_elem); + if (unlikely(err != EM_OK)) { + INTERNAL_ERROR(err, EM_ESCOPE_SEND_MULTI, + "Invalid args: events:%p num:%d Q:%" PRI_QUEUE "", + events, num, queue); + return 0; + } + + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + for (int i = 0; i < num; i++) { + /* avoid unnecessary writing 'undef' in case event is a ref */ + if (ev_hdrs[i]->egrp != EM_EVENT_GROUP_UNDEF) + ev_hdrs[i]->egrp = EM_EVENT_GROUP_UNDEF; + } + + /* Buffer events from EO-start sent to scheduled queues */ + if (unlikely(!is_external && + q_elem->scheduled && em_locm.start_eo_elem)) { + /* + * em_send_multi() called from within an EO-start function: + * all events sent to scheduled queues will be buffered + * and sent when the EO-start operation completes. 
+ */ + if (esv_enabled()) + evstate_usr2em_multi(events, ev_hdrs, num, + EVSTATE__SEND_MULTI); + num_sent = eo_start_buffer_events(events, num, queue); + if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_MULTI, + "send-multi EO-start: req:%d, sent:%d", + num, num_sent); + return num_sent; + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_send(events, num, queue, EM_EVENT_GROUP_UNDEF); + + if (is_external || q_elem->type == EM_QUEUE_TYPE_OUTPUT) { + /* + * Send out of EM, either + * - via event-chaining and a user-provided function + * 'event_send_device()' to another device + * OR + * - via an EM output-queue and a user provided function of type + * em_output_func_t + */ + if (is_external) /* EMC/BIP */ + num_sent = send_chaining_multi(events, num, queue); + else /* EM output queue */ + num_sent = send_output_multi(events, num, q_elem); + + if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, + EM_ESCOPE_SEND_MULTI, + "send_chaining_multi: req:%d, sent:%d", + num, num_sent); + } + return num_sent; + } + + /* + * Normal send to a queue on this device + */ + if (esv_enabled()) + evstate_usr2em_multi(events, ev_hdrs, num, EVSTATE__SEND_MULTI); + + switch (q_elem->type) { + case EM_QUEUE_TYPE_ATOMIC: + case EM_QUEUE_TYPE_PARALLEL: + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + num_sent = send_event_multi(events, num, q_elem); + break; + case EM_QUEUE_TYPE_UNSCHEDULED: + num_sent = queue_unsched_enqueue_multi(events, num, q_elem); + break; + case EM_QUEUE_TYPE_LOCAL: + num_sent = send_local_multi(events, num, q_elem); + break; + default: + num_sent = 0; + break; + } + + if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_MULTI, + "send-multi failed: req:%d, sent:%d", + num, num_sent); + if (esv_enabled()) + evstate_usr2em_revert_multi(&events[num_sent], + &ev_hdrs[num_sent], + num - num_sent, + EVSTATE__SEND_MULTI__FAIL); + } + + return num_sent; +} + +void *em_event_pointer(em_event_t event) +{ + if (unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_POINTER, + "event undefined!"); + return NULL; + } + + void *ev_ptr = event_pointer(event); + + if (unlikely(!ev_ptr)) + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_POINTER, + "Event pointer NULL (unsupported event type)"); + + return ev_ptr; +} + +uint32_t em_event_get_size(em_event_t event) +{ + odp_event_t odp_event; + odp_event_type_t odp_etype; + + if (unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GET_SIZE, + "event undefined!"); + return 0; + } + + odp_event = event_em2odp(event); + odp_etype = odp_event_type(odp_event); + + if (odp_etype == ODP_EVENT_PACKET) { + odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + + return odp_packet_seg_len(odp_pkt); + } else if (odp_etype == ODP_EVENT_BUFFER) { + const event_hdr_t *ev_hdr = event_to_hdr(event); + + return ev_hdr->event_size; + } + + INTERNAL_ERROR(EM_ERR_NOT_FOUND, EM_ESCOPE_EVENT_GET_SIZE, + "Unexpected odp event type:%u", odp_etype); + return 0; +} + +em_pool_t em_event_get_pool(em_event_t event) +{ + if (unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_GET_POOL, + "event undefined!"); + return EM_POOL_UNDEF; + } + + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t type = odp_event_type(odp_event); + odp_pool_t odp_pool = ODP_POOL_INVALID; + + if (type == ODP_EVENT_PACKET) { + odp_packet_t pkt = 
odp_packet_from_event(odp_event); + + odp_pool = odp_packet_pool(pkt); + } else if (type == ODP_EVENT_BUFFER) { + odp_buffer_t buf = odp_buffer_from_event(odp_event); + + odp_pool = odp_buffer_pool(buf); + } else if (type == ODP_EVENT_PACKET_VECTOR) { + odp_packet_vector_t pktvec = odp_packet_vector_from_event(odp_event); + + odp_pool = odp_packet_vector_pool(pktvec); + } + + if (unlikely(odp_pool == ODP_POOL_INVALID)) + return EM_POOL_UNDEF; + + em_pool_t pool = pool_odp2em(odp_pool); + + /* + * Don't report an error if 'pool == EM_POOL_UNDEF' since that might + * happen if the event is e.g. input from pktio that is using external + * (to EM) odp pools. + */ + return pool; +} + +em_status_t em_event_set_type(em_event_t event, em_event_type_t newtype) +{ + if (EM_CHECK_LEVEL > 0) + RETURN_ERROR_IF(event == EM_EVENT_UNDEF, EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_SET_TYPE, "event undefined!"); + + /* similar to 'ev_hdr = event_to_hdr(event)', slightly extended: */ + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t evtype = odp_event_type(odp_event); + event_hdr_t *ev_hdr; + + switch (evtype) { + case ODP_EVENT_PACKET: { + odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + + ev_hdr = odp_packet_user_area(odp_pkt); + break; + } + case ODP_EVENT_BUFFER: { + odp_buffer_t odp_buf = odp_buffer_from_event(odp_event); + + ev_hdr = odp_buffer_addr(odp_buf); + break; + } + case ODP_EVENT_PACKET_VECTOR: { + odp_packet_vector_t odp_pktvec = odp_packet_vector_from_event(odp_event); + em_event_type_t new_major = em_event_type_major(newtype); + + RETURN_ERROR_IF(new_major != EM_EVENT_TYPE_VECTOR, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_SET_TYPE, + "Event type:0x%x not suitable for a vector", newtype); + ev_hdr = odp_packet_vector_user_area(odp_pktvec); + break; + } + default: + return INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_EVENT_SET_TYPE, + "Unsupported odp event type:%u", evtype); + } + + ev_hdr->event_type = newtype; + + return EM_OK; +} + +em_event_type_t em_event_get_type(em_event_t event) +{ + const event_hdr_t *ev_hdr; + + if (EM_CHECK_LEVEL > 0 && unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GET_TYPE, + "event undefined!"); + return EM_EVENT_TYPE_UNDEF; + } + + ev_hdr = event_to_hdr(event); + + if (EM_CHECK_LEVEL > 0 && unlikely(ev_hdr == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_EVENT_GET_TYPE, + "ev_hdr == NULL"); + return EM_EVENT_TYPE_UNDEF; + } + + return ev_hdr->event_type; +} + +int em_event_get_type_multi(const em_event_t events[], int num, + em_event_type_t types[/*out:num*/]) +{ + int i; + + /* Check all args */ + if (EM_CHECK_LEVEL > 0) { + if (unlikely(!events || num < 0 || !types)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_GET_TYPE_MULTI, + "Inv.args: events:%p num:%d types:%p", + events, num, types); + return 0; + } + if (unlikely(!num)) + return 0; + } + + if (EM_CHECK_LEVEL > 1) { + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, + EM_ESCOPE_EVENT_GET_TYPE_MULTI, + "events[%d] undefined!", i); + return 0; + } + } + + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + + for (i = 0; i < num; i++) + types[i] = ev_hdrs[i]->event_type; + + return num; +} + +int em_event_same_type_multi(const em_event_t events[], int num, + em_event_type_t *same_type /*out*/) +{ + /* Check all args */ + if (EM_CHECK_LEVEL > 0) { + if (unlikely(!events || num < 0 || !same_type)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + 
EM_ESCOPE_EVENT_SAME_TYPE_MULTI, + "Inv.args: events:%p num:%d same_type:%p", + events, num, same_type); + return 0; + } + if (unlikely(!num)) + return 0; + } + + if (EM_CHECK_LEVEL > 1) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, + EM_ESCOPE_EVENT_SAME_TYPE_MULTI, + "events[%d] undefined!", i); + return 0; + } + } + + const em_event_type_t type = event_to_hdr(events[0])->event_type; + int same = 1; + + for (; same < num && type == event_to_hdr(events[same])->event_type; + same++) + ; + + *same_type = type; + return same; +} + +em_status_t em_event_mark_send(em_event_t event, em_queue_t queue) +{ + if (!esv_enabled()) + return EM_OK; + + const queue_elem_t *const q_elem = queue_elem_get(queue); + + /* Check all args */ + if (EM_CHECK_LEVEL >= 1) + RETURN_ERROR_IF(event == EM_EVENT_UNDEF || q_elem == NULL, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_SEND, + "Inv.args: event:%" PRI_EVENT " Q:%" PRI_QUEUE "", + event, queue); + if (EM_CHECK_LEVEL >= 1) + RETURN_ERROR_IF(!queue_allocated(q_elem) || !q_elem->scheduled, + EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_MARK_SEND, + "Inv.queue:%" PRI_QUEUE " type:%" PRI_QTYPE "", + queue, q_elem->type); + + event_hdr_t *ev_hdr = event_to_hdr(event); + + /* avoid unnecessary writing 'undef' in case event is a ref */ + if (ev_hdr->egrp != EM_EVENT_GROUP_UNDEF) + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + + evstate_usr2em(event, ev_hdr, EVSTATE__MARK_SEND); + + /* + * Data memory barrier, we are bypassing em_send(), odp_queue_enq() + * and need to guarantee memory sync before the event ends up into an + * EM queue again. + */ + odp_mb_full(); + + return EM_OK; +} + +em_status_t em_event_unmark_send(em_event_t event) +{ + if (!esv_enabled()) + return EM_OK; + + /* Check all args */ + if (EM_CHECK_LEVEL >= 1) + RETURN_ERROR_IF(event == EM_EVENT_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_SEND, + "Inv.args: event:%" PRI_EVENT "", event); + + event_hdr_t *ev_hdr = event_to_hdr(event); + + evstate_unmark_send(event, ev_hdr); + + return EM_OK; +} + +void em_event_mark_free(em_event_t event) +{ + if (!esv_enabled()) + return; + + if (unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE, + "Event undefined!"); + return; + } + + event_hdr_t *const ev_hdr = event_to_hdr(event); + + evstate_free(event, ev_hdr, EVSTATE__MARK_FREE); + + if (is_vector_type(event)) + event_vector_prepare_free_full(event, EVSTATE__MARK_FREE); +} + +void em_event_unmark_free(em_event_t event) +{ + if (!esv_enabled()) + return; + + if (unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE, + "Event undefined!"); + return; + } + + event_hdr_t *const ev_hdr = event_to_hdr(event); + + evstate_unmark_free(event, ev_hdr); +} + +void em_event_mark_free_multi(const em_event_t events[], int num) +{ + if (!esv_enabled()) + return; + + if (unlikely(!events || num < 0)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_MARK_FREE_MULTI, + "Inv.args: events[]:%p num:%d", events, num); + return; + } + if (unlikely(num == 0)) + return; + + if (EM_CHECK_LEVEL > 1) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_MARK_FREE_MULTI, + "events[%d] undefined!", i); + return; + } + } + + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + evstate_free_multi(events, ev_hdrs, num, EVSTATE__MARK_FREE_MULTI); + + for 
(int i = 0; i < num; i++) { + if (is_vector_type(events[i])) + event_vector_prepare_free_full(events[i], EVSTATE__MARK_FREE_MULTI); + } +} + +void em_event_unmark_free_multi(const em_event_t events[], int num) +{ + if (!esv_enabled()) + return; + + if (unlikely(!events || num < 0)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, + "Inv.args: events[]:%p num:%d", events, num); + return; + } + if (unlikely(num == 0)) + return; + + if (EM_CHECK_LEVEL > 1) { + int i; + + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, + EM_ESCOPE_EVENT_UNMARK_FREE_MULTI, + "events[%d] undefined!", i); + return; + } + } + + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + evstate_unmark_free_multi(events, ev_hdrs, num); +} + +em_event_t em_event_clone(em_event_t event, em_pool_t pool/*or EM_POOL_UNDEF*/) +{ + const mpool_elem_t *pool_elem = pool_elem_get(pool); + + /* Check all args */ + if (EM_CHECK_LEVEL >= 1 && + unlikely(event == EM_EVENT_UNDEF || + (pool != EM_POOL_UNDEF && + (pool_elem == NULL || !pool_allocated(pool_elem))))) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_CLONE, + "Inv.args: event:%" PRI_EVENT " pool:%" PRI_POOL "", + event, pool); + return EM_EVENT_UNDEF; + } + + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t odp_evtype = odp_event_type(odp_event); + odp_pool_t odp_pool = ODP_POOL_INVALID; + odp_packet_t pkt = ODP_PACKET_INVALID; + odp_buffer_t buf = ODP_BUFFER_INVALID; + + if (unlikely(odp_evtype != ODP_EVENT_PACKET && + odp_evtype != ODP_EVENT_BUFFER)) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_CLONE, + "Inv. odp-event-type:%d", odp_evtype); + return EM_EVENT_UNDEF; + } + + /* Obtain the event-hdr, event-size and the pool to use */ + const event_hdr_t *ev_hdr; + uint32_t size; + em_event_type_t type; + em_pool_t em_pool = pool; + em_event_t clone_event; /* return value */ + + if (odp_evtype == ODP_EVENT_PACKET) { + pkt = odp_packet_from_event(odp_event); + ev_hdr = odp_packet_user_area(pkt); + size = odp_packet_seg_len(pkt); + if (pool == EM_POOL_UNDEF) { + odp_pool = odp_packet_pool(pkt); + em_pool = pool_odp2em(odp_pool); + } + } else /* ODP_EVENT_BUFFER */ { + buf = odp_buffer_from_event(odp_event); + ev_hdr = odp_buffer_addr(buf); + size = ev_hdr->event_size; + if (pool == EM_POOL_UNDEF) { + odp_pool = odp_buffer_pool(buf); + em_pool = pool_odp2em(odp_pool); + } + } + + /* No EM-pool found */ + if (em_pool == EM_POOL_UNDEF) { + if (unlikely(odp_evtype == ODP_EVENT_BUFFER)) { + INTERNAL_ERROR(EM_ERR_NOT_FOUND, EM_ESCOPE_EVENT_CLONE, + "No suitable event-pool found"); + return EM_EVENT_UNDEF; + } + /* odp_evtype == ODP_EVENT_PACKET: + * Not an EM-pool, e.g. event from external pktio odp-pool. + * Allocate and clone pkt via ODP directly. 
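
Cloning with pool == EM_POOL_UNDEF therefore also works for packets that arrived through a non-EM odp pool (e.g. pktio input), in which case the copy is made via a direct ODP packet clone. A sketch from an EO receive function (hedged: 'my_receive' and 'mirror_queue' are illustrative names, the mirror queue is assumed to be set up elsewhere):

static void my_receive(void *eo_ctx, em_event_t event, em_event_type_t type,
		       em_queue_t queue, void *q_ctx)
{
	(void)eo_ctx; (void)type; (void)q_ctx;

	/* EM_POOL_UNDEF: clone from the parent's pool, falling back to ODP
	 * when the parent comes from an external (non-EM) odp pool.
	 */
	em_event_t copy = em_event_clone(event, EM_POOL_UNDEF);

	if (copy != EM_EVENT_UNDEF && em_send(copy, mirror_queue) != EM_OK)
		em_free(copy);

	if (em_send(event, queue) != EM_OK)
		em_free(event);
}
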
+ */ + clone_event = pkt_clone_odp(pkt, odp_pool); + if (unlikely(clone_event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_EVENT_CLONE, + "Cloning from ext odp-pool:%" PRIu64 " failed", + odp_pool_to_u64(odp_pool)); + } + return clone_event; + } + + /* + * Clone the event from an EM-pool: + */ + pool_elem = pool_elem_get(em_pool); + type = ev_hdr->event_type; + + /* EM event pools created with type=SW can not support pkt events */ + if (unlikely(pool_elem->event_type == EM_EVENT_TYPE_SW && + em_event_type_major(type) == EM_EVENT_TYPE_PACKET)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_EVENT_CLONE, + "EM-pool:%s(%" PRI_POOL "):\n" + "Invalid event type:0x%x for buf", + pool_elem->name, em_pool, type); + return EM_EVENT_UNDEF; + } + + event_hdr_t *clone_hdr = event_alloc(pool_elem, size, type); + + if (unlikely(!clone_hdr)) { + em_status_t err = + INTERNAL_ERROR(EM_ERR_ALLOC_FAILED, EM_ESCOPE_EVENT_CLONE, + "EM-pool:'%s': sz:%u type:0x%x pool:%" PRI_POOL "", + pool_elem->name, size, type, em_pool); + if (EM_CHECK_LEVEL > 1 && err != EM_OK && + em_shm->opt.pool.statistics_enable) + em_pool_info_print(em_pool); + return EM_EVENT_UNDEF; + } + + clone_event = clone_hdr->event; + /* Update clone_event ESV state for the clone-alloc */ + if (esv_enabled()) + clone_event = evstate_clone(clone_event, clone_hdr); + + /* Call the 'alloc' API hook function also for event-clone */ + if (EM_API_HOOKS_ENABLE) + call_api_hooks_alloc(&clone_event, 1, 1, size, type, pool); + + /* Copy event payload from the parent event into the clone event */ + const void *src = event_pointer(event); + void *dst = event_pointer(clone_event); + + memcpy(dst, src, size); + + return clone_event; +} + +static int event_uarea_init(em_event_t event, event_hdr_t **ev_hdr/*out*/) +{ + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t odp_evtype = odp_event_type(odp_event); + odp_pool_t odp_pool = ODP_POOL_INVALID; + odp_packet_t odp_pkt; + odp_buffer_t odp_buf; + odp_packet_vector_t odp_pktvec; + event_hdr_t *hdr; + bool is_init; + + switch (odp_evtype) { + case ODP_EVENT_PACKET: + odp_pkt = odp_packet_from_event(odp_event); + hdr = odp_packet_user_area(odp_pkt); + is_init = hdr->user_area.isinit; + if (!is_init) + odp_pool = odp_packet_pool(odp_pkt); + break; + case ODP_EVENT_BUFFER: + odp_buf = odp_buffer_from_event(odp_event); + hdr = odp_buffer_addr(odp_buf); + is_init = hdr->user_area.isinit; + if (!is_init) + odp_pool = odp_buffer_pool(odp_buf); + break; + case ODP_EVENT_PACKET_VECTOR: + odp_pktvec = odp_packet_vector_from_event(odp_event); + hdr = odp_packet_vector_user_area(odp_pktvec); + is_init = hdr->user_area.isinit; + if (!is_init) + odp_pool = odp_packet_vector_pool(odp_pktvec); + break; + default: + return -1; + } + + *ev_hdr = hdr; + + if (!is_init) { + /* + * Event user area metadata is not initialized in + * the event header - initialize it: + */ + hdr->user_area.all = 0; /* user_area.{} = all zero (.sizes=0) */ + hdr->user_area.isinit = 1; + + em_pool_t pool = pool_odp2em(odp_pool); + + if (pool == EM_POOL_UNDEF) + return 0; /* ext ODP pool: OK, no user area, sz=0 */ + + /* Event from an EM event pool, can init event user area */ + const mpool_elem_t *pool_elem = pool_elem_get(pool); + + if (unlikely(!pool_elem)) + return -2; /* invalid pool_elem */ + + hdr->user_area.req_size = pool_elem->user_area.req_size; + hdr->user_area.pad_size = pool_elem->user_area.pad_size; + } + + return 0; +} + +void *em_event_uarea_get(em_event_t event, size_t *size /*out, if 
given*/) +{ + /* Check args */ + if (EM_CHECK_LEVEL >= 1 && + unlikely(event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_GET, + "Inv.arg: event undef"); + goto no_uarea; + } + + event_hdr_t *ev_hdr = NULL; + int err = event_uarea_init(event, &ev_hdr/*out*/); + + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_EVENT_UAREA_GET, + "Cannot init event user area: %d", err); + goto no_uarea; + } + + if (ev_hdr->user_area.req_size == 0) + goto no_uarea; + + /* + * Event has user area configured, return pointer and size + */ + void *uarea_ptr = (void *)((uintptr_t)ev_hdr + sizeof(event_hdr_t)); + + if (size) + *size = ev_hdr->user_area.req_size; + + return uarea_ptr; + +no_uarea: + if (size) + *size = 0; + return NULL; +} + +em_status_t em_event_uarea_id_set(em_event_t event, uint16_t id) +{ + /* Check args */ + if (EM_CHECK_LEVEL >= 1) + RETURN_ERROR_IF(event == EM_EVENT_UNDEF, + EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_ID_SET, + "Inv.arg: event undef"); + + event_hdr_t *ev_hdr = NULL; + int err = event_uarea_init(event, &ev_hdr/*out*/); + + RETURN_ERROR_IF(err, EM_ERR_OPERATION_FAILED, + EM_ESCOPE_EVENT_UAREA_ID_SET, + "Cannot init event user area: %d", err); + + ev_hdr->user_area.id = id; + ev_hdr->user_area.isset_id = 1; + + return EM_OK; +} + +em_status_t em_event_uarea_id_get(em_event_t event, bool *isset /*out*/, + uint16_t *id /*out*/) +{ + bool id_set = false; + em_status_t status = EM_OK; + + /* Check args, either 'isset' or 'id' ptrs must be provided (or both) */ + if (EM_CHECK_LEVEL >= 1 && + (event == EM_EVENT_UNDEF || !(id || isset))) { + status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_ID_GET, + "Inv.args: event:%" PRI_EVENT " isset:%p id:%p", + event, isset, id); + goto id_isset; + } + + event_hdr_t *ev_hdr = NULL; + int err = event_uarea_init(event, &ev_hdr/*out*/); + + if (unlikely(err)) { + status = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, + EM_ESCOPE_EVENT_UAREA_ID_GET, + "Cannot init event user area: %d", err); + goto id_isset; + } + + if (ev_hdr->user_area.isset_id) { + /* user-area-id has been set */ + id_set = true; + if (id) + *id = ev_hdr->user_area.id; /*out*/ + } + +id_isset: + if (isset) + *isset = id_set; /*out*/ + return status; +} + +em_status_t em_event_uarea_info(em_event_t event, + em_event_uarea_info_t *uarea_info /*out*/) +{ + em_status_t status = EM_ERROR; + + /* Check args */ + if (EM_CHECK_LEVEL >= 1 && + unlikely(event == EM_EVENT_UNDEF || !uarea_info)) { + status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_UAREA_INFO, + "Inv.args: event:%" PRI_EVENT " uarea_info:%p", + event, uarea_info); + goto err_uarea; + } + + event_hdr_t *ev_hdr = NULL; + int err = event_uarea_init(event, &ev_hdr/*out*/); + + if (unlikely(err)) { + status = INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, + EM_ESCOPE_EVENT_UAREA_INFO, + "Cannot init event user area: %d", err); + goto err_uarea; + } + + if (ev_hdr->user_area.req_size == 0) { + uarea_info->uarea = NULL; + uarea_info->size = 0; + } else { + uarea_info->uarea = (void *)((uintptr_t)ev_hdr + + sizeof(event_hdr_t)); + uarea_info->size = ev_hdr->user_area.req_size; + } + + if (ev_hdr->user_area.isset_id) { + uarea_info->id.isset = true; + uarea_info->id.value = ev_hdr->user_area.id; + } else { + uarea_info->id.isset = false; + uarea_info->id.value = 0; + } + + return EM_OK; + +err_uarea: + if (uarea_info) { + uarea_info->uarea = NULL; + uarea_info->size = 0; + uarea_info->id.isset = false; + uarea_info->id.value = 0; + } + return status; +} + +em_event_t 
em_event_ref(em_event_t event) +{ + /* Check args */ + if (unlikely(EM_CHECK_LEVEL >= 1 && event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_REF, + "Invalid arg: event:%" PRI_EVENT "", event); + return EM_EVENT_UNDEF; + } + + odp_event_t odp_event = event_em2odp(event); + odp_event_type_t odp_etype = odp_event_type(odp_event); + + if (unlikely(odp_etype != ODP_EVENT_PACKET)) { + INTERNAL_ERROR(EM_ERR_NOT_IMPLEMENTED, EM_ESCOPE_EVENT_REF, + "Event not a packet! Refs not supported for odp-events of type:%d", + odp_etype); + return EM_EVENT_UNDEF; + } + + odp_packet_t odp_pkt = odp_packet_from_event(odp_event); + odp_packet_t pkt_ref = odp_packet_ref_static(odp_pkt); + event_hdr_t *ev_hdr = event_to_hdr(event); + + if (unlikely(pkt_ref == ODP_PACKET_INVALID)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_EVENT_REF, + "ODP failure in odp_packet_ref_static()"); + return EM_EVENT_UNDEF; + } + + if (unlikely(EM_CHECK_LEVEL >= 3 && odp_pkt != pkt_ref)) { + INTERNAL_ERROR(EM_FATAL(EM_ERR_NOT_IMPLEMENTED), EM_ESCOPE_EVENT_REF, + "EM assumes all refs use the same handle"); + odp_packet_free(odp_pkt); + return EM_EVENT_UNDEF; + } + + /* + * Indicate that this event has references and some of the ESV checks + * must be omitted (evgen) - 'refs_used' will be set for the whole + * lifetime of this event, i.e. until the event is freed back into the + * pool. Important only for the first call of em_event_ref(), subsequent + * calls write same value. + */ + ev_hdr->flags.refs_used = 1; + + em_event_t ref = event; + + if (esv_enabled()) + ref = evstate_ref(event, ev_hdr); + + return ref; +} + +bool em_event_has_ref(em_event_t event) +{ + /* Check args */ + if (unlikely(EM_CHECK_LEVEL >= 1 && event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_HAS_REF, + "Invalid arg: event:%" PRI_EVENT "", event); + return false; + } + + return event_has_ref(event); +} + +void em_event_vector_free(em_event_t vector_event) +{ + if (EM_CHECK_LEVEL >= 1 && + unlikely(vector_event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_FREE, + "Invalid args: vector_event:%" PRI_EVENT "", + vector_event); + return; + } + + if (EM_CHECK_LEVEL > 2 && + unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_FREE))) { + return; + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_free(&vector_event, 1); + + if (esv_enabled()) { + event_hdr_t *const ev_hdr = eventvec_to_hdr(vector_event); + + evstate_free(vector_event, ev_hdr, EVSTATE__EVENT_VECTOR_FREE); + } + + odp_event_t odp_event = event_em2odp(vector_event); + odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event); + + odp_packet_vector_free(pkt_vec); +} + +uint32_t em_event_vector_tbl(em_event_t vector_event, + em_event_t **event_tbl/*out*/) +{ + if (EM_CHECK_LEVEL >= 1 && + unlikely(vector_event == EM_EVENT_UNDEF || !event_tbl)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_TBL, + "Invalid args: vector_event:%" PRI_EVENT " event_tbl:%p", + vector_event, event_tbl); + return 0; + } + + if (EM_CHECK_LEVEL > 2 && + unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_TBL))) { + *event_tbl = NULL; + return 0; + } + + return event_vector_tbl(vector_event, event_tbl /*out*/); +} + +uint32_t em_event_vector_size(em_event_t vector_event) +{ + if (EM_CHECK_LEVEL >= 1 && + unlikely(vector_event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_SIZE, + "Invalid arg: vector_event:%" PRI_EVENT "", vector_event); + return 0; + } + + if
(EM_CHECK_LEVEL > 2 && + unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_SIZE))) + return 0; + + odp_event_t odp_event = event_em2odp(vector_event); + odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event); + + return odp_packet_vector_size(pkt_vec); +} + +void em_event_vector_size_set(em_event_t vector_event, uint32_t size) +{ + if (EM_CHECK_LEVEL >= 1 && + unlikely(vector_event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_SIZE_SET, + "Invalid arg: vector_event:%" PRI_EVENT "", vector_event); + return; + } + + if (EM_CHECK_LEVEL > 2 && + unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_SIZE_SET))) + return; + + odp_event_t odp_event = event_em2odp(vector_event); + odp_packet_vector_t pkt_vec = odp_packet_vector_from_event(odp_event); + + odp_packet_vector_size_set(pkt_vec, size); +} + +uint32_t em_event_vector_max_size(em_event_t vector_event) +{ + if (EM_CHECK_LEVEL >= 1 && + unlikely(vector_event == EM_EVENT_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_MAX_SIZE, + "Invalid arg: vector_event:%" PRI_EVENT "", vector_event); + return 0; + } + + if (EM_CHECK_LEVEL > 2 && + unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_MAX_SIZE))) + return 0; + + uint32_t max_size = 0; + em_status_t err = event_vector_max_size(vector_event, &max_size, + EM_ESCOPE_EVENT_VECTOR_MAX_SIZE); + if (unlikely(err != EM_OK)) + return 0; + + return max_size; +} + +em_status_t em_event_vector_info(em_event_t vector_event, + em_event_vector_info_t *vector_info /*out*/) +{ + em_status_t status = EM_ERROR; + + /* Check args */ + if (EM_CHECK_LEVEL >= 1 && + unlikely(vector_event == EM_EVENT_UNDEF || !vector_info)) { + status = INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_EVENT_VECTOR_INFO, + "Invalid args: vector_event:%" PRI_EVENT " vector_info:%p", + vector_event, vector_info); + goto err_vecinfo; + } + + if (EM_CHECK_LEVEL > 2 && + unlikely(!is_vector_type_or_error(vector_event, EM_ESCOPE_EVENT_VECTOR_INFO))) { + status = EM_ERR_BAD_ID; + goto err_vecinfo; + } + + /* Get the max size */ + status = event_vector_max_size(vector_event, &vector_info->max_size, + EM_ESCOPE_EVENT_VECTOR_INFO); + if (unlikely(status != EM_OK)) + goto err_vecinfo; + + /* Get vector size and the event-table */ + vector_info->size = event_vector_tbl(vector_event, &vector_info->event_tbl/*out*/); + + return EM_OK; + +err_vecinfo: + if (vector_info) { + vector_info->event_tbl = NULL; + vector_info->size = 0; + vector_info->max_size = 0; + } + return status; +} diff --git a/src/event_machine_event_group.c b/src/event_machine_event_group.c index edb61fd1..aaa9dc4b 100644 --- a/src/event_machine_event_group.c +++ b/src/event_machine_event_group.c @@ -1,740 +1,759 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution.
- * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "em_include.h" - -/* per core (thread) state for em_event_group_get_next() */ -static ENV_LOCAL unsigned int _egrp_tbl_iter_idx; - -em_event_group_t -em_event_group_create(void) -{ - em_event_group_t egrp; - event_group_elem_t *egrp_elem; - - egrp = event_group_alloc(); - if (unlikely(egrp == EM_EVENT_GROUP_UNDEF)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_CREATE, - "Event group alloc failed!"); - return EM_EVENT_GROUP_UNDEF; - } - - egrp_elem = event_group_elem_get(egrp); - - /* Alloc succeeded, return event group handle */ - egrp_elem->ready = 1; /* Set group ready to be applied */ - return egrp; -} - -em_status_t -em_event_group_delete(em_event_group_t event_group) -{ - em_status_t status; - event_group_elem_t *const egrp_elem = - event_group_elem_get(event_group); - egrp_counter_t egrp_count; - uint64_t count; - - RETURN_ERROR_IF(egrp_elem == NULL || !event_group_allocated(egrp_elem), - EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_DELETE, - "Invalid event group: %" PRI_EGRP "", event_group); - - egrp_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); - - if (EM_EVENT_GROUP_SAFE_MODE) - count = egrp_count.count; - else - count = egrp_count.all; - - RETURN_ERROR_IF(count != 0, EM_ERR_NOT_FREE, - EM_ESCOPE_EVENT_GROUP_DELETE, - "Event group:%" PRI_EGRP " count not zero!", - event_group); - - /* set num_notif = 0, ready = 0 */ - egrp_elem->all = 0; - - status = event_group_free(event_group); - RETURN_ERROR_IF(status != EM_OK, - status, EM_ESCOPE_EVENT_GROUP_DELETE, - "Event Group delete failed!"); - - return EM_OK; -} - -em_status_t -em_event_group_apply(em_event_group_t event_group, int count, - int num_notif, const em_notif_t notif_tbl[]) -{ - int i; - uint64_t egrp_count; - em_status_t ret; - - event_group_elem_t *const egrp_elem = - event_group_elem_get(event_group); - - RETURN_ERROR_IF(egrp_elem == NULL || !event_group_allocated(egrp_elem), - EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_APPLY, - "Invalid event group: %" PRI_EGRP "", event_group); - - RETURN_ERROR_IF(count <= 0, - EM_ERR_TOO_LARGE, EM_ESCOPE_EVENT_GROUP_APPLY, - "Invalid argument: count %i", count); - - ret = check_notif_tbl(num_notif, notif_tbl); - RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EVENT_GROUP_APPLY, - "Invalid notif cfg given!"); - - if (EM_EVENT_GROUP_SAFE_MODE) - egrp_count = egrp_elem->post.count; - else - egrp_count = egrp_elem->post.all; - - RETURN_ERROR_IF(egrp_count != 0 || egrp_elem->ready == 0, - EM_ERR_NOT_FREE, EM_ESCOPE_EVENT_GROUP_APPLY, - "Event group %" PRI_EGRP " currently in use! 
count: %i", - event_group, egrp_count); - - if (EM_EVENT_GROUP_SAFE_MODE) { - egrp_elem->post.count = count; - /* Event group generation increments when _apply() is called */ - egrp_elem->post.gen++; - egrp_elem->pre.all = egrp_elem->post.all; - } else { - egrp_elem->post.all = count; - } - - egrp_elem->ready = 0; - egrp_elem->num_notif = num_notif; - - for (i = 0; i < num_notif; i++) { - egrp_elem->notif_tbl[i].event = notif_tbl[i].event; - egrp_elem->notif_tbl[i].queue = notif_tbl[i].queue; - egrp_elem->notif_tbl[i].egroup = notif_tbl[i].egroup; - } - - /* Sync mem */ - env_sync_mem(); - - return EM_OK; -} - -em_status_t -em_event_group_increment(int count) -{ - const em_locm_t *const locm = &em_locm; - em_event_group_t const egrp = em_event_group_current(); - event_group_elem_t *egrp_elem = NULL; - - if (egrp != EM_EVENT_GROUP_UNDEF) - egrp_elem = locm->current.egrp_elem; - - RETURN_ERROR_IF(egrp_elem == NULL, - EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_INCREMENT, - "No current event group (%" PRI_EGRP ")", egrp); - - RETURN_ERROR_IF(!event_group_allocated(egrp_elem) || egrp_elem->ready, - EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_GROUP_INCREMENT, - "Current event group in a bad state (%" PRI_EGRP ")", - egrp); - - if (!EM_EVENT_GROUP_SAFE_MODE) { - EM_ATOMIC_ADD(&egrp_elem->post.atomic, count); - return EM_OK; - } - - egrp_counter_t current_count; - egrp_counter_t new_count; - /* Add to post counter before count is zero or generation mismatch */ - do { - current_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); - - RETURN_ERROR_IF(current_count.count <= 0 || - current_count.gen != locm->current.egrp_gen, - EM_ERR_BAD_STATE, - EM_ESCOPE_EVENT_GROUP_INCREMENT, - "Expired event group (%" PRI_EGRP ")", - egrp); - - new_count = current_count; - new_count.count += count; - } while (!EM_ATOMIC_CMPSET(&egrp_elem->post.atomic, - current_count.all, new_count.all)); - - /* Add to pre counter if generation matches */ - do { - current_count.all = EM_ATOMIC_GET(&egrp_elem->pre.atomic); - - RETURN_ERROR_IF(current_count.gen != locm->current.egrp_gen, - EM_ERR_BAD_STATE, - EM_ESCOPE_EVENT_GROUP_INCREMENT, - "Expired event group (%" PRI_EGRP ")", - egrp); - - new_count = current_count; - new_count.count += count; - } while (!EM_ATOMIC_CMPSET(&egrp_elem->pre.atomic, - current_count.all, new_count.all)); - - return EM_OK; -} - -int -em_event_group_is_ready(em_event_group_t event_group) -{ - const event_group_elem_t *egrp_elem = - event_group_elem_get(event_group); - - if (unlikely(egrp_elem == NULL || !event_group_allocated(egrp_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_IS_READY, - "Invalid event group: %" PRI_EGRP "", - event_group); - return EM_FALSE; - } - - uint64_t count; - - if (EM_EVENT_GROUP_SAFE_MODE) - count = egrp_elem->post.count; - else - count = egrp_elem->post.all; - - if (count == 0 && egrp_elem->ready) - return EM_TRUE; - else - return EM_FALSE; -} - -em_event_group_t -em_event_group_current(void) -{ - em_locm_t *const locm = &em_locm; - - if (!EM_EVENT_GROUP_SAFE_MODE) - return locm->current.egrp; - - if (locm->current.egrp == EM_EVENT_GROUP_UNDEF) - return EM_EVENT_GROUP_UNDEF; - - const event_group_elem_t *egrp_elem = locm->current.egrp_elem; - egrp_counter_t current; - - if (egrp_elem == NULL) - return EM_EVENT_GROUP_UNDEF; - - current.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); - - if (locm->current.egrp_gen != current.gen || current.count <= 0) - locm->current.egrp = EM_EVENT_GROUP_UNDEF; - - return locm->current.egrp; -} - -em_status_t -em_send_group(em_event_t event, 
em_queue_t queue, - em_event_group_t event_group) -{ - const event_group_elem_t *egrp_elem = event_group_elem_get(event_group); - const bool is_external = queue_external(queue); - queue_elem_t *q_elem = NULL; - event_hdr_t *ev_hdr; - em_status_t stat; - - /* - * Check all args - */ - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF, - EM_ERR_BAD_ID, EM_ESCOPE_SEND_GROUP, "Invalid event"); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && - event_group != EM_EVENT_GROUP_UNDEF && !egrp_elem, - EM_ERR_NOT_FOUND, EM_ESCOPE_SEND_GROUP, - "Invalid event group:%" PRI_EGRP "", event_group); - - ev_hdr = event_to_hdr(event); - - if (!is_external) { - /* queue belongs to this EM instance */ - q_elem = queue_elem_get(queue); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !q_elem, - EM_ERR_BAD_ID, EM_ESCOPE_SEND_GROUP, - "Invalid queue:%" PRI_QUEUE "", queue); - RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !queue_allocated(q_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_SEND_GROUP, - "Invalid queue:%" PRI_QUEUE "", queue); - } - - RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && event_group != EM_EVENT_GROUP_UNDEF && - !event_group_allocated(egrp_elem), - EM_ERR_BAD_STATE, EM_ESCOPE_SEND_GROUP, - "Invalid event group:%" PRI_EGRP "", event_group); - - /* Buffer events sent from EO-start to scheduled queues */ - if (unlikely(!is_external && - q_elem->scheduled && em_locm.start_eo_elem)) { - /* - * em_send_group() called from within an EO-start function: - * all events sent to scheduled queues will be buffered - * and sent when the EO-start operation completes. - */ - int num = eo_start_buffer_events(&event, 1, queue, event_group); - - stat = num == 1 ? EM_OK : EM_ERR_OPERATION_FAILED; - if (EM_CHECK_LEVEL == 0) - return stat; - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_SEND_GROUP, - "send-group from EO-start failed"); - return EM_OK; - } - - /* Store the event group information in the event header */ - if (egrp_elem) { - ev_hdr->egrp = egrp_elem->event_group; - if (EM_EVENT_GROUP_SAFE_MODE) - ev_hdr->egrp_gen = event_group_gen_get(egrp_elem); - } else { - ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_send(&event, 1, queue, event_group); - - if (esv_enabled()) - evstate_usr2em(event, ev_hdr, EVSTATE__SEND_EGRP); - - if (is_external) { - /* - * Send out of EM to another device via event-chaining and a - * user-provided function 'event_send_device()' - */ - stat = send_chaining_egrp(event, ev_hdr, queue, egrp_elem); - if (EM_CHECK_LEVEL == 0) - return stat; - if (unlikely(stat != EM_OK)) { - stat = INTERNAL_ERROR(stat, EM_ESCOPE_SEND_GROUP, - "send_chaining_egrp: Q:%" PRI_QUEUE "", - queue); - goto send_group_err; - } - - return EM_OK; - } - - /* - * Normal send to a queue on this device - */ - switch (q_elem->type) { - case EM_QUEUE_TYPE_ATOMIC: - case EM_QUEUE_TYPE_PARALLEL: - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - stat = send_event(event, q_elem); - break; - case EM_QUEUE_TYPE_UNSCHEDULED: - stat = queue_unsched_enqueue(event, q_elem); - break; - case EM_QUEUE_TYPE_LOCAL: - stat = send_local(event, ev_hdr, q_elem); - break; - default: - stat = EM_ERR_NOT_FOUND; - break; - } - - if (EM_CHECK_LEVEL == 0) - return stat; - - if (unlikely(stat != EM_OK)) { - stat = INTERNAL_ERROR(stat, EM_ESCOPE_SEND_GROUP, - "Q:%" PRI_QUEUE " type:%" PRI_QTYPE "", - queue, q_elem->type); - goto send_group_err; - } - - return EM_OK; - -send_group_err: - if (esv_enabled()) - evstate_usr2em_revert(event, ev_hdr, EVSTATE__SEND_EGRP__FAIL); - return stat; -} - -/* - * em_send_group_multi() helper: check function 
arguments - */ -static inline em_status_t -send_grpmulti_check_args(const em_event_t events[], int num, em_queue_t queue, - em_event_group_t event_group, - const event_group_elem_t *egrp_elem, - bool *is_external__out /*out if EM_OK*/, - queue_elem_t **q_elem__out /*out if EM_OK*/) -{ - const bool is_external = queue_external(queue); - queue_elem_t *q_elem = NULL; - int i; - - if (EM_CHECK_LEVEL > 0 && - unlikely(!events || num <= 0 || - (event_group != EM_EVENT_GROUP_UNDEF && !egrp_elem))) - return EM_ERR_BAD_ARG; - - if (EM_CHECK_LEVEL > 1 && - unlikely(event_group != EM_EVENT_GROUP_UNDEF && - !event_group_allocated(egrp_elem))) - return EM_ERR_BAD_STATE; - - if (EM_CHECK_LEVEL > 2) { - for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) - ; - if (unlikely(i != num)) - return EM_ERR_BAD_POINTER; - } - - if (!is_external) { - /* queue belongs to this EM instance */ - q_elem = queue_elem_get(queue); - - if (EM_CHECK_LEVEL > 0 && unlikely(!q_elem)) - return EM_ERR_BAD_ARG; - if (EM_CHECK_LEVEL > 1 && unlikely(!queue_allocated(q_elem))) - return EM_ERR_BAD_STATE; - } - - *is_external__out = is_external; - *q_elem__out = q_elem; /* NULL if is_external */ - return EM_OK; -} - -int -em_send_group_multi(const em_event_t events[], int num, em_queue_t queue, - em_event_group_t event_group) -{ - const event_group_elem_t *egrp_elem = event_group_elem_get(event_group); - bool is_external = false; /* set by check_args */ - queue_elem_t *q_elem = NULL; /* set by check_args */ - int num_sent; - int i; - - /* - * Check all args. - */ - em_status_t err = - send_grpmulti_check_args(events, num, queue, event_group, egrp_elem, - /*out if EM_OK:*/ &is_external, &q_elem); - if (unlikely(err != EM_OK)) { - INTERNAL_ERROR(err, EM_ESCOPE_SEND_GROUP_MULTI, - "Invalid args: events:%p num:%d\n" - "Q:%" PRI_QUEUE " event_group:%" PRI_EGRP "", - events, num, queue, event_group); - return 0; - } - - /* Buffer events sent from EO-start to scheduled queues */ - if (unlikely(!is_external && - q_elem->scheduled && em_locm.start_eo_elem)) { - /* - * em_send_group_multi() called from within an EO-start - * function: all events sent to scheduled queues will be - * buffered and sent when the EO-start operation completes. 
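
For reference, the normal life cycle of an event group used with these send functions (a hedged sketch: the count, 'notif_event', 'notif_queue', 'work_queue' and 'events[]' are illustrative):

	em_event_group_t egrp = em_event_group_create();
	em_notif_t notif_tbl[1];

	notif_tbl[0].event = notif_event; /* delivered once the count reaches zero */
	notif_tbl[0].queue = notif_queue;
	notif_tbl[0].egroup = EM_EVENT_GROUP_UNDEF;

	if (em_event_group_apply(egrp, 8 /*count*/, 1, notif_tbl) == EM_OK) {
		for (int i = 0; i < 8; i++)
			(void)em_send_group(events[i], work_queue, egrp);
	}
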
- */ - num_sent = eo_start_buffer_events(events, num, queue, event_group); - if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_GROUP_MULTI, - "send-egrp-multi EO-start:req:%d sent:%d", - num, num_sent); - } - return num_sent; - } - - event_hdr_t *ev_hdrs[num]; - - event_to_hdr_multi(events, ev_hdrs, num); - - /* Store the event group information in the event header */ - for (i = 0; i < num; i++) - ev_hdrs[i]->egrp = event_group; /* can be EM_EVENT_GROUP_UNDEF*/ - - if (EM_EVENT_GROUP_SAFE_MODE && egrp_elem) { - uint64_t egrp_gen = event_group_gen_get(egrp_elem); - - for (i = 0; i < num; i++) - ev_hdrs[i]->egrp_gen = egrp_gen; - } - - if (EM_API_HOOKS_ENABLE) - call_api_hooks_send(events, num, queue, event_group); - - if (esv_enabled()) - evstate_usr2em_multi(events, ev_hdrs, num, - EVSTATE__SEND_EGRP_MULTI); - - if (is_external) { - /* - * Send out of EM to another device via event-chaining and a - * user-provided function 'event_send_device_multi()' - */ - num_sent = send_chaining_egrp_multi(events, ev_hdrs, num, - queue, egrp_elem); - if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { - INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_SEND_GROUP_MULTI, - "send_chaining_egrp_multi: req:%d, sent:%d", - num, num_sent); - goto send_group_multi_err; - } - - return num_sent; - } - - /* - * Normal send to a queue on this device - */ - switch (q_elem->type) { - case EM_QUEUE_TYPE_ATOMIC: - case EM_QUEUE_TYPE_PARALLEL: - case EM_QUEUE_TYPE_PARALLEL_ORDERED: - num_sent = send_event_multi(events, num, q_elem); - break; - case EM_QUEUE_TYPE_LOCAL: - num_sent = send_local_multi(events, ev_hdrs, num, q_elem); - break; - default: - num_sent = 0; - break; - } - - if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_GROUP_MULTI, - "send-egrp-multi failed: req:%d, sent:%d", - num, num_sent); - goto send_group_multi_err; - } - - return num_sent; - -send_group_multi_err: - if (esv_enabled()) { - evstate_usr2em_revert_multi(&events[num_sent], &ev_hdrs[num_sent], - num - num_sent, - EVSTATE__SEND_EGRP_MULTI__FAIL); - } - return num_sent; -} - -void -em_event_group_processing_end(void) -{ - em_locm_t *const locm = &em_locm; - const em_event_group_t event_group = em_event_group_current(); - - if (unlikely(invalid_egrp(event_group))) - return; - - /* - * Atomically decrement the event group count. - * If new count is zero, send notification events. 
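
Called from within an EO receive function this ends the event's participation in the current event group early, so the remaining processing no longer delays the group's completion notifications. A sketch of the intended use (illustrative, inside a receive function):

	if (em_event_group_current() != EM_EVENT_GROUP_UNDEF)
		em_event_group_processing_end(); /* group count decremented here */

	/* ...continue processing outside the event group... */
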
- */ - event_group_count_decrement(locm->current.rcv_multi_cnt); - - locm->current.egrp = EM_EVENT_GROUP_UNDEF; - locm->current.egrp_elem = NULL; -} - -em_status_t -em_event_group_assign(em_event_group_t event_group) -{ - em_locm_t *const locm = &em_locm; - event_group_elem_t *const egrp_elem = - event_group_elem_get(event_group); - - RETURN_ERROR_IF(egrp_elem == NULL || !event_group_allocated(egrp_elem), - EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_ASSIGN, - "Invalid event group: %" PRI_EGRP "", event_group); - - RETURN_ERROR_IF(locm->current.egrp != EM_EVENT_GROUP_UNDEF, - EM_ERR_BAD_CONTEXT, EM_ESCOPE_EVENT_GROUP_ASSIGN, - "Cannot assign event group %" PRI_EGRP ",\n" - "event already belongs to event group %" PRI_EGRP "", - event_group, locm->current.egrp); - - RETURN_ERROR_IF(egrp_elem->ready, - EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_GROUP_ASSIGN, - "Cannot assign event group %" PRI_EGRP ",\n" - "Event group has not been applied", event_group); - - locm->current.egrp = event_group; - locm->current.egrp_elem = egrp_elem; - - if (EM_EVENT_GROUP_SAFE_MODE) - locm->current.egrp_gen = egrp_elem->post.gen; - - return EM_OK; -} - -/* - * Abort is successful if generation can be incremented before post_count - * reaches zero. - */ -em_status_t -em_event_group_abort(em_event_group_t event_group) -{ - event_group_elem_t *const egrp_elem = - event_group_elem_get(event_group); - - RETURN_ERROR_IF(egrp_elem == NULL || !event_group_allocated(egrp_elem), - EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_ABORT, - "Invalid event group: %" PRI_EGRP "", event_group); - - if (!EM_EVENT_GROUP_SAFE_MODE) { - RETURN_ERROR_IF(egrp_elem->post.all <= 0, - EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_GROUP_ABORT, - "Event group abort too late, notifs already sent"); - egrp_elem->post.all = 0; - /* mark group ready for new apply and stop notifs */ - egrp_elem->ready = 1; - return EM_OK; - } - - egrp_counter_t current_count; - egrp_counter_t new_count; - - /* Attemp to set count to zero before count reaches zero */ - do { - current_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); - - RETURN_ERROR_IF(current_count.count <= 0, - EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_GROUP_ABORT, - "Event group abort late, notifs already sent"); - new_count = current_count; - new_count.count = 0; - } while (!EM_ATOMIC_CMPSET(&egrp_elem->post.atomic, - current_count.all, new_count.all)); - /* - * Change pre_count also to prevent expired event group events - * from reaching receive function. - */ - EM_ATOMIC_SET(&egrp_elem->pre.atomic, new_count.all); - /* Ready for new apply */ - egrp_elem->ready = 1; - - return EM_OK; -} - -int -em_event_group_get_notif(em_event_group_t event_group, - int max_notif, em_notif_t notif_tbl[]) -{ - const event_group_elem_t *egrp_elem = - event_group_elem_get(event_group); - int num_notif = 0; /* return value */ - - if (unlikely(egrp_elem == NULL || !event_group_allocated(egrp_elem) || - max_notif < 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_GET_NOTIF, - "Invalid args: evgrp:%" PRI_EGRP ", notifs:%d", - event_group, max_notif); - return 0; - } - - if (unlikely(max_notif == 0)) - return 0; - - if (unlikely(notif_tbl == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, - EM_ESCOPE_EVENT_GROUP_GET_NOTIF, - "Invalid notif_tbl[] given"); - return 0; - } - - if (!egrp_elem->ready) { - int i; - - num_notif = max_notif < egrp_elem->num_notif ? 
- max_notif : egrp_elem->num_notif; - - for (i = 0; i < num_notif; i++) { - notif_tbl[i].event = egrp_elem->notif_tbl[i].event; - notif_tbl[i].queue = egrp_elem->notif_tbl[i].queue; - notif_tbl[i].egroup = egrp_elem->notif_tbl[i].egroup; - } - } - - return num_notif; -} - -em_event_group_t -em_event_group_get_first(unsigned int *num) -{ - const event_group_elem_t *const egrp_elem_tbl = - em_shm->event_group_tbl.egrp_elem; - const event_group_elem_t *egrp_elem = &egrp_elem_tbl[0]; - const unsigned int egrp_count = event_group_count(); - - _egrp_tbl_iter_idx = 0; /* reset iteration */ - - if (num) - *num = egrp_count; - - if (egrp_count == 0) { - _egrp_tbl_iter_idx = EM_MAX_EVENT_GROUPS; /* UNDEF=_get_next()*/ - return EM_EVENT_GROUP_UNDEF; - } - - /* find first */ - while (!event_group_allocated(egrp_elem)) { - _egrp_tbl_iter_idx++; - if (_egrp_tbl_iter_idx >= EM_MAX_EVENT_GROUPS) - return EM_EVENT_GROUP_UNDEF; - egrp_elem = &egrp_elem_tbl[_egrp_tbl_iter_idx]; - } - - return egrp_idx2hdl(_egrp_tbl_iter_idx); -} - -em_event_group_t -em_event_group_get_next(void) -{ - if (_egrp_tbl_iter_idx >= EM_MAX_EVENT_GROUPS - 1) - return EM_EVENT_GROUP_UNDEF; - - _egrp_tbl_iter_idx++; - - const event_group_elem_t *const egrp_elem_tbl = - em_shm->event_group_tbl.egrp_elem; - const event_group_elem_t *egrp_elem = - &egrp_elem_tbl[_egrp_tbl_iter_idx]; - - /* find next */ - while (!event_group_allocated(egrp_elem)) { - _egrp_tbl_iter_idx++; - if (_egrp_tbl_iter_idx >= EM_MAX_EVENT_GROUPS) - return EM_EVENT_GROUP_UNDEF; - egrp_elem = &egrp_elem_tbl[_egrp_tbl_iter_idx]; - } - - return egrp_idx2hdl(_egrp_tbl_iter_idx); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "em_include.h" + +/* per core (thread) state for em_event_group_get_next() */ +static ENV_LOCAL unsigned int _egrp_tbl_iter_idx; + +em_event_group_t +em_event_group_create(void) +{ + em_event_group_t egrp; + event_group_elem_t *egrp_elem; + + egrp = event_group_alloc(); + if (unlikely(egrp == EM_EVENT_GROUP_UNDEF)) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_CREATE, + "Event group alloc failed!"); + return EM_EVENT_GROUP_UNDEF; + } + + egrp_elem = event_group_elem_get(egrp); + + /* Alloc succeeded, return event group handle */ + egrp_elem->ready = 1; /* Set group ready to be applied */ + return egrp; +} + +em_status_t +em_event_group_delete(em_event_group_t event_group) +{ + em_status_t status; + event_group_elem_t *const egrp_elem = + event_group_elem_get(event_group); + egrp_counter_t egrp_count; + uint64_t count; + + RETURN_ERROR_IF(egrp_elem == NULL || !event_group_allocated(egrp_elem), + EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_DELETE, + "Invalid event group: %" PRI_EGRP "", event_group); + + egrp_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); + + if (EM_EVENT_GROUP_SAFE_MODE) + count = egrp_count.count; + else + count = egrp_count.all; + + RETURN_ERROR_IF(count != 0, EM_ERR_NOT_FREE, + EM_ESCOPE_EVENT_GROUP_DELETE, + "Event group:%" PRI_EGRP " count not zero!", + event_group); + + /* set num_notif = 0, ready = 0 */ + egrp_elem->all = 0; + + status = event_group_free(event_group); + RETURN_ERROR_IF(status != EM_OK, + status, EM_ESCOPE_EVENT_GROUP_DELETE, + "Event Group delete failed!"); + + return EM_OK; +} + +em_status_t +em_event_group_apply(em_event_group_t event_group, int count, + int num_notif, const em_notif_t notif_tbl[]) +{ + uint64_t egrp_count; + em_status_t ret; + + event_group_elem_t *const egrp_elem = + event_group_elem_get(event_group); + + RETURN_ERROR_IF(egrp_elem == NULL || !event_group_allocated(egrp_elem), + EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_APPLY, + "Invalid event group: %" PRI_EGRP "", event_group); + + RETURN_ERROR_IF(count <= 0, + EM_ERR_TOO_LARGE, EM_ESCOPE_EVENT_GROUP_APPLY, + "Invalid argument: count %i", count); + + ret = check_notif_tbl(num_notif, notif_tbl); + RETURN_ERROR_IF(ret != EM_OK, ret, EM_ESCOPE_EVENT_GROUP_APPLY, + "Invalid notif cfg given!"); + + if (EM_EVENT_GROUP_SAFE_MODE) + egrp_count = egrp_elem->post.count; + else + egrp_count = egrp_elem->post.all; + + RETURN_ERROR_IF(egrp_count != 0 || egrp_elem->ready == 0, + EM_ERR_NOT_FREE, EM_ESCOPE_EVENT_GROUP_APPLY, + "Event group %" PRI_EGRP " currently in use! 
count: %i", + event_group, egrp_count); + + if (EM_EVENT_GROUP_SAFE_MODE) { + egrp_elem->post.count = count; + /* Event group generation increments when _apply() is called */ + egrp_elem->post.gen++; + egrp_elem->pre.all = egrp_elem->post.all; + } else { + egrp_elem->post.all = count; + } + + egrp_elem->ready = 0; + egrp_elem->num_notif = num_notif; + + for (int i = 0; i < num_notif; i++) { + egrp_elem->notif_tbl[i].event = notif_tbl[i].event; + egrp_elem->notif_tbl[i].queue = notif_tbl[i].queue; + egrp_elem->notif_tbl[i].egroup = notif_tbl[i].egroup; + } + + /* Sync mem */ + env_sync_mem(); + + return EM_OK; +} + +em_status_t +em_event_group_increment(int count) +{ + const em_locm_t *const locm = &em_locm; + em_event_group_t const egrp = em_event_group_current(); + event_group_elem_t *egrp_elem = NULL; + + if (egrp != EM_EVENT_GROUP_UNDEF) + egrp_elem = locm->current.egrp_elem; + + RETURN_ERROR_IF(egrp_elem == NULL, + EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_INCREMENT, + "No current event group (%" PRI_EGRP ")", egrp); + + RETURN_ERROR_IF(!event_group_allocated(egrp_elem) || egrp_elem->ready, + EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_GROUP_INCREMENT, + "Current event group in a bad state (%" PRI_EGRP ")", + egrp); + + if (!EM_EVENT_GROUP_SAFE_MODE) { + EM_ATOMIC_ADD(&egrp_elem->post.atomic, count); + return EM_OK; + } + + egrp_counter_t current_count; + egrp_counter_t new_count; + /* Add to post counter before count is zero or generation mismatch */ + do { + current_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); + + RETURN_ERROR_IF(current_count.count <= 0 || + current_count.gen != locm->current.egrp_gen, + EM_ERR_BAD_STATE, + EM_ESCOPE_EVENT_GROUP_INCREMENT, + "Expired event group (%" PRI_EGRP ")", + egrp); + + new_count = current_count; + new_count.count += count; + } while (!EM_ATOMIC_CMPSET(&egrp_elem->post.atomic, + current_count.all, new_count.all)); + + /* Add to pre counter if generation matches */ + do { + current_count.all = EM_ATOMIC_GET(&egrp_elem->pre.atomic); + + RETURN_ERROR_IF(current_count.gen != locm->current.egrp_gen, + EM_ERR_BAD_STATE, + EM_ESCOPE_EVENT_GROUP_INCREMENT, + "Expired event group (%" PRI_EGRP ")", + egrp); + + new_count = current_count; + new_count.count += count; + } while (!EM_ATOMIC_CMPSET(&egrp_elem->pre.atomic, + current_count.all, new_count.all)); + + return EM_OK; +} + +int +em_event_group_is_ready(em_event_group_t event_group) +{ + const event_group_elem_t *egrp_elem = + event_group_elem_get(event_group); + + if (unlikely(egrp_elem == NULL || !event_group_allocated(egrp_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_IS_READY, + "Invalid event group: %" PRI_EGRP "", + event_group); + return EM_FALSE; + } + + uint64_t count; + + if (EM_EVENT_GROUP_SAFE_MODE) + count = egrp_elem->post.count; + else + count = egrp_elem->post.all; + + if (count == 0 && egrp_elem->ready) + return EM_TRUE; + else + return EM_FALSE; +} + +em_event_group_t +em_event_group_current(void) +{ + em_locm_t *const locm = &em_locm; + + if (!EM_EVENT_GROUP_SAFE_MODE) + return locm->current.egrp; + + if (locm->current.egrp == EM_EVENT_GROUP_UNDEF) + return EM_EVENT_GROUP_UNDEF; + + const event_group_elem_t *egrp_elem = locm->current.egrp_elem; + egrp_counter_t current; + + if (egrp_elem == NULL) + return EM_EVENT_GROUP_UNDEF; + + current.all = EM_ATOMIC_GET(&egrp_elem->post.atomic); + + if (locm->current.egrp_gen != current.gen || current.count <= 0) + locm->current.egrp = EM_EVENT_GROUP_UNDEF; + + return locm->current.egrp; +} + +em_status_t +em_send_group(em_event_t 
event, em_queue_t queue, + em_event_group_t event_group) +{ + const event_group_elem_t *egrp_elem = event_group_elem_get(event_group); + const bool is_external = queue_external(queue); + queue_elem_t *q_elem = NULL; + event_hdr_t *ev_hdr; + em_status_t stat; + + /* + * Check all args + */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && event == EM_EVENT_UNDEF, + EM_ERR_BAD_ID, EM_ESCOPE_SEND_GROUP, "Invalid event"); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && + event_group != EM_EVENT_GROUP_UNDEF && !egrp_elem, + EM_ERR_NOT_FOUND, EM_ESCOPE_SEND_GROUP, + "Invalid event group:%" PRI_EGRP "", event_group); + + ev_hdr = event_to_hdr(event); + + if (!is_external) { + /* queue belongs to this EM instance */ + q_elem = queue_elem_get(queue); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 0 && !q_elem, + EM_ERR_BAD_ID, EM_ESCOPE_SEND_GROUP, + "Invalid queue:%" PRI_QUEUE "", queue); + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && !queue_allocated(q_elem), + EM_ERR_BAD_STATE, EM_ESCOPE_SEND_GROUP, + "Invalid queue:%" PRI_QUEUE "", queue); + } + + RETURN_ERROR_IF(EM_CHECK_LEVEL > 1 && event_group != EM_EVENT_GROUP_UNDEF && + !event_group_allocated(egrp_elem), + EM_ERR_BAD_STATE, EM_ESCOPE_SEND_GROUP, + "Invalid event group:%" PRI_EGRP "", event_group); + + /* + * Verify that event references are not used with event groups. + * Cannot save the event group into an event header shared between + * all the references. + */ + RETURN_ERROR_IF(EM_CHECK_LEVEL > 2 && event_group != EM_EVENT_GROUP_UNDEF && + event_has_ref(event), + EM_ERR_BAD_CONTEXT, EM_ESCOPE_SEND_GROUP, + "Event has references: can't use references with event groups"); + + /* Store the event group information in the event header */ + if (egrp_elem) { + ev_hdr->egrp = egrp_elem->event_group; + if (EM_EVENT_GROUP_SAFE_MODE) + ev_hdr->egrp_gen = event_group_gen_get(egrp_elem); + } else { + ev_hdr->egrp = EM_EVENT_GROUP_UNDEF; + } + + /* Buffer events sent from EO-start to scheduled queues */ + if (unlikely(!is_external && + q_elem->scheduled && em_locm.start_eo_elem)) { + /* + * em_send_group() called from within an EO-start function: + * all events sent to scheduled queues will be buffered + * and sent when the EO-start operation completes. + */ + if (esv_enabled()) + evstate_usr2em(event, ev_hdr, EVSTATE__SEND_EGRP); + + int num = eo_start_buffer_events(&event, 1, queue); + + stat = num == 1 ? 
EM_OK : EM_ERR_OPERATION_FAILED; + if (EM_CHECK_LEVEL == 0) + return stat; + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_SEND_GROUP, + "send-group from EO-start failed"); + return EM_OK; + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_send(&event, 1, queue, event_group); + + if (is_external) { + /* + * Send out of EM to another device via event-chaining and a + * user-provided function 'event_send_device()' + */ + stat = send_chaining_egrp(event, ev_hdr, queue, egrp_elem); + + if (EM_CHECK_LEVEL == 0) + return stat; + + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_SEND_GROUP, + "send_chaining_egrp: Q:%" PRI_QUEUE "", queue); + return EM_OK; + } + + /* + * Normal send to a queue on this device + */ + if (esv_enabled()) + evstate_usr2em(event, ev_hdr, EVSTATE__SEND_EGRP); + + switch (q_elem->type) { + case EM_QUEUE_TYPE_ATOMIC: + case EM_QUEUE_TYPE_PARALLEL: + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + stat = send_event(event, q_elem); + break; + case EM_QUEUE_TYPE_UNSCHEDULED: + stat = queue_unsched_enqueue(event, q_elem); + break; + case EM_QUEUE_TYPE_LOCAL: + stat = send_local(event, q_elem); + break; + default: + stat = EM_ERR_NOT_FOUND; + break; + } + + if (EM_CHECK_LEVEL == 0) + return stat; + + if (unlikely(stat != EM_OK)) { + stat = INTERNAL_ERROR(stat, EM_ESCOPE_SEND_GROUP, + "send egrp: Q:%" PRI_QUEUE " type:%" PRI_QTYPE "", + queue, q_elem->type); + if (esv_enabled()) + evstate_usr2em_revert(event, ev_hdr, + EVSTATE__SEND_EGRP__FAIL); + return stat; + } + + return EM_OK; +} + +/* + * em_send_group_multi() helper: check function arguments + */ +static inline em_status_t +send_grpmulti_check_args(const em_event_t events[], int num, em_queue_t queue, + em_event_group_t event_group, + const event_group_elem_t *egrp_elem, + bool *is_external__out /*out if EM_OK*/, + queue_elem_t **q_elem__out /*out if EM_OK*/) +{ + const bool is_external = queue_external(queue); + queue_elem_t *q_elem = NULL; + int i; + + if (EM_CHECK_LEVEL > 0 && + unlikely(!events || num <= 0 || + (event_group != EM_EVENT_GROUP_UNDEF && !egrp_elem))) + return EM_ERR_BAD_ARG; + + if (EM_CHECK_LEVEL > 1 && + unlikely(event_group != EM_EVENT_GROUP_UNDEF && + !event_group_allocated(egrp_elem))) + return EM_ERR_BAD_STATE; + + if (EM_CHECK_LEVEL > 2) { + for (i = 0; i < num && events[i] != EM_EVENT_UNDEF; i++) + ; + if (unlikely(i != num)) + return EM_ERR_BAD_POINTER; + } + + if (!is_external) { + /* queue belongs to this EM instance */ + q_elem = queue_elem_get(queue); + + if (EM_CHECK_LEVEL > 0 && unlikely(!q_elem)) + return EM_ERR_BAD_ARG; + if (EM_CHECK_LEVEL > 1 && unlikely(!queue_allocated(q_elem))) + return EM_ERR_BAD_STATE; + } + + *is_external__out = is_external; + *q_elem__out = q_elem; /* NULL if is_external */ + return EM_OK; +} + +int +em_send_group_multi(const em_event_t events[], int num, em_queue_t queue, + em_event_group_t event_group) +{ + const event_group_elem_t *egrp_elem = event_group_elem_get(event_group); + bool is_external = false; /* set by check_args */ + queue_elem_t *q_elem = NULL; /* set by check_args */ + int num_sent; + + /* + * Check all args. + */ + em_status_t err = + send_grpmulti_check_args(events, num, queue, event_group, egrp_elem, + /*out if EM_OK:*/ &is_external, &q_elem); + if (unlikely(err != EM_OK)) { + INTERNAL_ERROR(err, EM_ESCOPE_SEND_GROUP_MULTI, + "Invalid args: events:%p num:%d\n" + "Q:%" PRI_QUEUE " event_group:%" PRI_EGRP "", + events, num, queue, event_group); + return 0; + } + + /* + * Verify that event references are not used with event groups. 
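+	 * For example (editor's illustration only, not patch code; the
+	 * em_event_ref() API for creating event references is assumed):
+	 *
+	 *	em_event_t ref = em_event_ref(ev); // ref shares ev's header
+	 *	em_send_group_multi(&ev,  1, q1, egrp_a);
+	 *	em_send_group_multi(&ref, 1, q2, egrp_b);
+	 *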
+ * Cannot save the event group into an event header shared between + * all the references + */ + if (unlikely(EM_CHECK_LEVEL > 2 && event_group != EM_EVENT_GROUP_UNDEF)) { + for (int i = 0; i < num; i++) { + if (likely(!event_has_ref(events[i]))) + continue; + + INTERNAL_ERROR(EM_ERR_BAD_CONTEXT, EM_ESCOPE_SEND_GROUP_MULTI, + "event[%d] has references: can't use with event groups", i); + return 0; + } + } + + event_hdr_t *ev_hdrs[num]; + + event_to_hdr_multi(events, ev_hdrs, num); + + /* Store the event group information in the event header */ + for (int i = 0; i < num; i++) + ev_hdrs[i]->egrp = event_group; /* can be EM_EVENT_GROUP_UNDEF*/ + + if (EM_EVENT_GROUP_SAFE_MODE && egrp_elem) { + uint64_t egrp_gen = event_group_gen_get(egrp_elem); + + for (int i = 0; i < num; i++) + ev_hdrs[i]->egrp_gen = egrp_gen; + } + + /* Buffer events sent from EO-start to scheduled queues */ + if (unlikely(!is_external && + q_elem->scheduled && em_locm.start_eo_elem)) { + /* + * em_send_group_multi() called from within an EO-start + * function: all events sent to scheduled queues will be + * buffered and sent when the EO-start operation completes. + */ + if (esv_enabled()) + evstate_usr2em_multi(events, ev_hdrs, num, + EVSTATE__SEND_EGRP_MULTI); + num_sent = eo_start_buffer_events(events, num, queue); + if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_GROUP_MULTI, + "send-egrp-multi EO-start:req:%d sent:%d", + num, num_sent); + } + return num_sent; + } + + if (EM_API_HOOKS_ENABLE) + call_api_hooks_send(events, num, queue, event_group); + + if (is_external) { + /* + * Send out of EM to another device via event-chaining and a + * user-provided function 'event_send_device_multi()' + */ + num_sent = send_chaining_egrp_multi(events, ev_hdrs, num, + queue, egrp_elem); + if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, + EM_ESCOPE_SEND_GROUP_MULTI, + "send_chaining_egrp_multi: req:%d, sent:%d", + num, num_sent); + } + + return num_sent; + } + + /* + * Normal send to a queue on this device + */ + if (esv_enabled()) + evstate_usr2em_multi(events, ev_hdrs, num, + EVSTATE__SEND_EGRP_MULTI); + + switch (q_elem->type) { + case EM_QUEUE_TYPE_ATOMIC: + case EM_QUEUE_TYPE_PARALLEL: + case EM_QUEUE_TYPE_PARALLEL_ORDERED: + num_sent = send_event_multi(events, num, q_elem); + break; + case EM_QUEUE_TYPE_LOCAL: + num_sent = send_local_multi(events, num, q_elem); + break; + default: + num_sent = 0; + break; + } + + if (EM_CHECK_LEVEL > 0 && unlikely(num_sent != num)) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_SEND_GROUP_MULTI, + "send-egrp-multi failed: req:%d, sent:%d", + num, num_sent); + if (esv_enabled()) + evstate_usr2em_revert_multi(&events[num_sent], + &ev_hdrs[num_sent], + num - num_sent, + EVSTATE__SEND_EGRP_MULTI__FAIL); + } + + return num_sent; +} + +void +em_event_group_processing_end(void) +{ + em_locm_t *const locm = &em_locm; + const em_event_group_t event_group = em_event_group_current(); + + if (unlikely(invalid_egrp(event_group))) + return; + + /* + * Atomically decrement the event group count. + * If new count is zero, send notification events. 
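+	 *
+	 * The sequence this call completes (editor's sketch of typical
+	 * usage, built only from APIs in this file):
+	 *
+	 *	em_event_group_apply(egrp, 3, 1, &notif); // expect 3 events
+	 *	// ... three events sent with em_send_group(ev, q, egrp) are
+	 *	// dispatched; each completed receive decrements the count ...
+	 *	// count 3 -> 2 -> 1 -> 0: the 'notif' event is sent.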
+	 */
+	event_group_count_decrement(locm->current.rcv_multi_cnt);
+
+	locm->current.egrp = EM_EVENT_GROUP_UNDEF;
+	locm->current.egrp_elem = NULL;
+}
+
+em_status_t
+em_event_group_assign(em_event_group_t event_group)
+{
+	em_locm_t *const locm = &em_locm;
+	event_group_elem_t *const egrp_elem =
+		event_group_elem_get(event_group);
+
+	RETURN_ERROR_IF(egrp_elem == NULL || !event_group_allocated(egrp_elem),
+			EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_ASSIGN,
+			"Invalid event group: %" PRI_EGRP "", event_group);
+
+	RETURN_ERROR_IF(locm->current.egrp != EM_EVENT_GROUP_UNDEF,
+			EM_ERR_BAD_CONTEXT, EM_ESCOPE_EVENT_GROUP_ASSIGN,
+			"Cannot assign event group %" PRI_EGRP ",\n"
+			"event already belongs to event group %" PRI_EGRP "",
+			event_group, locm->current.egrp);
+
+	RETURN_ERROR_IF(egrp_elem->ready,
+			EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_GROUP_ASSIGN,
+			"Cannot assign event group %" PRI_EGRP ",\n"
+			"Event group has not been applied", event_group);
+
+	locm->current.egrp = event_group;
+	locm->current.egrp_elem = egrp_elem;
+
+	if (EM_EVENT_GROUP_SAFE_MODE)
+		locm->current.egrp_gen = egrp_elem->post.gen;
+
+	return EM_OK;
+}
+
+/*
+ * Abort is successful if generation can be incremented before post_count
+ * reaches zero.
+ */
+em_status_t
+em_event_group_abort(em_event_group_t event_group)
+{
+	event_group_elem_t *const egrp_elem =
+		event_group_elem_get(event_group);
+
+	RETURN_ERROR_IF(egrp_elem == NULL || !event_group_allocated(egrp_elem),
+			EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_ABORT,
+			"Invalid event group: %" PRI_EGRP "", event_group);
+
+	if (!EM_EVENT_GROUP_SAFE_MODE) {
+		RETURN_ERROR_IF(egrp_elem->post.all <= 0,
+				EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_GROUP_ABORT,
+				"Event group abort too late, notifs already sent");
+		egrp_elem->post.all = 0;
+		/* mark group ready for new apply and stop notifs */
+		egrp_elem->ready = 1;
+		return EM_OK;
+	}
+
+	egrp_counter_t current_count;
+	egrp_counter_t new_count;
+
+	/* Attempt to set the count to zero before it reaches zero on its own */
+	do {
+		current_count.all = EM_ATOMIC_GET(&egrp_elem->post.atomic);
+
+		RETURN_ERROR_IF(current_count.count <= 0,
+				EM_ERR_BAD_STATE, EM_ESCOPE_EVENT_GROUP_ABORT,
+				"Event group abort too late, notifs already sent");
+		new_count = current_count;
+		new_count.count = 0;
+	} while (!EM_ATOMIC_CMPSET(&egrp_elem->post.atomic,
+				   current_count.all, new_count.all));
+	/*
+	 * Change pre_count also to prevent expired event group events
+	 * from reaching the receive function.
+	 */
+	EM_ATOMIC_SET(&egrp_elem->pre.atomic, new_count.all);
+	/* Ready for new apply */
+	egrp_elem->ready = 1;
+
+	return EM_OK;
+}
+
+int
+em_event_group_get_notif(em_event_group_t event_group,
+			 int max_notif, em_notif_t notif_tbl[])
+{
+	const event_group_elem_t *egrp_elem =
+		event_group_elem_get(event_group);
+	int num_notif = 0; /* return value */
+
+	if (unlikely(egrp_elem == NULL || !event_group_allocated(egrp_elem) ||
+		     max_notif < 0)) {
+		INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_EVENT_GROUP_GET_NOTIF,
+			       "Invalid args: evgrp:%" PRI_EGRP ", notifs:%d",
+			       event_group, max_notif);
+		return 0;
+	}
+
+	if (unlikely(max_notif == 0))
+		return 0;
+
+	if (unlikely(notif_tbl == NULL)) {
+		INTERNAL_ERROR(EM_ERR_BAD_POINTER,
+			       EM_ESCOPE_EVENT_GROUP_GET_NOTIF,
+			       "Invalid notif_tbl[] given");
+		return 0;
+	}
+
+	if (!egrp_elem->ready) {
+		num_notif = max_notif < egrp_elem->num_notif ?
+ max_notif : egrp_elem->num_notif; + + for (int i = 0; i < num_notif; i++) { + notif_tbl[i].event = egrp_elem->notif_tbl[i].event; + notif_tbl[i].queue = egrp_elem->notif_tbl[i].queue; + notif_tbl[i].egroup = egrp_elem->notif_tbl[i].egroup; + } + } + + return num_notif; +} + +em_event_group_t +em_event_group_get_first(unsigned int *num) +{ + const event_group_elem_t *const egrp_elem_tbl = + em_shm->event_group_tbl.egrp_elem; + const event_group_elem_t *egrp_elem = &egrp_elem_tbl[0]; + const unsigned int egrp_count = event_group_count(); + + _egrp_tbl_iter_idx = 0; /* reset iteration */ + + if (num) + *num = egrp_count; + + if (egrp_count == 0) { + _egrp_tbl_iter_idx = EM_MAX_EVENT_GROUPS; /* UNDEF=_get_next()*/ + return EM_EVENT_GROUP_UNDEF; + } + + /* find first */ + while (!event_group_allocated(egrp_elem)) { + _egrp_tbl_iter_idx++; + if (_egrp_tbl_iter_idx >= EM_MAX_EVENT_GROUPS) + return EM_EVENT_GROUP_UNDEF; + egrp_elem = &egrp_elem_tbl[_egrp_tbl_iter_idx]; + } + + return egrp_idx2hdl(_egrp_tbl_iter_idx); +} + +em_event_group_t +em_event_group_get_next(void) +{ + if (_egrp_tbl_iter_idx >= EM_MAX_EVENT_GROUPS - 1) + return EM_EVENT_GROUP_UNDEF; + + _egrp_tbl_iter_idx++; + + const event_group_elem_t *const egrp_elem_tbl = + em_shm->event_group_tbl.egrp_elem; + const event_group_elem_t *egrp_elem = + &egrp_elem_tbl[_egrp_tbl_iter_idx]; + + /* find next */ + while (!event_group_allocated(egrp_elem)) { + _egrp_tbl_iter_idx++; + if (_egrp_tbl_iter_idx >= EM_MAX_EVENT_GROUPS) + return EM_EVENT_GROUP_UNDEF; + egrp_elem = &egrp_elem_tbl[_egrp_tbl_iter_idx]; + } + + return egrp_idx2hdl(_egrp_tbl_iter_idx); +} diff --git a/src/event_machine_helper.c b/src/event_machine_helper.c index f4103d00..8abda905 100644 --- a/src/event_machine_helper.c +++ b/src/event_machine_helper.c @@ -1,117 +1,128 @@ -/* - * Copyright (c) 2012, Nokia Siemens Networks - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */
-
-#include <libgen.h> /* basename() */
-#include <sys/types.h> /* ssize_t */
-#include "em_include.h"
-
-int
-em_error_format_string(char *str, size_t size, em_eo_t eo, em_status_t error,
-		       em_escope_t escope, va_list args)
-{
-	int ret = -1;
-
-	if (!EM_ESCOPE(escope) || (ssize_t)size <= 0)
-		return 0;
-
-	/*
-	 * va_list contains: __FILE__, __func__, __LINE__, (format),
-	 * ## __VA_ARGS__ as reported by the INTERNAL_ERROR macro.
-	 */
-	char *file = va_arg(args, char*);
-	const char *func = va_arg(args, const char*);
-	const int line = va_arg(args, const int);
-	const char *format = va_arg(args, const char*);
-	const char *base = basename(file);
-	char eo_str[sizeof("EO:xxxxxx-abdc ") + EM_EO_NAME_LEN];
-	const uint64_t loc_err_cnt = em_locm.error_count;
-	const uint64_t glob_err_cnt = load_global_err_cnt();
-
-	if (eo == EM_EO_UNDEF) {
-		eo_str[0] = '\0';
-	} else {
-		char eo_name[EM_EO_NAME_LEN];
-		size_t nlen;
-
-		nlen = em_eo_get_name(eo, eo_name, sizeof(eo_name));
-		eo_name[nlen] = '\0';
-
-		snprintf(eo_str, sizeof(eo_str),
-			 "EO:%" PRI_EO "-\"%s\" ", eo, eo_name);
-		eo_str[sizeof(eo_str) - 1] = '\0';
-	}
-
-	ret =
-	snprintf(str, size, "\n"
-		 "EM ERROR:0x%08X ESCOPE:0x%08X %s\n"
-		 "core:%02i ecount:%" PRIu64 "(%" PRIu64 ") %s:%i %s()\n",
-		 error, escope, eo_str, em_core_id(),
-		 glob_err_cnt, loc_err_cnt, base, line, func);
-
-	if (ret > 0 && ret < (int64_t)size) {
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wformat-nonliteral"
-		ret += vsnprintf(str + ret, size - ret, format, args);
-#pragma GCC diagnostic pop
-		if (ret > 0 && ret < (int64_t)size)
-			ret += snprintf(str + ret, size - ret, "\n");
-	}
-
-	str[size - 1] = '\0';
-
-	return MIN((int64_t)size, ret + 1);
-}
-
-int
-em_core_id_get_physical(int em_core_id)
-{
-	return logic_to_phys_core_id(em_core_id);
-}
-
-void
-em_core_mask_get_physical(em_core_mask_t *phys, const em_core_mask_t *logic)
-{
-	if (em_core_mask_equal(logic, &em_shm->core_map.logic_mask)) {
-		em_core_mask_copy(phys, &em_shm->core_map.phys_mask);
-	} else {
-		int i;
-
-		em_core_mask_zero(phys);
-		for (i = 0; i < EM_MAX_CORES; i++) {
-			int phys_core;
-
-			if (em_core_mask_isset(i, logic)) {
-				phys_core = logic_to_phys_core_id(i);
-				em_core_mask_set(phys_core, phys);
-			}
-		}
-	}
-}
+/*
+ * Copyright (c) 2012, Nokia Siemens Networks
+ * Copyright (c) 2015, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in the
+ *     documentation and/or other materials provided with the distribution.
+ *   * Neither the name of the copyright holder nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <libgen.h> /* basename() */
+#include <sys/types.h> /* ssize_t */
+#include "em_include.h"
+#include
+#include
+
+int
+em_error_format_string(char *str, size_t size, em_eo_t eo, em_status_t error,
+		       em_escope_t escope, va_list args)
+{
+	int ret = -1;
+
+	if (!EM_ESCOPE(escope) || (ssize_t)size <= 0)
+		return 0;
+
+	/*
+	 * va_list contains: __FILE__, __func__, __LINE__, (format),
+	 * ## __VA_ARGS__ as reported by the INTERNAL_ERROR macro.
+	 */
+	char *file = va_arg(args, char*);
+	const char *func = va_arg(args, const char*);
+	const int line = va_arg(args, const int);
+	const char *format = va_arg(args, const char*);
+	const char *base = basename(file);
+	char eo_str[sizeof("EO:xxxxxx-abdc ") + EM_EO_NAME_LEN];
+	const uint64_t loc_err_cnt = em_locm.error_count;
+	const uint64_t glob_err_cnt = load_global_err_cnt();
+
+	if (eo == EM_EO_UNDEF) {
+		eo_str[0] = '\0';
+	} else {
+		char eo_name[EM_EO_NAME_LEN];
+		size_t nlen;
+
+		nlen = em_eo_get_name(eo, eo_name, sizeof(eo_name));
+		eo_name[nlen] = '\0';
+
+		snprintf(eo_str, sizeof(eo_str),
+			 "EO:%" PRI_EO "-\"%s\" ", eo, eo_name);
+		eo_str[sizeof(eo_str) - 1] = '\0';
+	}
+
+	ret =
+	snprintf(str, size, "\n"
+		 "EM ERROR:0x%08X ESCOPE:0x%08X %s\n"
+		 "core:%02i ecount:%" PRIu64 "(%" PRIu64 ") %s:%i %s()\n",
+		 error, escope, eo_str, em_core_id(),
+		 glob_err_cnt, loc_err_cnt, base, line, func);
+
+	if (ret > 0 && ret < (int64_t)size) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+		ret += vsnprintf(str + ret, size - ret, format, args);
+#pragma GCC diagnostic pop
+		if (ret > 0 && ret < (int64_t)size)
+			ret += snprintf(str + ret, size - ret, "\n");
+	}
+
+	str[size - 1] = '\0';
+
+	return MIN((int64_t)size, ret + 1);
+}
+
+int
+em_core_id_get_physical(int em_core_id)
+{
+	return logic_to_phys_core_id(em_core_id);
+}
+
+void
+em_core_mask_get_physical(em_core_mask_t *phys, const em_core_mask_t *logic)
+{
+	if (em_core_mask_equal(logic, &em_shm->core_map.logic_mask)) {
+		em_core_mask_copy(phys, &em_shm->core_map.phys_mask);
+	} else {
+		em_core_mask_zero(phys);
+		for (int i = 0; i < EM_MAX_CORES; i++) {
+			int phys_core;
+
+			if (em_core_mask_isset(i, logic)) {
+				phys_core = logic_to_phys_core_id(i);
+				em_core_mask_set(phys_core, phys);
+			}
+		}
+	}
+}
+
+uint64_t em_debug_timestamp(em_debug_tsp_t tsp)
+{
+	if (EM_DEBUG_TIMESTAMP_ENABLE == 0)
+		return 0;
+
+	if (unlikely(tsp >= EM_DEBUG_TSP_LAST || tsp < 0))
+		return 0;
+	else
+		return em_locm.debug_ts[tsp];
+}
diff --git a/src/event_machine_hooks.c b/src/event_machine_hooks.c
index eee527ce..faf578b7 100644
--- a/src/event_machine_hooks.c
+++ b/src/event_machine_hooks.c
@@ -1,146 +1,254 @@
-/*
- * Copyright (c) 2019, Nokia Solutions and Networks
- * All rights reserved.
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine API callback hooks. - * - */ - -#include "em_include.h" - -em_status_t -em_hooks_register_alloc(em_api_hook_alloc_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_HOOKS_REGISTER_ALLOC, - "EM API callback hooks disabled"); - - hook_fn.alloc = func; - stat = hook_register(ALLOC_HOOK, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_ALLOC, - "Alloc hook register failed"); - - return EM_OK; -} - -em_status_t -em_hooks_unregister_alloc(em_api_hook_alloc_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_HOOKS_UNREGISTER_ALLOC, - "EM API callback hooks disabled"); - - hook_fn.alloc = func; - stat = hook_unregister(ALLOC_HOOK, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_UNREGISTER_ALLOC, - "Alloc hook unregister failed"); - - return EM_OK; -} - -em_status_t -em_hooks_register_free(em_api_hook_free_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_HOOKS_REGISTER_FREE, - "EM API callback hooks disabled"); - - hook_fn.free = func; - stat = hook_register(FREE_HOOK, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_FREE, - "Free hook register failed"); - - return EM_OK; -} - -em_status_t -em_hooks_unregister_free(em_api_hook_free_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_HOOKS_UNREGISTER_FREE, - "EM API callback hooks disabled"); - - hook_fn.free = func; - stat = hook_unregister(FREE_HOOK, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_UNREGISTER_FREE, - "Free hook unregister failed"); - - return EM_OK; -} - -em_status_t -em_hooks_register_send(em_api_hook_send_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, 
EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_HOOKS_REGISTER_SEND, - "EM API callback hooks disabled"); - - hook_fn.send = func; - stat = hook_register(SEND_HOOK, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_SEND, - "Send hook register failed"); - - return EM_OK; -} - -em_status_t -em_hooks_unregister_send(em_api_hook_send_t func) -{ - hook_fn_t hook_fn; - em_status_t stat; - - RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, - EM_ESCOPE_HOOKS_UNREGISTER_SEND, - "EM API callback hooks disabled"); - - hook_fn.send = func; - stat = hook_unregister(SEND_HOOK, hook_fn); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_UNREGISTER_SEND, - "Send hook unregister failed"); - - return EM_OK; -} +/* + * Copyright (c) 2019, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine API callback hooks. 
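+ *
+ * Registration sketch (editor's addition, not patch code): the exact
+ * hook typedefs live in event_machine_hooks.h; the send-hook signature
+ * below is assumed from the call_api_hooks_send() usage in this patch.
+ *
+ *	static void send_hook(const em_event_t events[], int num,
+ *			      em_queue_t queue, em_event_group_t egrp)
+ *	{
+ *		// e.g. count the burst for statistics
+ *	}
+ *
+ *	em_status_t stat = em_hooks_register_send(send_hook);
+ *	if (stat != EM_OK)
+ *		; // hooks are disabled unless EM_API_HOOKS_ENABLE is set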
+ * + */ + +#include "em_include.h" + +em_status_t +em_hooks_register_alloc(em_api_hook_alloc_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_REGISTER_ALLOC, + "EM API callback hooks disabled"); + + hook_fn.alloc = func; + stat = hook_register(ALLOC_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_ALLOC, + "Alloc hook register failed"); + + return EM_OK; +} + +em_status_t +em_hooks_unregister_alloc(em_api_hook_alloc_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_UNREGISTER_ALLOC, + "EM API callback hooks disabled"); + + hook_fn.alloc = func; + stat = hook_unregister(ALLOC_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_UNREGISTER_ALLOC, + "Alloc hook unregister failed"); + + return EM_OK; +} + +em_status_t +em_hooks_register_free(em_api_hook_free_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_REGISTER_FREE, + "EM API callback hooks disabled"); + + hook_fn.free = func; + stat = hook_register(FREE_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_FREE, + "Free hook register failed"); + + return EM_OK; +} + +em_status_t +em_hooks_unregister_free(em_api_hook_free_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_UNREGISTER_FREE, + "EM API callback hooks disabled"); + + hook_fn.free = func; + stat = hook_unregister(FREE_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_UNREGISTER_FREE, + "Free hook unregister failed"); + + return EM_OK; +} + +em_status_t +em_hooks_register_send(em_api_hook_send_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_REGISTER_SEND, + "EM API callback hooks disabled"); + + hook_fn.send = func; + stat = hook_register(SEND_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_SEND, + "Send hook register failed"); + + return EM_OK; +} + +em_status_t +em_hooks_unregister_send(em_api_hook_send_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_API_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_UNREGISTER_SEND, + "EM API callback hooks disabled"); + + hook_fn.send = func; + stat = hook_unregister(SEND_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_UNREGISTER_SEND, + "Send hook unregister failed"); + + return EM_OK; +} + +em_status_t +em_hooks_register_to_idle(em_idle_hook_to_idle_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_IDLE_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_REGISTER_TO_IDLE, + "EM IDLE callback hooks disabled"); + + hook_fn.to_idle = func; + stat = hook_register(TO_IDLE_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_TO_IDLE, + "To_idle hook register failed"); + + return EM_OK; +} + +em_status_t +em_hooks_unregister_to_idle(em_idle_hook_to_idle_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_IDLE_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_UNREGISTER_TO_IDLE, + "EM IDLE callback hooks disabled"); + + hook_fn.to_idle = func; + stat = hook_unregister(TO_IDLE_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, 
EM_ESCOPE_HOOKS_UNREGISTER_TO_IDLE, + "To_idle hook unregister failed"); + + return EM_OK; +} + +em_status_t +em_hooks_register_to_active(em_idle_hook_to_active_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_IDLE_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_REGISTER_TO_ACTIVE, + "EM IDLE callback hooks disabled"); + + hook_fn.to_active = func; + stat = hook_register(TO_ACTIVE_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_TO_ACTIVE, + "To_active hook register failed"); + + return EM_OK; +} + +em_status_t +em_hooks_unregister_to_active(em_idle_hook_to_active_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_IDLE_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_UNREGISTER_TO_ACTIVE, + "EM IDLE callback hooks disabled"); + + hook_fn.to_active = func; + stat = hook_unregister(TO_ACTIVE_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_UNREGISTER_TO_ACTIVE, + "To_active hook unregister failed"); + + return EM_OK; +} + +em_status_t +em_hooks_register_while_idle(em_idle_hook_while_idle_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_IDLE_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_REGISTER_WHILE_IDLE, + "EM IDLE callback hooks disabled"); + + hook_fn.while_idle = func; + stat = hook_register(WHILE_IDLE_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_REGISTER_WHILE_IDLE, + "While_idle hook register failed"); + + return EM_OK; +} + +em_status_t +em_hooks_unregister_while_idle(em_idle_hook_while_idle_t func) +{ + hook_fn_t hook_fn; + em_status_t stat; + + RETURN_ERROR_IF(!EM_IDLE_HOOKS_ENABLE, EM_ERR_NOT_IMPLEMENTED, + EM_ESCOPE_HOOKS_UNREGISTER_WHILE_IDLE, + "EM IDLE callback hooks disabled"); + + hook_fn.while_idle = func; + stat = hook_unregister(WHILE_IDLE_HOOK, hook_fn); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_HOOKS_UNREGISTER_WHILE_IDLE, + "While_idle hook unregister failed"); + + return EM_OK; +} diff --git a/src/event_machine_hw_specific.c b/src/event_machine_hw_specific.c index 603dc284..4dee27d1 100644 --- a/src/event_machine_hw_specific.c +++ b/src/event_machine_hw_specific.c @@ -1,202 +1,200 @@ -/* - * Copyright (c) 2015, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine HW specific functions and other additions. - * - */ -#include "em_include.h" - -/* - * core mask manipulation prototypes or inlined functions - */ - -void em_core_mask_zero(em_core_mask_t *mask) -{ - odp_cpumask_zero(&mask->odp_cpumask); -} - -void em_core_mask_set(int core, em_core_mask_t *mask) -{ - odp_cpumask_set(&mask->odp_cpumask, core); -} - -void em_core_mask_clr(int core, em_core_mask_t *mask) -{ - odp_cpumask_clr(&mask->odp_cpumask, core); -} - -int em_core_mask_isset(int core, const em_core_mask_t *mask) -{ - return odp_cpumask_isset(&mask->odp_cpumask, core); -} - -int em_core_mask_iszero(const em_core_mask_t *mask) -{ - odp_cpumask_t zero_mask; - - odp_cpumask_zero(&zero_mask); - return odp_cpumask_equal(&zero_mask, &mask->odp_cpumask); -} - -int em_core_mask_equal(const em_core_mask_t *mask1, const em_core_mask_t *mask2) -{ - return odp_cpumask_equal(&mask1->odp_cpumask, &mask2->odp_cpumask); -} - -void em_core_mask_set_count(int count, em_core_mask_t *mask) -{ - int i; - - for (i = 0; i < count; i++) - odp_cpumask_set(&mask->odp_cpumask, i); -} - -void em_core_mask_copy(em_core_mask_t *dst, const em_core_mask_t *src) -{ - odp_cpumask_copy(&dst->odp_cpumask, &src->odp_cpumask); -} - -int em_core_mask_count(const em_core_mask_t *mask) -{ - return odp_cpumask_count(&mask->odp_cpumask); -} - -void em_core_mask_set_bits(const uint64_t bits[], int len, em_core_mask_t *mask) -{ - const int maxlen = (ODP_CPUMASK_SIZE + 63) / 64; - const int maxcpu = ODP_CPUMASK_SIZE - 1; - int cpu; - - len = len > maxlen ? 
maxlen : len; - - for (int i = 0; i < len; i++) { - uint64_t mask64 = bits[i]; - - for (int j = 0; mask64 && j < 64; j++) { - cpu = i * 64 + j; - if (unlikely(cpu > maxcpu)) - return; - if (mask64 & ((uint64_t)1 << j)) { - odp_cpumask_set(&mask->odp_cpumask, cpu); - mask64 &= mask64 - 1; /*clear lowest set bit*/ - } - } - } -} - -int em_core_mask_get_bits(uint64_t bits[/*out*/], int len, - const em_core_mask_t *mask) -{ - int u64s_set; /* return value */ - int maxcpu = ODP_CPUMASK_SIZE - 1; - int i; - int j; - int cpu; - - if (unlikely(len < 1)) - return 0; - - if (maxcpu >= len * 64) - maxcpu = len * 64 - 1; - - /* zero out the bits[] array*/ - for (i = 0; i < len; i++) - bits[i] = 0; - - i = -1; - cpu = odp_cpumask_first(&mask->odp_cpumask); - while (cpu >= 0 && cpu <= maxcpu) { - i = cpu / 64; - j = cpu % 64; - bits[i] |= (uint64_t)1 << j; - cpu = odp_cpumask_next(&mask->odp_cpumask, cpu); - } - u64s_set = i + 1; /* >= 0 */ - return u64s_set; -} - -int em_core_mask_set_str(const char *mask_str, em_core_mask_t *mask) -{ - odp_cpumask_t str_mask; - - odp_cpumask_from_str(&str_mask, mask_str); - odp_cpumask_or(&mask->odp_cpumask, &mask->odp_cpumask, &str_mask); - - return 0; -} - -void em_core_mask_tostr(char *mask_str, int len, const em_core_mask_t *mask) -{ - int32_t ret = odp_cpumask_to_str(&mask->odp_cpumask, mask_str, len); - - if (unlikely(ret <= 0 && len > 0)) - mask_str[0] = '\0'; -} - -int em_core_mask_idx(int n, const em_core_mask_t *mask) -{ - if (unlikely((unsigned int)(n - 1) >= EM_MAX_CORES)) - return -1; - - int i = 1; - int cpu = odp_cpumask_first(&mask->odp_cpumask); - - while (cpu >= 0 && i < n) { - cpu = odp_cpumask_next(&mask->odp_cpumask, cpu); - i++; - } - - /* cpu >=0 only if odp_cpumask_first/next successful */ - return cpu; -} - -void em_core_mask_and(em_core_mask_t *dst, const em_core_mask_t *src1, - const em_core_mask_t *src2) -{ - odp_cpumask_and(&dst->odp_cpumask, - &src1->odp_cpumask, &src2->odp_cpumask); -} - -void em_core_mask_or(em_core_mask_t *dst, const em_core_mask_t *src1, - const em_core_mask_t *src2) -{ - odp_cpumask_or(&dst->odp_cpumask, - &src1->odp_cpumask, &src2->odp_cpumask); -} - -void em_core_mask_xor(em_core_mask_t *dst, const em_core_mask_t *src1, - const em_core_mask_t *src2) -{ - odp_cpumask_xor(&dst->odp_cpumask, - &src1->odp_cpumask, &src2->odp_cpumask); -} +/* + * Copyright (c) 2015, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine HW specific functions and other additions. + * + */ +#include "em_include.h" + +/* + * core mask manipulation prototypes or inlined functions + */ + +void em_core_mask_zero(em_core_mask_t *mask) +{ + odp_cpumask_zero(&mask->odp_cpumask); +} + +void em_core_mask_set(int core, em_core_mask_t *mask) +{ + odp_cpumask_set(&mask->odp_cpumask, core); +} + +void em_core_mask_clr(int core, em_core_mask_t *mask) +{ + odp_cpumask_clr(&mask->odp_cpumask, core); +} + +int em_core_mask_isset(int core, const em_core_mask_t *mask) +{ + return odp_cpumask_isset(&mask->odp_cpumask, core); +} + +int em_core_mask_iszero(const em_core_mask_t *mask) +{ + odp_cpumask_t zero_mask; + + odp_cpumask_zero(&zero_mask); + return odp_cpumask_equal(&zero_mask, &mask->odp_cpumask); +} + +int em_core_mask_equal(const em_core_mask_t *mask1, const em_core_mask_t *mask2) +{ + return odp_cpumask_equal(&mask1->odp_cpumask, &mask2->odp_cpumask); +} + +void em_core_mask_set_count(int count, em_core_mask_t *mask) +{ + for (int i = 0; i < count; i++) + odp_cpumask_set(&mask->odp_cpumask, i); +} + +void em_core_mask_copy(em_core_mask_t *dst, const em_core_mask_t *src) +{ + odp_cpumask_copy(&dst->odp_cpumask, &src->odp_cpumask); +} + +int em_core_mask_count(const em_core_mask_t *mask) +{ + return odp_cpumask_count(&mask->odp_cpumask); +} + +void em_core_mask_set_bits(const uint64_t bits[], int len, em_core_mask_t *mask) +{ + const int maxlen = (ODP_CPUMASK_SIZE + 63) / 64; + const int maxcpu = ODP_CPUMASK_SIZE - 1; + int cpu; + + len = len > maxlen ? 
maxlen : len; + + for (int i = 0; i < len; i++) { + uint64_t mask64 = bits[i]; + + for (int j = 0; mask64 && j < 64; j++) { + cpu = i * 64 + j; + if (unlikely(cpu > maxcpu)) + return; + if (mask64 & ((uint64_t)1 << j)) { + odp_cpumask_set(&mask->odp_cpumask, cpu); + mask64 &= mask64 - 1; /*clear lowest set bit*/ + } + } + } +} + +int em_core_mask_get_bits(uint64_t bits[/*out*/], int len, + const em_core_mask_t *mask) +{ + int u64s_set; /* return value */ + int maxcpu = ODP_CPUMASK_SIZE - 1; + int i; + int j; + int cpu; + + if (unlikely(len < 1)) + return 0; + + if (maxcpu >= len * 64) + maxcpu = len * 64 - 1; + + /* zero out the bits[] array*/ + for (i = 0; i < len; i++) + bits[i] = 0; + + i = -1; + cpu = odp_cpumask_first(&mask->odp_cpumask); + while (cpu >= 0 && cpu <= maxcpu) { + i = cpu / 64; + j = cpu % 64; + bits[i] |= (uint64_t)1 << j; + cpu = odp_cpumask_next(&mask->odp_cpumask, cpu); + } + u64s_set = i + 1; /* >= 0 */ + return u64s_set; +} + +int em_core_mask_set_str(const char *mask_str, em_core_mask_t *mask) +{ + odp_cpumask_t str_mask; + + odp_cpumask_from_str(&str_mask, mask_str); + odp_cpumask_or(&mask->odp_cpumask, &mask->odp_cpumask, &str_mask); + + return 0; +} + +void em_core_mask_tostr(char *mask_str, int len, const em_core_mask_t *mask) +{ + int32_t ret = odp_cpumask_to_str(&mask->odp_cpumask, mask_str, len); + + if (unlikely(ret <= 0 && len > 0)) + mask_str[0] = '\0'; +} + +int em_core_mask_idx(int n, const em_core_mask_t *mask) +{ + if (unlikely((unsigned int)(n - 1) >= EM_MAX_CORES)) + return -1; + + int i = 1; + int cpu = odp_cpumask_first(&mask->odp_cpumask); + + while (cpu >= 0 && i < n) { + cpu = odp_cpumask_next(&mask->odp_cpumask, cpu); + i++; + } + + /* cpu >=0 only if odp_cpumask_first/next successful */ + return cpu; +} + +void em_core_mask_and(em_core_mask_t *dst, const em_core_mask_t *src1, + const em_core_mask_t *src2) +{ + odp_cpumask_and(&dst->odp_cpumask, + &src1->odp_cpumask, &src2->odp_cpumask); +} + +void em_core_mask_or(em_core_mask_t *dst, const em_core_mask_t *src1, + const em_core_mask_t *src2) +{ + odp_cpumask_or(&dst->odp_cpumask, + &src1->odp_cpumask, &src2->odp_cpumask); +} + +void em_core_mask_xor(em_core_mask_t *dst, const em_core_mask_t *src1, + const em_core_mask_t *src2) +{ + odp_cpumask_xor(&dst->odp_cpumask, + &src1->odp_cpumask, &src2->odp_cpumask); +} diff --git a/src/event_machine_init.c b/src/event_machine_init.c index 30bf50e9..f947fc2a 100644 --- a/src/event_machine_init.c +++ b/src/event_machine_init.c @@ -1,450 +1,456 @@ -/* - * Copyright (c) 2018, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine initialization and termination. - * - */ - -#include "em_include.h" -#include "add-ons/event_timer/em_timer.h" - -/** EM shared memory */ -em_shm_t *em_shm; - -/** Core local variables */ -ENV_LOCAL em_locm_t em_locm ENV_CACHE_LINE_ALIGNED = { - .current.egrp = EM_EVENT_GROUP_UNDEF, - .current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE, - .local_queues.empty = 1, - .do_input_poll = false, - .do_output_drain = false, - .sync_api.in_progress = false - /* other members initialized to 0 or NULL as per C standard */ -}; - -void em_conf_init(em_conf_t *conf) -{ - if (unlikely(!conf)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_CONF_INIT, "Conf pointer NULL!"); - return; - } - memset(conf, 0, sizeof(em_conf_t)); - em_pool_cfg_init(&conf->default_pool_cfg); -} - -em_status_t -em_init(const em_conf_t *conf) -{ - em_status_t stat; - int ret; - - RETURN_ERROR_IF(!conf, EM_FATAL(EM_ERR_BAD_POINTER), EM_ESCOPE_INIT, - "Conf pointer NULL!"); - - stat = early_log_init(conf->log.log_fn, conf->log.vlog_fn); - RETURN_ERROR_IF(stat != EM_OK, EM_FATAL(stat), - EM_ESCOPE_INIT, "User provided log funcs invalid!"); - - /* Sanity check: em_shm should not be set yet */ - RETURN_ERROR_IF(em_shm != NULL, - EM_FATAL(EM_ERR_BAD_STATE), EM_ESCOPE_INIT, - "EM shared memory ptr set - already initialized?"); - /* Sanity check: either process- or thread-per-core, but not both */ - RETURN_ERROR_IF(!(conf->process_per_core ^ conf->thread_per_core), - EM_FATAL(EM_ERR_BAD_ARG), EM_ESCOPE_INIT, - "Select EITHER process-per-core OR thread-per-core!"); - - /* - * Reserve the EM shared memory once at start-up. 
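/*
 * Reviewer note (not part of the patch): the ODP_SHM_SINGLE_VA handling in
 * the shared-memory reservation below, as a stand-alone sketch. On ODP APIs
 * older than 1.33 the flag is set unconditionally; on newer APIs it is set
 * only when the implementation reports support for it. The shm name and the
 * size parameter are placeholders.
 */
#include <odp_api.h>

static odp_shm_t reserve_shm_single_va(uint64_t size)
{
	uint32_t flags = 0;

#if ODP_VERSION_API_NUM(1, 33, 0) > ODP_VERSION_API
	flags |= ODP_SHM_SINGLE_VA;
#else
	odp_shm_capability_t shm_capa;

	if (odp_shm_capability(&shm_capa) == 0 &&
	    (shm_capa.flags & ODP_SHM_SINGLE_VA))
		flags |= ODP_SHM_SINGLE_VA;
#endif
	return odp_shm_reserve("my_shm", size, ODP_CACHE_LINE_SIZE, flags);
}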
- */ - uint32_t flags = 0; - -#if ODP_VERSION_API_NUM(1, 33, 0) > ODP_VERSION_API - flags |= ODP_SHM_SINGLE_VA; -#else - odp_shm_capability_t shm_capa; - - ret = odp_shm_capability(&shm_capa); - RETURN_ERROR_IF(ret, EM_ERR_OPERATION_FAILED, EM_ESCOPE_INIT, - "shm capability error:%d", ret); - - if (shm_capa.flags & ODP_SHM_SINGLE_VA) - flags |= ODP_SHM_SINGLE_VA; -#endif - odp_shm_t shm = odp_shm_reserve("em_shm", sizeof(em_shm_t), - ODP_CACHE_LINE_SIZE, flags); - - RETURN_ERROR_IF(shm == ODP_SHM_INVALID, EM_ERR_ALLOC_FAILED, - EM_ESCOPE_INIT, "Shared memory reservation failed!"); - - em_shm = odp_shm_addr(shm); - - RETURN_ERROR_IF(em_shm == NULL, EM_ERR_NOT_FOUND, EM_ESCOPE_INIT, - "Shared memory ptr NULL!"); - - memset(em_shm, 0, sizeof(em_shm_t)); - - /* Store shm handle, can be used in em_term() to free the memory */ - em_shm->this_shm = shm; - - /* Store the given EM configuration */ - em_shm->conf = *conf; - - if (!EM_API_HOOKS_ENABLE) { - memset(&em_shm->conf.api_hooks, 0, - sizeof(em_shm->conf.api_hooks)); - } - - env_spinlock_init(&em_shm->init.lock); - - /* Initialize the log & error handling */ - log_init(); - error_init(); - - /* Initialize libconfig */ - ret = em_libconfig_init_global(&em_shm->libconfig); - RETURN_ERROR_IF(ret != 0, EM_ERR_OPERATION_FAILED, EM_ESCOPE_INIT, - "libconfig initialization failed:%d", ret); - - /* - * Initialize the physical-core <-> EM-core mapping - * - * EM-core <-> ODP-thread id mappings cannot be set up yet, - * the ODP thread id is assigned only when that thread is initialized. - * Set this mapping in core_map_init_local() - */ - stat = core_map_init(&em_shm->core_map, conf->core_count, - &conf->phys_mask); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "core_map_init() failed:%" PRI_STAT "", stat); - - /* Initialize the EM event dispatcher */ - stat = dispatch_init(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "dispatch_init() failed:%" PRI_STAT "", stat); - - /* - * Check validity of core masks for input_poll_fn and output_drain_fn. - * - * Masks must be a subset of logical EM core mask. Zero mask means - * that input_poll_fn and output_drain_fn are run on all EM cores. - */ - stat = input_poll_init(&em_shm->core_map.logic_mask, conf); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "input_poll_init() failed:%" PRI_STAT "", stat); - stat = output_drain_init(&em_shm->core_map.logic_mask, conf); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "output_drain_init() failed:%" PRI_STAT "", stat); - - /* - * Initialize Event State Verification (ESV), if enabled at compile time - */ - if (EM_ESV_ENABLE) { - stat = esv_init(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "esv_init() failed:%" PRI_STAT "", stat); - } - - /* Initialize EM callbacks/hooks */ - stat = hooks_init(&conf->api_hooks); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "hooks_init() failed:%" PRI_STAT "", stat); - - /* - * Initialize the EM buffer pools and create the EM_DEFAULT_POOL based - * on config. 
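/*
 * Reviewer note (not part of the patch): the core-mask subset rule for
 * input_poll_fn/output_drain_fn above, spelled out with the em_core_mask_*()
 * helpers from this patch. A mask is accepted if it is zero (run on all EM
 * cores) or if it is a subset of the logical EM core mask. Illustration
 * only, not the internal input_poll_init()/output_drain_init() code.
 */
static int poll_drain_mask_is_valid(const em_core_mask_t *logic_mask,
				    const em_core_mask_t *mask)
{
	em_core_mask_t tmp;

	if (em_core_mask_iszero(mask))
		return 1; /* zero mask: run on all EM cores */

	/* subset check: mask AND logic_mask must equal mask */
	em_core_mask_and(&tmp, mask, logic_mask);
	return em_core_mask_equal(&tmp, mask);
}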
- */ - stat = pool_init(&em_shm->mpool_tbl, &em_shm->mpool_pool, - &conf->default_pool_cfg); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "pool_init() failed:%" PRI_STAT "", stat); - - stat = event_init(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "event_init() failed:%" PRI_STAT "", stat); - - stat = event_group_init(&em_shm->event_group_tbl, - &em_shm->event_group_pool); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "event_group_init() failed:%" PRI_STAT "", stat); - - stat = queue_init(&em_shm->queue_tbl, &em_shm->queue_pool, - &em_shm->queue_pool_static); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "queue_init() failed:%" PRI_STAT "", stat); - - stat = queue_group_init(&em_shm->queue_group_tbl, - &em_shm->queue_group_pool); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "queue_group_init() failed:%" PRI_STAT "", stat); - - stat = atomic_group_init(&em_shm->atomic_group_tbl, - &em_shm->atomic_group_pool); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "atomic_group_init() failed:%" PRI_STAT "", stat); - - stat = eo_init(&em_shm->eo_tbl, &em_shm->eo_pool); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "eo_init() failed:%" PRI_STAT "", stat); - - stat = daemon_eo_queues_create(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "daemon_eo_queues_create() failed:%" PRI_STAT "", stat); - - daemon_eo_create(); - - /* timer add-on */ - if (conf->event_timer) { - stat = timer_init(&em_shm->timers); - RETURN_ERROR_IF(stat != EM_OK, - EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "timer_init() failed:%" PRI_STAT "", - stat); - } - - /* Initialize basic Event Chaining support */ - stat = chaining_init(&em_shm->event_chaining); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "chaining_init() failed:%" PRI_STAT "", stat); - - /* Initialize em_cli */ - stat = emcli_init(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, - "emcli_init() failed:%" PRI_STAT "", stat); - - return EM_OK; -} - -em_status_t -em_init_core(void) -{ - em_locm_t *const locm = &em_locm; - odp_shm_t shm; - em_shm_t *shm_addr; - em_status_t stat; - int init_count; - - /* Lookup the EM shared memory on each EM-core */ - shm = odp_shm_lookup("em_shm"); - RETURN_ERROR_IF(shm == ODP_SHM_INVALID, - EM_ERR_NOT_FOUND, EM_ESCOPE_INIT_CORE, - "Shared memory lookup failed!"); - - shm_addr = odp_shm_addr(shm); - RETURN_ERROR_IF(shm_addr == NULL, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, - "Shared memory ptr NULL"); - - if (shm_addr->conf.process_per_core && em_shm == NULL) - em_shm = shm_addr; - - RETURN_ERROR_IF(shm_addr != em_shm, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, - "Shared memory init fails: em_shm:%p != shm_addr:%p", - em_shm, shm_addr); - - /* Initialize core mappings not known yet in core_map_init() */ - stat = core_map_init_local(&em_shm->core_map); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "core_map_init_local() failed:%" PRI_STAT "", stat); - - stat = queue_group_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "queue_group_init_local() failed:%" PRI_STAT "", stat); - - stat = dispatch_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "dispatch_init_local() failed:%" PRI_STAT "", stat); - - /* Check if input_poll_fn should be executed on this core */ - stat = 
input_poll_init_local(&locm->do_input_poll, - locm->core_id, &em_shm->conf); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "input_poll_init_local() failed:%" PRI_STAT "", stat); - - /* Check if output_drain_fn should be executed on this core */ - stat = output_drain_init_local(&locm->do_output_drain, - locm->core_id, &em_shm->conf); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "output_drain_init_local() failed:%" PRI_STAT "", stat); - - stat = queue_init_local(); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_INIT_CORE, - "queue_init_local() failed:%" PRI_STAT "", stat); - - /* - * Initialize timer add-on. If global init was not done (config), - * this is just a NOP - */ - stat = timer_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "timer_init_local() failed:%" PRI_STAT "", stat); - - stat = sync_api_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "sync_api_init_local() failed:%" PRI_STAT "", stat); - - /* Init the EM CLI locally on this core (only if enabled) */ - stat = emcli_init_local(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, - "emcli_init_local() failed:%" PRI_STAT "", stat); - - /* This is an EM-core that will participate in EM event dispatching */ - locm->is_external_thr = false; - - env_spinlock_lock(&em_shm->init.lock); - init_count = ++em_shm->init.em_init_core_cnt; - env_spinlock_unlock(&em_shm->init.lock); - - /* Now OK to call EM APIs */ - - /* Print info about the Env&HW when the last core has initialized */ - if (init_count == em_core_count()) { - print_em_info(); - /* Last */ - em_shm->init.em_init_done = 1; - } - - env_sync_mem(); - - return EM_OK; -} - -em_status_t -em_term(const em_conf_t *conf) -{ - em_status_t stat; - int ret; - - (void)conf; - - if (em_shm->conf.event_timer) - timer_term(); - - stat = emcli_term(); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "emcli_term() failed:%" PRI_STAT "", stat); - - stat = chaining_term(&em_shm->event_chaining); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "chaining_term() failed:%" PRI_STAT "", stat); - - ret = em_libconfig_term_global(&em_shm->libconfig); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "EM config term failed:%d"); - - stat = pool_term(&em_shm->mpool_tbl); - RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "pool_term() failed:%" PRI_STAT "", stat); - - /* - * Free the EM shared memory - */ - ret = odp_shm_free(em_shm->this_shm); - RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, - "odp_shm_free() failed:%d", ret); - /* Set em_shm = NULL to allow a new call to em_init() */ - em_shm = NULL; - - return EM_OK; -} - -em_status_t -em_term_core(void) -{ - odp_event_t odp_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; - event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; - em_event_t em_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; - odp_queue_t odp_queue; - em_status_t stat = EM_OK; - em_status_t ret_stat = EM_OK; - bool esv_ena = esv_enabled(); - int num_events; - - if (em_core_id() == 0) - daemon_eo_shutdown(); - - /* Stop timer add-on. 
Just a NOP if timer was not enabled (config) */ - stat = timer_term_local(); - if (stat != EM_OK) { - ret_stat = stat; - INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, - "timer_term_local() fails: %" PRI_STAT "", stat); - } - - /* Term the EM CLI locally (if enabled) */ - stat = emcli_term_local(); - if (stat != EM_OK) { - ret_stat = stat; - INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, - "emcli_term_local() fails: %" PRI_STAT "", stat); - } - - /* Delete the local queues */ - stat = queue_term_local(); - if (stat != EM_OK) { - ret_stat = stat; - INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, - "queue_term_local() fails: %" PRI_STAT "", stat); - } - - /* - * Flush all events in the scheduler. - * Scheduler paused during return from em_dispatch() - */ - odp_schedule_resume(); - /* run loop twice: first with sched enabled and then paused */ - for (int i = 0; i < 2; i++) { - do { - num_events = - odp_schedule_multi_no_wait(&odp_queue, odp_ev_tbl, - EM_SCHED_MULTI_MAX_BURST); - if (num_events <= 0) - break; - /* - * Events might originate from outside of EM and need init. - */ - event_init_odp_multi(odp_ev_tbl, em_ev_tbl/*out*/, - ev_hdr_tbl/*out*/, num_events, - true/*is_extev*/); - if (esv_ena) - evstate_em2usr_multi(em_ev_tbl, ev_hdr_tbl, - num_events, EVSTATE__TERM); - - em_free_multi(em_ev_tbl, num_events); - } while (num_events > 0); - - odp_schedule_pause(); - } - - return ret_stat == EM_OK ? EM_OK : EM_ERR; -} +/* + * Copyright (c) 2018, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine initialization and termination. 
+ * + */ + +#include "em_include.h" +#include "add-ons/event_timer/em_timer.h" + +/** EM shared memory */ +em_shm_t *em_shm; + +/** Core local variables */ +ENV_LOCAL em_locm_t em_locm ENV_CACHE_LINE_ALIGNED = { + .current.egrp = EM_EVENT_GROUP_UNDEF, + .current.sched_context_type = EM_SCHED_CONTEXT_TYPE_NONE, + .local_queues.empty = 1, + .do_input_poll = false, + .do_output_drain = false, + .sync_api.in_progress = false + /* other members initialized to 0 or NULL as per C standard */ +}; + +void em_conf_init(em_conf_t *conf) +{ + if (unlikely(!conf)) { + INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), + EM_ESCOPE_CONF_INIT, "Conf pointer NULL!"); + return; + } + memset(conf, 0, sizeof(em_conf_t)); + em_pool_cfg_init(&conf->default_pool_cfg); +} + +em_status_t +em_init(const em_conf_t *conf) +{ + em_status_t stat; + int ret; + + RETURN_ERROR_IF(!conf, EM_FATAL(EM_ERR_BAD_POINTER), EM_ESCOPE_INIT, + "Conf pointer NULL!"); + + stat = early_log_init(conf->log.log_fn, conf->log.vlog_fn); + RETURN_ERROR_IF(stat != EM_OK, EM_FATAL(stat), + EM_ESCOPE_INIT, "User provided log funcs invalid!"); + + /* Sanity check: em_shm should not be set yet */ + RETURN_ERROR_IF(em_shm != NULL, + EM_FATAL(EM_ERR_BAD_STATE), EM_ESCOPE_INIT, + "EM shared memory ptr set - already initialized?"); + /* Sanity check: either process- or thread-per-core, but not both */ + RETURN_ERROR_IF(!(conf->process_per_core ^ conf->thread_per_core), + EM_FATAL(EM_ERR_BAD_ARG), EM_ESCOPE_INIT, + "Select EITHER process-per-core OR thread-per-core!"); + + /* + * Reserve the EM shared memory once at start-up. + */ + uint32_t flags = 0; + +#if ODP_VERSION_API_NUM(1, 33, 0) > ODP_VERSION_API + flags |= ODP_SHM_SINGLE_VA; +#else + odp_shm_capability_t shm_capa; + + ret = odp_shm_capability(&shm_capa); + RETURN_ERROR_IF(ret, EM_ERR_OPERATION_FAILED, EM_ESCOPE_INIT, + "shm capability error:%d", ret); + + if (shm_capa.flags & ODP_SHM_SINGLE_VA) + flags |= ODP_SHM_SINGLE_VA; +#endif + odp_shm_t shm = odp_shm_reserve("em_shm", sizeof(em_shm_t), + ODP_CACHE_LINE_SIZE, flags); + + RETURN_ERROR_IF(shm == ODP_SHM_INVALID, EM_ERR_ALLOC_FAILED, + EM_ESCOPE_INIT, "Shared memory reservation failed!"); + + em_shm = odp_shm_addr(shm); + + RETURN_ERROR_IF(em_shm == NULL, EM_ERR_NOT_FOUND, EM_ESCOPE_INIT, + "Shared memory ptr NULL!"); + + memset(em_shm, 0, sizeof(em_shm_t)); + + /* Store shm handle, can be used in em_term() to free the memory */ + em_shm->this_shm = shm; + + /* Store the given EM configuration */ + em_shm->conf = *conf; + + if (!EM_API_HOOKS_ENABLE) { + memset(&em_shm->conf.api_hooks, 0, + sizeof(em_shm->conf.api_hooks)); + } + + env_spinlock_init(&em_shm->init.lock); + + /* Initialize the log & error handling */ + log_init(); + error_init(); + + /* Initialize libconfig */ + ret = em_libconfig_init_global(&em_shm->libconfig); + RETURN_ERROR_IF(ret != 0, EM_ERR_OPERATION_FAILED, EM_ESCOPE_INIT, + "libconfig initialization failed:%d", ret); + + /* + * Initialize the physical-core <-> EM-core mapping + * + * EM-core <-> ODP-thread id mappings cannot be set up yet, + * the ODP thread id is assigned only when that thread is initialized. 
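/*
 * Reviewer note (not part of the patch): application-side start-up sketch
 * matching the sanity checks in em_init() above - em_conf_init() first, then
 * EITHER thread_per_core OR process_per_core (never both), then em_init()
 * once. Core count and mask are example values only.
 */
#include <event_machine.h>

static em_status_t start_em_sketch(void)
{
	em_conf_t conf;

	em_conf_init(&conf); /* zeroes conf, inits default_pool_cfg */
	conf.thread_per_core = 1; /* exactly one of the two modes */
	conf.process_per_core = 0;
	conf.core_count = 4;
	em_core_mask_zero(&conf.phys_mask);
	em_core_mask_set_count(4, &conf.phys_mask); /* phys cores 0..3 */

	return em_init(&conf); /* once, before any em_init_core() call */
}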
+ * Set this mapping in core_map_init_local() + */ + stat = core_map_init(&em_shm->core_map, conf->core_count, + &conf->phys_mask); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "core_map_init() failed:%" PRI_STAT "", stat); + + /* Initialize the EM event dispatcher */ + stat = dispatch_init(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "dispatch_init() failed:%" PRI_STAT "", stat); + + /* + * Check validity of core masks for input_poll_fn and output_drain_fn. + * + * Masks must be a subset of logical EM core mask. Zero mask means + * that input_poll_fn and output_drain_fn are run on all EM cores. + */ + stat = input_poll_init(&em_shm->core_map.logic_mask, conf); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "input_poll_init() failed:%" PRI_STAT "", stat); + stat = output_drain_init(&em_shm->core_map.logic_mask, conf); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "output_drain_init() failed:%" PRI_STAT "", stat); + + /* + * Initialize Event State Verification (ESV), if enabled at compile time + */ + if (EM_ESV_ENABLE) { + stat = esv_init(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "esv_init() failed:%" PRI_STAT "", stat); + } + + /* Initialize EM callbacks/hooks */ + stat = hooks_init(&conf->api_hooks, &conf->idle_hooks); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "hooks_init() failed:%" PRI_STAT "", stat); + + /* + * Initialize the EM buffer pools and create the EM_DEFAULT_POOL. + * Create also startup pools if configured in the runtime config + * file through option 'startup_pools'. + */ + stat = pool_init(&em_shm->mpool_tbl, &em_shm->mpool_pool, + &conf->default_pool_cfg); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "pool_init() failed:%" PRI_STAT "", stat); + + stat = event_init(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "event_init() failed:%" PRI_STAT "", stat); + + stat = event_group_init(&em_shm->event_group_tbl, + &em_shm->event_group_pool); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "event_group_init() failed:%" PRI_STAT "", stat); + + stat = queue_init(&em_shm->queue_tbl, &em_shm->queue_pool, + &em_shm->queue_pool_static); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "queue_init() failed:%" PRI_STAT "", stat); + + stat = queue_group_init(&em_shm->queue_group_tbl, + &em_shm->queue_group_pool); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "queue_group_init() failed:%" PRI_STAT "", stat); + + stat = atomic_group_init(&em_shm->atomic_group_tbl, + &em_shm->atomic_group_pool); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "atomic_group_init() failed:%" PRI_STAT "", stat); + + stat = eo_init(&em_shm->eo_tbl, &em_shm->eo_pool); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "eo_init() failed:%" PRI_STAT "", stat); + + stat = daemon_eo_queues_create(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "daemon_eo_queues_create() failed:%" PRI_STAT "", stat); + + daemon_eo_create(); + + /* timer add-on */ + if (conf->event_timer) { + stat = timer_init(&em_shm->timers); + RETURN_ERROR_IF(stat != EM_OK, + EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "timer_init() failed:%" PRI_STAT "", + stat); + } + + /* Initialize basic Event Chaining support */ + stat = chaining_init(&em_shm->event_chaining); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, 
EM_ESCOPE_INIT, + "chaining_init() failed:%" PRI_STAT "", stat); + + /* Initialize em_cli */ + stat = emcli_init(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT, + "emcli_init() failed:%" PRI_STAT "", stat); + + return EM_OK; +} + +em_status_t +em_init_core(void) +{ + em_locm_t *const locm = &em_locm; + odp_shm_t shm; + em_shm_t *shm_addr; + em_status_t stat; + int init_count; + + /* Lookup the EM shared memory on each EM-core */ + shm = odp_shm_lookup("em_shm"); + RETURN_ERROR_IF(shm == ODP_SHM_INVALID, + EM_ERR_NOT_FOUND, EM_ESCOPE_INIT_CORE, + "Shared memory lookup failed!"); + + shm_addr = odp_shm_addr(shm); + RETURN_ERROR_IF(shm_addr == NULL, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, + "Shared memory ptr NULL"); + + if (shm_addr->conf.process_per_core && em_shm == NULL) + em_shm = shm_addr; + + RETURN_ERROR_IF(shm_addr != em_shm, EM_ERR_BAD_POINTER, EM_ESCOPE_INIT_CORE, + "Shared memory init fails: em_shm:%p != shm_addr:%p", + em_shm, shm_addr); + + /* Initialize core mappings not known yet in core_map_init() */ + stat = core_map_init_local(&em_shm->core_map); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "core_map_init_local() failed:%" PRI_STAT "", stat); + + stat = queue_group_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "queue_group_init_local() failed:%" PRI_STAT "", stat); + + stat = dispatch_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "dispatch_init_local() failed:%" PRI_STAT "", stat); + + /* Check if input_poll_fn should be executed on this core */ + stat = input_poll_init_local(&locm->do_input_poll, + locm->core_id, &em_shm->conf); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "input_poll_init_local() failed:%" PRI_STAT "", stat); + + /* Check if output_drain_fn should be executed on this core */ + stat = output_drain_init_local(&locm->do_output_drain, + locm->core_id, &em_shm->conf); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "output_drain_init_local() failed:%" PRI_STAT "", stat); + + stat = queue_init_local(); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_INIT_CORE, + "queue_init_local() failed:%" PRI_STAT "", stat); + + /* + * Initialize timer add-on. 
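/*
 * Reviewer note (not part of the patch): how em_init_core() above and
 * em_term_core() further below are intended to be driven - once per EM core,
 * around the dispatch loop. 'exit_flag' and the dispatch round count are
 * placeholders chosen for this sketch.
 */
static volatile int exit_flag; /* set by the application to stop dispatch */

static int em_core_thread_sketch(void *arg)
{
	(void)arg;

	if (em_init_core() != EM_OK) /* per core, after em_init() */
		return -1;

	while (!exit_flag)
		em_dispatch(1000); /* dispatch a batch of rounds at a time */

	if (em_term_core() != EM_OK) /* per core, before em_term() */
		return -1;

	return 0;
}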
If global init was not done (config), + * this is just a NOP + */ + stat = timer_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "timer_init_local() failed:%" PRI_STAT "", stat); + + stat = sync_api_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "sync_api_init_local() failed:%" PRI_STAT "", stat); + + /* Init the EM CLI locally on this core (only if enabled) */ + stat = emcli_init_local(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_INIT_CORE, + "emcli_init_local() failed:%" PRI_STAT "", stat); + + /* This is an EM-core that will participate in EM event dispatching */ + locm->is_external_thr = false; + + /* Initialize debug timestamps to 1 if enabled to differentiate from disabled */ + if (EM_DEBUG_TIMESTAMP_ENABLE) + for (int i = 0; i < EM_DEBUG_TSP_LAST; i++) + locm->debug_ts[i] = 1; + + env_spinlock_lock(&em_shm->init.lock); + init_count = ++em_shm->init.em_init_core_cnt; + env_spinlock_unlock(&em_shm->init.lock); + + /* Now OK to call EM APIs */ + + /* Print info about the Env&HW when the last core has initialized */ + if (init_count == em_core_count()) { + print_em_info(); + /* Last */ + em_shm->init.em_init_done = 1; + } + + env_sync_mem(); + + return EM_OK; +} + +em_status_t +em_term(const em_conf_t *conf) +{ + em_status_t stat; + int ret; + + (void)conf; + + if (em_shm->conf.event_timer) + timer_term(); + + stat = emcli_term(); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, + "emcli_term() failed:%" PRI_STAT "", stat); + + stat = chaining_term(&em_shm->event_chaining); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, + "chaining_term() failed:%" PRI_STAT "", stat); + + ret = em_libconfig_term_global(&em_shm->libconfig); + RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, + "EM config term failed:%d", ret); + + stat = pool_term(&em_shm->mpool_tbl); + RETURN_ERROR_IF(stat != EM_OK, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, + "pool_term() failed:%" PRI_STAT "", stat); + + /* + * Free the EM shared memory + */ + ret = odp_shm_free(em_shm->this_shm); + RETURN_ERROR_IF(ret != 0, EM_ERR_LIB_FAILED, EM_ESCOPE_TERM, + "odp_shm_free() failed:%d", ret); + /* Set em_shm = NULL to allow a new call to em_init() */ + em_shm = NULL; + + return EM_OK; +} + +em_status_t +em_term_core(void) +{ + odp_event_t odp_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; + event_hdr_t *ev_hdr_tbl[EM_SCHED_MULTI_MAX_BURST]; + em_event_t em_ev_tbl[EM_SCHED_MULTI_MAX_BURST]; + odp_queue_t odp_queue; + em_status_t stat = EM_OK; + em_status_t ret_stat = EM_OK; + bool esv_ena = esv_enabled(); + int num_events; + + if (em_core_id() == 0) + daemon_eo_shutdown(); + + /* Stop timer add-on. Just a NOP if timer was not enabled (config) */ + stat = timer_term_local(); + if (stat != EM_OK) { + ret_stat = stat; + INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, + "timer_term_local() fails: %" PRI_STAT "", stat); + } + + /* Term the EM CLI locally (if enabled) */ + stat = emcli_term_local(); + if (stat != EM_OK) { + ret_stat = stat; + INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, + "emcli_term_local() fails: %" PRI_STAT "", stat); + } + + /* Delete the local queues */ + stat = queue_term_local(); + if (stat != EM_OK) { + ret_stat = stat; + INTERNAL_ERROR(stat, EM_ESCOPE_TERM_CORE, + "queue_term_local() fails: %" PRI_STAT "", stat); + } + + /* + * Flush all events in the scheduler.
+ * Scheduler paused during return from em_dispatch() + */ + odp_schedule_resume(); + /* run loop twice: first with sched enabled and then paused */ + for (int i = 0; i < 2; i++) { + do { + num_events = + odp_schedule_multi_no_wait(&odp_queue, odp_ev_tbl, + EM_SCHED_MULTI_MAX_BURST); + if (num_events <= 0) + break; + /* + * Events might originate from outside of EM and need init. + */ + event_init_odp_multi(odp_ev_tbl, em_ev_tbl/*out*/, + ev_hdr_tbl/*out*/, num_events, + true/*is_extev*/); + if (esv_ena) + evstate_em2usr_multi(em_ev_tbl, ev_hdr_tbl, + num_events, EVSTATE__TERM); + + em_free_multi(em_ev_tbl, num_events); + } while (num_events > 0); + + odp_schedule_pause(); + } + + return ret_stat == EM_OK ? EM_OK : EM_ERR; +} diff --git a/src/event_machine_odp_ext.c b/src/event_machine_odp_ext.c index fb4f617f..edd07b1b 100644 --- a/src/event_machine_odp_ext.c +++ b/src/event_machine_odp_ext.c @@ -1,192 +1,369 @@ -/* - * Copyright (c) 2015-2021, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - - /** - * @file - * Event Machine ODP API extensions - * - */ - -#include -#include - -odp_queue_t em_odp_queue_odp(em_queue_t queue) -{ - const queue_elem_t *queue_elem = queue_elem_get(queue); - - if (unlikely(queue_elem == NULL)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_ODP_EXT, - "queue_elem ptr NULL!"); - return ODP_QUEUE_INVALID; - } - - return queue_elem->odp_queue; -} - -em_queue_t em_odp_queue_em(odp_queue_t queue) -{ - const queue_elem_t *queue_elem = odp_queue_context(queue); - - if (unlikely(queue_elem == NULL)) - return EM_QUEUE_UNDEF; - - return queue_elem->queue; -} - -uint32_t em_odp_event_hdr_size(void) -{ - return sizeof(event_hdr_t); -} - -odp_event_t em_odp_event2odp(em_event_t event) -{ - return event_em2odp(event); -} - -void em_odp_events2odp(const em_event_t events[/*num*/], - odp_event_t odp_events[/*out:num*/], int num) -{ - if (unlikely(num <= 0)) - return; - - events_em2odp(events, odp_events/*out*/, num); -} - -em_event_t em_odp_event2em(odp_event_t odp_event) -{ - em_event_t event = event_init_odp(odp_event, false/*!is_extev*/, NULL); - - return event; -} - -void em_odp_events2em(const odp_event_t odp_events[/*num*/], - em_event_t events[/*out:num*/], int num) -{ - if (unlikely(num <= 0)) - return; - - event_hdr_t *ev_hdrs[num]; - - event_init_odp_multi(odp_events, events/*out*/, ev_hdrs/*out*/, num, - false/*!is_extev*/); -} - -int em_odp_pool2odp(em_pool_t pool, odp_pool_t odp_pools[/*out*/], int num) -{ - const mpool_elem_t *pool_elem = pool_elem_get(pool); - - if (unlikely(!pool_elem || !pool_allocated(pool_elem) || - !odp_pools || num <= 0)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ODP_EXT, - "Inv.args: pool:%" PRI_POOL " odp_pools:%p num:%d", - pool, odp_pools, num); - return 0; - } - - int num_subpools = MIN(num, pool_elem->num_subpools); - int i; - - for (i = 0; i < num_subpools; i++) - odp_pools[i] = pool_elem->odp_pool[i]; - - /* return the number of odp-pools filled into 'odp_pools[]' */ - return num_subpools; -} - -em_pool_t em_odp_pool2em(odp_pool_t odp_pool) -{ - if (unlikely(odp_pool == ODP_POOL_INVALID)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ODP_EXT, - "Inv.arg: odp_pool invalid"); - return EM_POOL_UNDEF; - } - - return pool_odp2em(odp_pool); -} - -int pkt_enqueue(const odp_packet_t pkt_tbl[/*num*/], int num, em_queue_t queue) -{ - if (unlikely(!pkt_tbl || num <= 0)) - return 0; - - queue_elem_t *const q_elem = queue_elem_get(queue); - odp_event_t odp_event_tbl[num]; - int sent = 0; - - odp_packet_to_event_multi(pkt_tbl, odp_event_tbl/*out*/, num); - - if (q_elem != NULL && q_elem->scheduled) { - /* - * Enqueue the events into a scheduled em-odp queue. - * No need to init the ev-hdrs - init is done in dispatch. 
- */ - sent = odp_queue_enq_multi(q_elem->odp_queue, - odp_event_tbl, num); - } else { - em_event_t event_tbl[num]; - event_hdr_t *evhdr_tbl[num]; - - events_odp2em(odp_event_tbl, event_tbl/*out*/, num); - - /* Init the event-hdrs for incoming non-scheduled pkts */ - event_init_pkt_multi(pkt_tbl, event_tbl/*in/out*/, - evhdr_tbl/*out*/, num, true /*is_extev*/); - - if (q_elem == NULL) { - /* Send directly out via event chaining */ - if (likely(queue_external(queue))) - sent = send_chaining_multi(event_tbl, evhdr_tbl, - num, queue); - } else if (q_elem->type == EM_QUEUE_TYPE_UNSCHEDULED) { - /* Enqueue into an unscheduled em-odp queue */ - sent = odp_queue_enq_multi(q_elem->odp_queue, - odp_event_tbl, num); - } else if (q_elem->type == EM_QUEUE_TYPE_LOCAL) { - /* Send into an local em-odp queue */ - sent = send_local_multi(event_tbl, evhdr_tbl, - num, q_elem); - } else if (q_elem->type == EM_QUEUE_TYPE_OUTPUT) { - /* Send directly out via an output em-odp queue */ - sent = send_output_multi(event_tbl, evhdr_tbl, - num, q_elem); - } - } - - if (unlikely(sent < num)) { - sent = unlikely(sent < 0) ? 0 : sent; - odp_packet_free_multi(&pkt_tbl[sent], num - sent); - /* - * Event state checking: No need to adjust the event state - * since the events were never enqueued into EM. - */ - } - - return sent; -} +/* + * Copyright (c) 2015-2021, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
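/*
 * Reviewer note (not part of the patch): round-trip sketch for the
 * queue-handle conversions defined below. Note that em_odp_queue_em() now
 * additionally validates that the odp queue context really is an EM queue
 * elem before trusting it.
 */
#include <event_machine.h>
#include <event_machine/platform/event_machine_odp_ext.h>

static void queue_handle_roundtrip(em_queue_t em_q)
{
	odp_queue_t odp_q = em_odp_queue_odp(em_q);

	if (odp_q == ODP_QUEUE_INVALID)
		return; /* 'em_q' was not a valid EM queue */

	/* back to EM: EM_QUEUE_UNDEF if 'odp_q' is not EM-managed */
	em_queue_t back = em_odp_queue_em(odp_q);

	(void)back; /* back == em_q for EM-managed queues */
}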
+ */ + + /** + * @file + * Event Machine ODP API extensions + * + */ + +#include +#include + +odp_queue_t em_odp_queue_odp(em_queue_t queue) +{ + const queue_elem_t *queue_elem = queue_elem_get(queue); + + if (unlikely(queue_elem == NULL)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_ODP_EXT, + "queue_elem ptr NULL!"); + return ODP_QUEUE_INVALID; + } + + return queue_elem->odp_queue; +} + +em_queue_t em_odp_queue_em(odp_queue_t queue) +{ + const queue_elem_t *queue_elem = odp_queue_context(queue); + + /* verify that the odp context is an EM queue elem */ + if (unlikely(!queue_elem || + queue_elem->valid_check != QUEUE_ELEM_VALID)) + return EM_QUEUE_UNDEF; + + return queue_elem->queue; +} + +/** + * @brief Helper to em_odp_pktin_event_queues2em() + * + * @param odp_queue ODP pktin-queue to convert to an EM-queue. + * The given ODP queue handle must have been returned by + * odp_pktin_event_queue(). + * @return em_queue_t: New EM queue mapped to use the ODP pktin event queue + */ +static em_queue_t pktin_event_queue2em(odp_queue_t odp_queue) +{ + em_queue_t queue = EM_QUEUE_UNDEF; /* return value */ + const char *err_str = ""; + odp_queue_info_t odp_qinfo; + int ret = 0; + + if (unlikely(odp_queue == ODP_QUEUE_INVALID)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ODP_EXT, + "Bad arg: ODP queue invalid!"); + return EM_QUEUE_UNDEF; + } + + queue = em_odp_queue_em(odp_queue); + if (unlikely(queue != EM_QUEUE_UNDEF)) { + /* The given ODP queue is already associated with an EM queue */ + return queue; + } + + ret = odp_queue_info(odp_queue, &odp_qinfo); + if (unlikely(ret || odp_qinfo.param.type != ODP_QUEUE_TYPE_SCHED)) { + err_str = "odp_queue_info(): unsuitable odp queue"; + goto err_return; + } + + /* + * Determine EM queue priority: + */ + odp_schedule_prio_t odp_prio = odp_schedule_default_prio(); + em_queue_prio_t prio = EM_QUEUE_PRIO_UNDEF; + int num_prio = em_queue_get_num_prio(NULL); + + for (int i = 0; i < num_prio; i++) { + prio_em2odp(i, &odp_prio/*out*/); + if (odp_prio == odp_qinfo.param.sched.prio) { + prio = i; + break; + } + } + if (unlikely(prio == EM_QUEUE_PRIO_UNDEF)) { + err_str = "Can't convert ODP qprio to EM qprio"; + goto err_return; + } + + /* + * Determine scheduled EM queue type + */ + em_queue_type_t queue_type = EM_QUEUE_TYPE_UNDEF; + + ret = scheduled_queue_type_odp2em(odp_qinfo.param.sched.sync, + &queue_type /*out*/); + if (unlikely(ret)) { + err_str = "Can't convert ODP qtype to EM qtype"; + goto err_return; + } + + /* + * Determine EM queue group + */ + em_queue_group_t queue_group; + const queue_group_elem_t *qgrp_elem; + + queue_group = em_queue_group_get_first(NULL); + while (queue_group != EM_QUEUE_GROUP_UNDEF) { + qgrp_elem = queue_group_elem_get(queue_group); + if (qgrp_elem && + qgrp_elem->odp_sched_group == odp_qinfo.param.sched.group) + break; /* found match! 
*/ + queue_group = em_queue_group_get_next(); + } + if (unlikely(queue_group == EM_QUEUE_GROUP_UNDEF)) { + err_str = "No matching EM Queue Group found"; + goto err_return; + } + + /* + * Set EM queue name based on the ODP queue name + */ + char q_name[ODP_QUEUE_NAME_LEN]; + + snprintf(q_name, sizeof(q_name), "EM:%s", odp_qinfo.name); + q_name[ODP_QUEUE_NAME_LEN - 1] = '\0'; + + /* + * Set up the EM queue based on gathered info + */ + queue_setup_t setup = {.name = q_name, + .type = queue_type, + .prio = prio, + .atomic_group = EM_ATOMIC_GROUP_UNDEF, + .queue_group = queue_group, + .conf = NULL}; + + queue = queue_alloc(EM_QUEUE_UNDEF, &err_str); + if (unlikely(queue == EM_QUEUE_UNDEF)) + goto err_return; /* err_str set by queue_alloc() */ + + queue_elem_t *q_elem = queue_elem_get(queue); + + if (unlikely(!q_elem)) { + err_str = "Queue elem NULL!"; + goto err_return; + } + + /* Set common queue-elem fields based on 'setup' */ + queue_setup_common(q_elem, &setup); + /* Set queue-elem fields for a pktin event queue */ + q_elem->odp_queue = odp_queue; + q_elem->is_pktin = true; + q_elem->scheduled = EM_TRUE; + q_elem->state = EM_QUEUE_STATE_INIT; + + /* + * Note: The ODP queue context points to the EM queue elem. + * The EM queue context set by the user using the API function + * em_queue_set_context() is accessed through the queue_elem_t::context + * and retrieved with em_queue_get_context() or passed by EM to the + * EO-receive function for scheduled queues. + * + * Set the odp context data length (in bytes) for potential prefetching. + * The ODP implementation may use this value as a hint for the number + * of context data bytes to prefetch. + */ + ret = odp_queue_context_set(odp_queue, q_elem, sizeof(*q_elem)); + if (unlikely(ret)) { + err_str = "odp_queue_context_set() failed"; + goto err_return; + } + + return queue; /* success */ + +err_return: + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_ODP_EXT, + "%s (ret=%d)", err_str, ret); + if (EM_DEBUG_PRINT && odp_queue != ODP_QUEUE_INVALID) + odp_queue_print(odp_queue); + if (queue != EM_QUEUE_UNDEF) + queue_free(queue); + return EM_QUEUE_UNDEF; +} + +int em_odp_pktin_event_queues2em(const odp_queue_t odp_pktin_event_queues[/*num*/], + em_queue_t queues[/*out:num*/], int num) +{ + int i; + + for (i = 0; i < num; i++) { + queues[i] = pktin_event_queue2em(odp_pktin_event_queues[i]); + if (unlikely(queues[i] == EM_QUEUE_UNDEF)) { + INTERNAL_ERROR(EM_ERR_OPERATION_FAILED, EM_ESCOPE_ODP_EXT, + "Cannot create EM-Q using pktin-queue:%d (hdl:%" PRIu64 ")", + i, odp_queue_to_u64(odp_pktin_event_queues[i])); + break; + } + } + + return i; +} + +uint32_t em_odp_event_hdr_size(void) +{ + return sizeof(event_hdr_t); +} + +odp_event_t em_odp_event2odp(em_event_t event) +{ + return event_em2odp(event); +} + +void em_odp_events2odp(const em_event_t events[/*num*/], + odp_event_t odp_events[/*out:num*/], int num) +{ + if (unlikely(num <= 0)) + return; + + events_em2odp(events, odp_events/*out*/, num); +} + +em_event_t em_odp_event2em(odp_event_t odp_event) +{ + em_event_t event = event_init_odp(odp_event, false/*!is_extev*/, NULL); + + return event; +} + +void em_odp_events2em(const odp_event_t odp_events[/*num*/], + em_event_t events[/*out:num*/], int num) +{ + if (unlikely(num <= 0)) + return; + + event_hdr_t *ev_hdrs[num]; + + event_init_odp_multi(odp_events, events/*out*/, ev_hdrs/*out*/, num, + false/*!is_extev*/); +} + +int em_odp_pool2odp(em_pool_t pool, odp_pool_t odp_pools[/*out*/], int num) +{ + const mpool_elem_t *pool_elem = 
pool_elem_get(pool); + + if (unlikely(!pool_elem || !pool_allocated(pool_elem) || + !odp_pools || num <= 0)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ODP_EXT, + "Inv.args: pool:%" PRI_POOL " odp_pools:%p num:%d", + pool, odp_pools, num); + return 0; + } + + int num_subpools = MIN(num, pool_elem->num_subpools); + + for (int i = 0; i < num_subpools; i++) + odp_pools[i] = pool_elem->odp_pool[i]; + + /* return the number of odp-pools filled into 'odp_pools[]' */ + return num_subpools; +} + +em_pool_t em_odp_pool2em(odp_pool_t odp_pool) +{ + if (unlikely(odp_pool == ODP_POOL_INVALID)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_ODP_EXT, + "Inv.arg: odp_pool invalid"); + return EM_POOL_UNDEF; + } + + return pool_odp2em(odp_pool); +} + +int pkt_enqueue(const odp_packet_t pkt_tbl[/*num*/], int num, em_queue_t queue) +{ + if (unlikely(!pkt_tbl || num <= 0)) + return 0; + + queue_elem_t *const q_elem = queue_elem_get(queue); + odp_event_t odp_event_tbl[num]; + int sent = 0; + + odp_packet_to_event_multi(pkt_tbl, odp_event_tbl/*out*/, num); + + if (q_elem != NULL && q_elem->scheduled) { + /* + * Enqueue the events into a scheduled em-odp queue. + * No need to init the ev-hdrs - init is done in dispatch. + */ + sent = odp_queue_enq_multi(q_elem->odp_queue, + odp_event_tbl, num); + } else { + em_event_t event_tbl[num]; + event_hdr_t *evhdr_tbl[num]; + + events_odp2em(odp_event_tbl, event_tbl/*out*/, num); + + /* Init the event-hdrs for incoming non-scheduled pkts */ + event_init_pkt_multi(pkt_tbl, event_tbl/*in/out*/, + evhdr_tbl/*out*/, num, true /*is_extev*/); + + if (q_elem == NULL) { + /* Send directly out via event chaining */ + if (likely(queue_external(queue))) + sent = send_chaining_multi(event_tbl, num, queue); + } else if (q_elem->type == EM_QUEUE_TYPE_UNSCHEDULED) { + /* Enqueue into an unscheduled em-odp queue */ + sent = odp_queue_enq_multi(q_elem->odp_queue, + odp_event_tbl, num); + } else if (q_elem->type == EM_QUEUE_TYPE_LOCAL) { + /* Send into a local em-odp queue */ + sent = send_local_multi(event_tbl, num, q_elem); + } else if (q_elem->type == EM_QUEUE_TYPE_OUTPUT) { + /* Send directly out via an output em-odp queue */ + sent = send_output_multi(event_tbl, num, q_elem); + } + } + + if (unlikely(sent < num)) { + sent = unlikely(sent < 0) ? 0 : sent; + odp_packet_free_multi(&pkt_tbl[sent], num - sent); + /* + * Event state checking: No need to adjust the event state + * since the events were never enqueued into EM. + */ + } + + return sent; +} + +odp_schedule_group_t em_odp_qgrp2odp(em_queue_group_t queue_group) +{ + const queue_group_elem_t *qgrp_elem = + queue_group_elem_get(queue_group); + + RETURN_ERROR_IF(!qgrp_elem || !queue_group_allocated(qgrp_elem), + EM_ERR_BAD_ARG, EM_ESCOPE_ODP_EXT, + "Invalid queue group:%" PRI_QGRP "", queue_group); + + return qgrp_elem->odp_sched_group; +} diff --git a/src/event_machine_pool.c b/src/event_machine_pool.c index 3c3c2a1f..a098ab5b 100644 --- a/src/event_machine_pool.c +++ b/src/event_machine_pool.c @@ -1,290 +1,298 @@ -/* - * Copyright (c) 2018, Nokia Solutions and Networks - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer.
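/*
 * Reviewer note (not part of the patch): sketch combining em_odp_pool2odp()
 * and the new em_odp_qgrp2odp() above to drive direct ODP calls on EM
 * objects. Illustration only; error handling reduced to the UNDEF/INVALID
 * checks done by the conversion functions themselves.
 */
#include <odp_api.h>
#include <event_machine.h>
#include <event_machine/platform/event_machine_odp_ext.h>

static void em_to_odp_handles(em_pool_t pool, em_queue_group_t qgrp)
{
	odp_pool_t odp_pools[EM_MAX_SUBPOOLS];
	int num = em_odp_pool2odp(pool, odp_pools, EM_MAX_SUBPOOLS);

	for (int i = 0; i < num; i++)
		odp_pool_print(odp_pools[i]); /* direct ODP call per subpool */

	odp_schedule_group_t grp = em_odp_qgrp2odp(qgrp);

	(void)grp; /* usable with the odp_schedule_group_*() APIs */
}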
- * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * - * Event Machine event pool functions. - * - */ - -#include "em_include.h" - -/* per core (thread) state for em_atomic_group_get_next() */ -static ENV_LOCAL unsigned int _pool_tbl_iter_idx; - -void em_pool_cfg_init(em_pool_cfg_t *const pool_cfg) -{ - odp_pool_param_t odp_pool_defaults; - uint32_t buf_cache_sz; - uint32_t pkt_cache_sz; - uint32_t cache_sz; - - if (unlikely(!pool_cfg)) { - INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), - EM_ESCOPE_POOL_CFG_INIT, - "pool_cfg pointer NULL!"); - return; - } - - odp_pool_param_init(&odp_pool_defaults); - memset(pool_cfg, 0, sizeof(*pool_cfg)); - - pool_cfg->event_type = EM_EVENT_TYPE_UNDEF; - - buf_cache_sz = odp_pool_defaults.buf.cache_size; - pkt_cache_sz = odp_pool_defaults.pkt.cache_size; - cache_sz = MIN(buf_cache_sz, pkt_cache_sz); - - for (int i = 0; i < EM_MAX_SUBPOOLS; i++) - pool_cfg->subpool[i].cache_size = cache_sz; - - pool_cfg->__internal_check = EM_CHECK_INIT_CALLED; -} - -em_pool_t -em_pool_create(const char *name, em_pool_t pool, const em_pool_cfg_t *pool_cfg) -{ - em_pool_t pool_created; - const char *err_str = ""; - - /* Verify config */ - int err = invalid_pool_cfg(pool_cfg, &err_str); - - if (unlikely(err)) { - INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_POOL_CREATE, - "Pool create: invalid pool-config:%d:\n" - "%s", err, err_str); - return EM_POOL_UNDEF; - } - - pool_created = pool_create(name, pool, pool_cfg); - - if (unlikely(pool_created == EM_POOL_UNDEF || - (pool != EM_POOL_UNDEF && pool != pool_created))) { - INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_POOL_CREATE, - "Pool create failed,\n" - "requested:%" PRI_POOL " created:%" PRI_POOL "", - pool, pool_created); - return EM_POOL_UNDEF; - } - - return pool_created; -} - -em_status_t -em_pool_delete(em_pool_t pool) -{ - em_status_t stat; - - stat = pool_delete(pool); - RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_POOL_DELETE, - "Pool delete failed"); - - return EM_OK; -} - -em_pool_t -em_pool_find(const char *name) -{ - if (name && *name) - return pool_find(name); - - return EM_POOL_UNDEF; -} - -size_t -em_pool_get_name(em_pool_t pool, char *name /*out*/, size_t maxlen) -{ - const mpool_elem_t *mpool_elem = pool_elem_get(pool); - size_t len = 0; - - if (unlikely(name == NULL || maxlen == 0)) { - INTERNAL_ERROR(EM_ERR_BAD_POINTER, 
EM_ESCOPE_POOL_GET_NAME, - "Invalid args: name=0x%" PRIx64 ", maxlen=%zu", - name, maxlen); - return 0; - } - - if (unlikely(mpool_elem == NULL || !pool_allocated(mpool_elem))) { - INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_POOL_GET_NAME, - "Invalid Pool:%" PRI_POOL "", pool); - name[0] = '\0'; - return 0; - } - - len = strnlen(mpool_elem->name, sizeof(mpool_elem->name) - 1); - if (maxlen - 1 < len) - len = maxlen - 1; - - memcpy(name, mpool_elem->name, len); - name[len] = '\0'; - - return len; -} - -em_pool_t -em_pool_get_first(unsigned int *num) -{ - const mpool_elem_t *const mpool_elem_tbl = em_shm->mpool_tbl.pool; - const mpool_elem_t *mpool_elem = &mpool_elem_tbl[0]; - const unsigned int pool_cnt = pool_count(); - - _pool_tbl_iter_idx = 0; /* reset iteration */ - - if (num) - *num = pool_cnt; - - if (pool_cnt == 0) { - _pool_tbl_iter_idx = EM_CONFIG_POOLS; /* UNDEF = _get_next() */ - return EM_POOL_UNDEF; - } - - /* find first */ - while (!pool_allocated(mpool_elem)) { - _pool_tbl_iter_idx++; - if (_pool_tbl_iter_idx >= EM_CONFIG_POOLS) - return EM_POOL_UNDEF; - mpool_elem = &mpool_elem_tbl[_pool_tbl_iter_idx]; - } - - return pool_idx2hdl(_pool_tbl_iter_idx); -} - -em_pool_t -em_pool_get_next(void) -{ - if (_pool_tbl_iter_idx >= EM_CONFIG_POOLS - 1) - return EM_POOL_UNDEF; - - _pool_tbl_iter_idx++; - - const mpool_elem_t *const mpool_elem_tbl = em_shm->mpool_tbl.pool; - const mpool_elem_t *mpool_elem = &mpool_elem_tbl[_pool_tbl_iter_idx]; - - /* find next */ - while (!pool_allocated(mpool_elem)) { - _pool_tbl_iter_idx++; - if (_pool_tbl_iter_idx >= EM_CONFIG_POOLS) - return EM_POOL_UNDEF; - mpool_elem = &mpool_elem_tbl[_pool_tbl_iter_idx]; - } - - return pool_idx2hdl(_pool_tbl_iter_idx); -} - -em_status_t -em_pool_info(em_pool_t pool, em_pool_info_t *pool_info /*out*/) -{ - const mpool_elem_t *pool_elem; - - RETURN_ERROR_IF(pool_info == NULL, - EM_ERR_BAD_POINTER, EM_ESCOPE_POOL_INFO, - "arg 'pool_info' invalid"); - - pool_elem = pool_elem_get(pool); - RETURN_ERROR_IF(pool_elem == NULL || !pool_allocated(pool_elem), - EM_ERR_BAD_ID, EM_ESCOPE_POOL_INFO, - "EM-pool:%" PRI_POOL " invalid", pool); - - memset(pool_info, 0, sizeof(*pool_info)); - /* copy pool info into the user provided 'pool_info' */ - strncpy(pool_info->name, pool_elem->name, sizeof(pool_info->name)); - pool_info->name[sizeof(pool_info->name) - 1] = '\0'; - pool_info->em_pool = pool_elem->em_pool; - pool_info->event_type = pool_elem->event_type; - pool_info->align_offset = pool_elem->align_offset; - pool_info->user_area_size = pool_elem->user_area.req_size; - pool_info->num_subpools = pool_elem->num_subpools; - - for (int i = 0; i < pool_elem->num_subpools; i++) { - pool_info->subpool[i].size = pool_elem->size[i]; /*sorted sz*/ - pool_info->subpool[i].num = pool_elem->pool_cfg.subpool[i].num; - pool_info->subpool[i].cache_size = pool_elem->pool_cfg.subpool[i].cache_size; - } - - /* - * EM pool usage statistics only collected if - * EM config file: pool.statistics_enable=true. 
- */ - if (!em_shm->opt.pool.statistics_enable) - return EM_OK; /* no statistics, return */ - - /* EM pool usage statistics _enabled_ - collect it: */ - for (int i = 0; i < pool_elem->num_subpools; i++) { - const uint64_t num = pool_elem->pool_cfg.subpool[i].num; - uint64_t used = 0; - uint64_t free = 0; - - odp_pool_stats_t odp_stats; - - int ret = odp_pool_stats(pool_elem->odp_pool[i], &odp_stats); - - RETURN_ERROR_IF(ret, EM_ERR_LIB_FAILED, EM_ESCOPE_POOL_INFO, - "EM-pool:%" PRI_POOL " subpool:%d stats failed:%d", - pool, i, ret); - /* ODP inactive counters are zero, it is safe to add both: */ - free = odp_stats.available + odp_stats.cache_available; - used = num - free; - - /* Sanity check */ - if (free > num) - free = num; - if (used > num) - used = num; - - pool_info->subpool[i].used = used; - pool_info->subpool[i].free = free; - } - - return EM_OK; -} - -void -em_pool_info_print(em_pool_t pool) -{ - pool_info_print_hdr(1); - pool_info_print(pool); -} - -void -em_pool_info_print_all(void) -{ - em_pool_t pool; - unsigned int num; - - pool = em_pool_get_first(&num); - - pool_info_print_hdr(num); - while (pool != EM_POOL_UNDEF) { - pool_info_print(pool); - pool = em_pool_get_next(); - } -} +/* + * Copyright (c) 2018, Nokia Solutions and Networks + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * + * Event Machine event pool functions. 
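/*
 * Reviewer note (not part of the patch): typical use of em_pool_cfg_init()
 * and em_pool_create() below. em_pool_cfg_init() must be called first (it
 * sets __internal_check); sizes and counts are example values only.
 */
#include <event_machine.h>

static em_pool_t create_pkt_pool_sketch(void)
{
	em_pool_cfg_t pool_cfg;

	em_pool_cfg_init(&pool_cfg);
	pool_cfg.event_type = EM_EVENT_TYPE_PACKET;
	pool_cfg.num_subpools = 2;
	pool_cfg.subpool[0].size = 256; /* small events */
	pool_cfg.subpool[0].num = 1024;
	pool_cfg.subpool[1].size = 2048; /* large events */
	pool_cfg.subpool[1].num = 512;

	return em_pool_create("pkt-pool", EM_POOL_UNDEF, &pool_cfg);
}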
+ * + */ + +#include "em_include.h" + +/* per core (thread) state for em_pool_get_next() */ +static ENV_LOCAL unsigned int _pool_tbl_iter_idx; + +void em_pool_cfg_init(em_pool_cfg_t *const pool_cfg) +{ + odp_pool_param_t odp_pool_defaults; + uint32_t buf_cache_sz; + uint32_t pkt_cache_sz; + uint32_t vec_cache_sz; + uint32_t cache_sz; + + if (unlikely(!pool_cfg)) { + INTERNAL_ERROR(EM_FATAL(EM_ERR_BAD_POINTER), + EM_ESCOPE_POOL_CFG_INIT, + "pool_cfg pointer NULL!"); + return; + } + + odp_pool_param_init(&odp_pool_defaults); + memset(pool_cfg, 0, sizeof(*pool_cfg)); + + pool_cfg->event_type = EM_EVENT_TYPE_UNDEF; + + buf_cache_sz = odp_pool_defaults.buf.cache_size; + pkt_cache_sz = odp_pool_defaults.pkt.cache_size; + vec_cache_sz = odp_pool_defaults.vector.cache_size; + cache_sz = MIN(buf_cache_sz, pkt_cache_sz); + cache_sz = MIN(cache_sz, vec_cache_sz); + + for (int i = 0; i < EM_MAX_SUBPOOLS; i++) + pool_cfg->subpool[i].cache_size = cache_sz; + + pool_cfg->__internal_check = EM_CHECK_INIT_CALLED; +} + +em_pool_t +em_pool_create(const char *name, em_pool_t pool, const em_pool_cfg_t *pool_cfg) +{ + em_pool_t pool_created; + const char *err_str = ""; + + /* Verify config */ + int err = invalid_pool_cfg(pool_cfg, &err_str); + + if (unlikely(err)) { + INTERNAL_ERROR(EM_ERR_BAD_ARG, EM_ESCOPE_POOL_CREATE, + "Pool create: invalid pool-config(%d): %s", + err, err_str); + return EM_POOL_UNDEF; + } + + pool_created = pool_create(name, pool, pool_cfg); + + if (unlikely(pool_created == EM_POOL_UNDEF || + (pool != EM_POOL_UNDEF && pool != pool_created))) { + INTERNAL_ERROR(EM_ERR_LIB_FAILED, EM_ESCOPE_POOL_CREATE, + "Pool create failed,\n" + "requested:%" PRI_POOL " created:%" PRI_POOL "", + pool, pool_created); + return EM_POOL_UNDEF; + } + + return pool_created; +} + +em_status_t +em_pool_delete(em_pool_t pool) +{ + em_status_t stat; + + stat = pool_delete(pool); + RETURN_ERROR_IF(stat != EM_OK, stat, EM_ESCOPE_POOL_DELETE, + "Pool delete failed"); + + return EM_OK; +} + +em_pool_t +em_pool_find(const char *name) +{ + if (name && *name) + return pool_find(name); + + return EM_POOL_UNDEF; +} + +size_t +em_pool_get_name(em_pool_t pool, char *name /*out*/, size_t maxlen) +{ + const mpool_elem_t *mpool_elem = pool_elem_get(pool); + size_t len = 0; + + if (unlikely(name == NULL || maxlen == 0)) { + INTERNAL_ERROR(EM_ERR_BAD_POINTER, EM_ESCOPE_POOL_GET_NAME, + "Invalid args: name=0x%" PRIx64 ", maxlen=%zu", + name, maxlen); + return 0; + } + + if (unlikely(mpool_elem == NULL || !pool_allocated(mpool_elem))) { + INTERNAL_ERROR(EM_ERR_BAD_ID, EM_ESCOPE_POOL_GET_NAME, + "Invalid Pool:%" PRI_POOL "", pool); + name[0] = '\0'; + return 0; + } + + len = strnlen(mpool_elem->name, sizeof(mpool_elem->name) - 1); + if (maxlen - 1 < len) + len = maxlen - 1; + + memcpy(name, mpool_elem->name, len); + name[len] = '\0'; + + return len; +} + +em_pool_t +em_pool_get_first(unsigned int *num) +{ + const mpool_elem_t *const mpool_elem_tbl = em_shm->mpool_tbl.pool; + const mpool_elem_t *mpool_elem = &mpool_elem_tbl[0]; + const unsigned int pool_cnt = pool_count(); + + _pool_tbl_iter_idx = 0; /* reset iteration */ + + if (num) + *num = pool_cnt; + + if (pool_cnt == 0) { + _pool_tbl_iter_idx = EM_CONFIG_POOLS; /* make _get_next() return UNDEF */ + return EM_POOL_UNDEF; + } + + /* find first */ + while (!pool_allocated(mpool_elem)) { + _pool_tbl_iter_idx++; + if (_pool_tbl_iter_idx >= EM_CONFIG_POOLS) + return EM_POOL_UNDEF; + mpool_elem = &mpool_elem_tbl[_pool_tbl_iter_idx]; + } + + return pool_idx2hdl(_pool_tbl_iter_idx); +} +
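/*
 * Reviewer note (not part of the patch): the iterator pair
 * em_pool_get_first() above / em_pool_get_next() below in use. The iteration
 * index (_pool_tbl_iter_idx) is core-local, so run one iteration at a time
 * per core, as em_pool_info_print_all() further below does.
 */
static void list_pools_sketch(void)
{
	unsigned int num = 0;
	em_pool_t pool = em_pool_get_first(&num);

	while (pool != EM_POOL_UNDEF) {
		em_pool_info_print(pool);
		pool = em_pool_get_next();
	}
}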
+em_pool_t
+em_pool_get_next(void)
+{
+	if (_pool_tbl_iter_idx >= EM_CONFIG_POOLS - 1)
+		return EM_POOL_UNDEF;
+
+	_pool_tbl_iter_idx++;
+
+	const mpool_elem_t *const mpool_elem_tbl = em_shm->mpool_tbl.pool;
+	const mpool_elem_t *mpool_elem = &mpool_elem_tbl[_pool_tbl_iter_idx];
+
+	/* find next */
+	while (!pool_allocated(mpool_elem)) {
+		_pool_tbl_iter_idx++;
+		if (_pool_tbl_iter_idx >= EM_CONFIG_POOLS)
+			return EM_POOL_UNDEF;
+		mpool_elem = &mpool_elem_tbl[_pool_tbl_iter_idx];
+	}
+
+	return pool_idx2hdl(_pool_tbl_iter_idx);
+}
+
+em_status_t
+em_pool_info(em_pool_t pool, em_pool_info_t *pool_info /*out*/)
+{
+	const mpool_elem_t *pool_elem;
+
+	RETURN_ERROR_IF(pool_info == NULL,
+			EM_ERR_BAD_POINTER, EM_ESCOPE_POOL_INFO,
+			"arg 'pool_info' invalid");
+
+	pool_elem = pool_elem_get(pool);
+	RETURN_ERROR_IF(pool_elem == NULL || !pool_allocated(pool_elem),
+			EM_ERR_BAD_ID, EM_ESCOPE_POOL_INFO,
+			"EM-pool:%" PRI_POOL " invalid", pool);
+
+	memset(pool_info, 0, sizeof(*pool_info));
+	/* copy pool info into the user provided 'pool_info' */
+	strncpy(pool_info->name, pool_elem->name, sizeof(pool_info->name));
+	pool_info->name[sizeof(pool_info->name) - 1] = '\0';
+	pool_info->em_pool = pool_elem->em_pool;
+	pool_info->event_type = pool_elem->event_type;
+	pool_info->align_offset = pool_elem->align_offset;
+	pool_info->user_area_size = pool_elem->user_area.req_size;
+	pool_info->num_subpools = pool_elem->num_subpools;
+
+	for (int i = 0; i < pool_elem->num_subpools; i++) {
+		pool_info->subpool[i].size = pool_elem->size[i]; /*sorted sz*/
+		pool_info->subpool[i].num = pool_elem->pool_cfg.subpool[i].num;
+		pool_info->subpool[i].cache_size = pool_elem->pool_cfg.subpool[i].cache_size;
+	}
+
+	/*
+	 * EM pool usage statistics only collected if
+	 * EM config file: pool.statistics_enable=true.
+	 */
+	if (!em_shm->opt.pool.statistics_enable)
+		return EM_OK; /* no statistics, return */
+
+	/* EM pool usage statistics _enabled_ - collect it: */
+	for (int i = 0; i < pool_elem->num_subpools; i++) {
+		const uint64_t num = pool_elem->pool_cfg.subpool[i].num;
+		uint64_t used = 0;
+		uint64_t free = 0;
+
+		odp_pool_stats_t odp_stats;
+
+#if ODP_VERSION_API_NUM(1, 37, 2) <= ODP_VERSION_API
+		/* avoid LTO-error: 'odp_stats.thread.first/last' may be used uninitialized */
+		odp_stats.thread.first = 0;
+		odp_stats.thread.last = 0;
+#endif
+		int ret = odp_pool_stats(pool_elem->odp_pool[i], &odp_stats);
+
+		RETURN_ERROR_IF(ret, EM_ERR_LIB_FAILED, EM_ESCOPE_POOL_INFO,
+				"EM-pool:%" PRI_POOL " subpool:%d stats failed:%d",
+				pool, i, ret);
+		/* ODP inactive counters are zero, it is safe to add both: */
+		free = odp_stats.available + odp_stats.cache_available;
+		used = num - free;
+
+		/* Sanity check */
+		if (free > num)
+			free = num;
+		if (used > num)
+			used = num;
+
+		pool_info->subpool[i].used = used;
+		pool_info->subpool[i].free = free;
+	}
+
+	return EM_OK;
+}
+
+void
+em_pool_info_print(em_pool_t pool)
+{
+	pool_info_print_hdr(1);
+	pool_info_print(pool);
+}
+
+void
+em_pool_info_print_all(void)
+{
+	em_pool_t pool;
+	unsigned int num;
+
+	pool = em_pool_get_first(&num);
+
+	pool_info_print_hdr(num);
+	while (pool != EM_POOL_UNDEF) {
+		pool_info_print(pool);
+		pool = em_pool_get_next();
+	}
+}
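For reference, the reworked pool API above is typically used as in the
following sketch. It is illustrative only: it assumes an initialized EM
instance, and the function name, pool name "example-pool" and the subpool
sizes/counts are made-up values, not defaults.

#include <event_machine.h>

/* Create a two-subpool event pool and print its usage statistics */
static em_pool_t create_and_inspect_pool(void)
{
	em_pool_cfg_t pool_cfg;
	em_pool_info_t pool_info;

	/* Mandatory: initialize the config with defaults before use */
	em_pool_cfg_init(&pool_cfg);

	pool_cfg.event_type = EM_EVENT_TYPE_SW;
	pool_cfg.num_subpools = 2;
	pool_cfg.subpool[0].size = 256;  /* event payload size (B) */
	pool_cfg.subpool[0].num = 1024;  /* number of events */
	pool_cfg.subpool[1].size = 2048;
	pool_cfg.subpool[1].num = 256;

	/* EM_POOL_UNDEF as 2nd arg: let EM pick the pool handle */
	em_pool_t pool = em_pool_create("example-pool", EM_POOL_UNDEF,
					&pool_cfg);
	if (pool == EM_POOL_UNDEF)
		return EM_POOL_UNDEF; /* creation failed, error logged */

	/* pool_info.subpool[i].used/.free are only filled in when the
	 * EM config file sets pool.statistics_enable = true */
	if (em_pool_info(pool, &pool_info) == EM_OK)
		em_pool_info_print(pool);

	return pool;
}

All created pools can also be enumerated with em_pool_get_first() and
em_pool_get_next(), which is exactly what em_pool_info_print_all() above
does internally.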
diff --git a/src/misc/objpool.c b/src/misc/objpool.c
index 5a1e8ccb..203cd6cf 100644
--- a/src/misc/objpool.c
+++ b/src/misc/objpool.c
@@ -1,124 +1,121 @@
-/*
- *   Copyright (c) 2015, Nokia Solutions and Networks
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *       * Redistributions of source code must retain the above copyright
- *         notice, this list of conditions and the following disclaimer.
- *       * Redistributions in binary form must reproduce the above copyright
- *         notice, this list of conditions and the following disclaimer in the
- *         documentation and/or other materials provided with the distribution.
- *       * Neither the name of the copyright holder nor the names of its
- *         contributors may be used to endorse or promote products derived
- *         from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include 
-#include 
-#include 
-
-static inline objpool_elem_t *
-objpool_node2elem(list_node_t *const list_node);
-
-int
-objpool_init(objpool_t *const objpool, int nbr_subpools)
-{
-	int i;
-
-	if (nbr_subpools > OBJSUBPOOLS_MAX)
-		nbr_subpools = OBJSUBPOOLS_MAX;
-
-	objpool->nbr_subpools = nbr_subpools;
-
-	for (i = 0; i < nbr_subpools; i++) {
-		objsubpool_t *const subpool = &objpool->subpool[i];
-
-		env_spinlock_init(&subpool->lock);
-		list_init(&subpool->list_head);
-	}
-
-	return 0;
-}
-
-void
-objpool_add(objpool_t *const objpool, int subpool_idx,
-	    objpool_elem_t *const elem)
-{
-	const int idx = subpool_idx % objpool->nbr_subpools;
-	objsubpool_t *const subpool = &objpool->subpool[idx];
-
-	elem->subpool_idx = idx;
-
-	env_spinlock_lock(&subpool->lock);
-	list_add(&subpool->list_head, &elem->list_node);
-	elem->in_pool = 1;
-	env_spinlock_unlock(&subpool->lock);
-}
-
-objpool_elem_t *
-objpool_rem(objpool_t *const objpool, int subpool_idx)
-{
-	objpool_elem_t *elem = NULL;
-	int i;
-
-	for (i = 0; i < objpool->nbr_subpools; i++) {
-		const int idx = (subpool_idx + i) % objpool->nbr_subpools;
-		objsubpool_t *const subpool = &objpool->subpool[idx];
-
-		env_spinlock_lock(&subpool->lock);
-
-		list_node_t *const node = list_rem_first(&subpool->list_head);
-
-		if (node != NULL) {
-			elem = objpool_node2elem(node);
-			elem->in_pool = 0;
-		}
-
-		env_spinlock_unlock(&subpool->lock);
-
-		if (node != NULL)
-			return elem;
-	}
-
-	return NULL;
-}
-
-int
-objpool_rem_elem(objpool_t *const objpool, objpool_elem_t *const elem)
-{
-	const int idx = elem->subpool_idx;
-	objsubpool_t *const subpool = &objpool->subpool[idx];
-	int ret = -1;
-
-	env_spinlock_lock(&subpool->lock);
-	if (elem->in_pool) {
-		list_rem(&subpool->list_head, &elem->list_node);
-		elem->in_pool = 0;
-		ret = 0;
-	}
-	env_spinlock_unlock(&subpool->lock);
-
-	return ret;
-}
-
-static inline objpool_elem_t *
-objpool_node2elem(list_node_t *const list_node)
-{
-	return (objpool_elem_t *)((uintptr_t)list_node -
-				  offsetof(objpool_elem_t, list_node));
-}
+/*
+ *   Copyright (c) 2015, Nokia Solutions and Networks
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *       * Redistributions of source code must retain the above copyright
+ *         notice, this list of conditions and the following disclaimer.
+ *       * Redistributions in binary form must reproduce the above copyright
+ *         notice, this list of conditions and the following disclaimer in the
+ *         documentation and/or other materials provided with the distribution.
+ *       * Neither the name of the copyright holder nor the names of its
+ *         contributors may be used to endorse or promote products derived
+ *         from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include 
+#include 
+#include 
+
+static inline objpool_elem_t *
+objpool_node2elem(list_node_t *const list_node);
+
+int
+objpool_init(objpool_t *const objpool, int nbr_subpools)
+{
+	if (nbr_subpools > OBJSUBPOOLS_MAX)
+		nbr_subpools = OBJSUBPOOLS_MAX;
+
+	objpool->nbr_subpools = nbr_subpools;
+
+	for (int i = 0; i < nbr_subpools; i++) {
+		objsubpool_t *const subpool = &objpool->subpool[i];
+
+		env_spinlock_init(&subpool->lock);
+		list_init(&subpool->list_head);
+	}
+
+	return 0;
+}
+
+void
+objpool_add(objpool_t *const objpool, int subpool_idx,
+	    objpool_elem_t *const elem)
+{
+	const int idx = subpool_idx % objpool->nbr_subpools;
+	objsubpool_t *const subpool = &objpool->subpool[idx];
+
+	elem->subpool_idx = idx;
+
+	env_spinlock_lock(&subpool->lock);
+	list_add(&subpool->list_head, &elem->list_node);
+	elem->in_pool = 1;
+	env_spinlock_unlock(&subpool->lock);
+}
+
+objpool_elem_t *
+objpool_rem(objpool_t *const objpool, int subpool_idx)
+{
+	objpool_elem_t *elem = NULL;
+
+	for (int i = 0; i < objpool->nbr_subpools; i++) {
+		const int idx = (subpool_idx + i) % objpool->nbr_subpools;
+		objsubpool_t *const subpool = &objpool->subpool[idx];
+
+		env_spinlock_lock(&subpool->lock);
+
+		list_node_t *const node = list_rem_first(&subpool->list_head);
+
+		if (node != NULL) {
+			elem = objpool_node2elem(node);
+			elem->in_pool = 0;
+		}
+
+		env_spinlock_unlock(&subpool->lock);
+
+		if (node != NULL)
+			return elem;
+	}
+
+	return NULL;
+}
+
+int
+objpool_rem_elem(objpool_t *const objpool, objpool_elem_t *const elem)
+{
+	const int idx = elem->subpool_idx;
+	objsubpool_t *const subpool = &objpool->subpool[idx];
+	int ret = -1;
+
+	env_spinlock_lock(&subpool->lock);
+	if (elem->in_pool) {
+		list_rem(&subpool->list_head, &elem->list_node);
+		elem->in_pool = 0;
+		ret = 0;
+	}
+	env_spinlock_unlock(&subpool->lock);
+
+	return ret;
+}
+
+static inline objpool_elem_t *
+objpool_node2elem(list_node_t *const list_node)
+{
+	return (objpool_elem_t *)((uintptr_t)list_node -
+				  offsetof(objpool_elem_t, list_node));
+}
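objpool_node2elem() above is the classic "container-of" pattern: the list
links embedded nodes rather than the elements themselves, and the element
is recovered by subtracting the node's offset within the struct. A
standalone sketch of the pattern follows; the types elem_t/node2elem are
hypothetical stand-ins, not the library's own definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for list_node_t/objpool_elem_t */
typedef struct list_node {
	struct list_node *next;
	struct list_node *prev;
} list_node_t;

typedef struct {
	int subpool_idx;       /* fields before the node ...      */
	int in_pool;           /* ... give it a non-zero offset   */
	list_node_t list_node; /* node embedded in the element    */
} elem_t;

/* Recover the containing element from a pointer to its embedded node:
 * subtract the node's offset within the struct from the node's address */
static elem_t *node2elem(list_node_t *const node)
{
	return (elem_t *)((uintptr_t)node - offsetof(elem_t, list_node));
}

int main(void)
{
	elem_t elem = { .subpool_idx = 3, .in_pool = 1 };
	list_node_t *node = &elem.list_node; /* what a list would store */

	printf("subpool_idx=%d\n", node2elem(node)->subpool_idx); /* 3 */
	return 0;
}

This intrusive-list layout lets objpool move elements between the
per-subpool free-lists without any per-node allocations: each element
carries its own link storage.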