From 4384f094b1b42d4ac99c00d2b2e87402dd4e842b Mon Sep 17 00:00:00 2001 From: Justin Miller Date: Fri, 27 Dec 2024 12:26:19 -0800 Subject: [PATCH] ooga --- drivers/bus/acpi_new/include/uacpi/acpi.h | 127 ++- drivers/bus/acpi_new/include/uacpi/context.h | 7 - drivers/bus/acpi_new/include/uacpi/event.h | 11 +- drivers/bus/acpi_new/include/uacpi/helpers.h | 2 +- .../acpi_new/include/uacpi/internal/context.h | 11 +- .../acpi_new/include/uacpi/internal/event.h | 8 +- .../include/uacpi/internal/interpreter.h | 2 +- .../bus/acpi_new/include/uacpi/internal/io.h | 12 +- .../bus/acpi_new/include/uacpi/internal/log.h | 2 + .../acpi_new/include/uacpi/internal/mutex.h | 76 +- .../include/uacpi/internal/namespace.h | 78 +- .../acpi_new/include/uacpi/internal/notify.h | 8 +- .../acpi_new/include/uacpi/internal/opcodes.h | 16 +- .../include/uacpi/internal/opregion.h | 27 +- .../include/uacpi/internal/registers.h | 3 + .../include/uacpi/internal/resources.h | 5 +- .../include/uacpi/internal/shareable.h | 4 + .../acpi_new/include/uacpi/internal/stdlib.h | 43 - .../acpi_new/include/uacpi/internal/tables.h | 6 +- .../acpi_new/include/uacpi/internal/types.h | 270 ++++- .../include/uacpi/internal/utilities.h | 2 + .../bus/acpi_new/include/uacpi/kernel_api.h | 60 +- .../bus/acpi_new/include/uacpi/namespace.h | 100 +- .../acpi_new/include/uacpi/platform/atomic.h | 22 + .../acpi_new/include/uacpi/platform/config.h | 123 ++ .../bus/acpi_new/include/uacpi/resources.h | 9 +- drivers/bus/acpi_new/include/uacpi/tables.h | 3 + drivers/bus/acpi_new/include/uacpi/types.h | 522 ++++----- drivers/bus/acpi_new/include/uacpi/uacpi.h | 92 +- .../bus/acpi_new/include/uacpi/utilities.h | 8 +- .../bus/acpi_new/source/default_handlers.c | 111 +- drivers/bus/acpi_new/source/event.c | 1010 ++++++++++++----- drivers/bus/acpi_new/source/interpreter.c | 240 ++-- drivers/bus/acpi_new/source/io.c | 210 ++-- drivers/bus/acpi_new/source/mutex.c | 218 +++- drivers/bus/acpi_new/source/namespace.c | 536 +++++++-- drivers/bus/acpi_new/source/notify.c | 179 ++- drivers/bus/acpi_new/source/opcodes.c | 12 +- drivers/bus/acpi_new/source/opregion.c | 588 +++++++--- drivers/bus/acpi_new/source/osi.c | 71 +- drivers/bus/acpi_new/source/registers.c | 43 +- drivers/bus/acpi_new/source/resources.c | 59 +- drivers/bus/acpi_new/source/shareable.c | 13 +- drivers/bus/acpi_new/source/sleep.c | 6 +- drivers/bus/acpi_new/source/stdlib.c | 19 +- drivers/bus/acpi_new/source/tables.c | 154 ++- drivers/bus/acpi_new/source/types.c | 451 +++++++- drivers/bus/acpi_new/source/uacpi.c | 357 ++++-- drivers/bus/acpi_new/source/utilities.c | 40 +- drivers/bus/acpi_new/uacpi_layer.c | 6 +- 50 files changed, 4420 insertions(+), 1562 deletions(-) create mode 100644 drivers/bus/acpi_new/include/uacpi/platform/config.h diff --git a/drivers/bus/acpi_new/include/uacpi/acpi.h b/drivers/bus/acpi_new/include/uacpi/acpi.h index e8f8ab4ef2550..3f2e629f77500 100644 --- a/drivers/bus/acpi_new/include/uacpi/acpi.h +++ b/drivers/bus/acpi_new/include/uacpi/acpi.h @@ -24,6 +24,7 @@ #define ACPI_SSDT_SIGNATURE "SSDT" #define ACPI_PSDT_SIGNATURE "PSDT" #define ACPI_ECDT_SIGNATURE "ECDT" +#define ACPI_RHCT_SIGNATURE "RHCT" #define ACPI_AS_ID_SYS_MEM 0x00 #define ACPI_AS_ID_SYS_IO 0x01 @@ -130,7 +131,11 @@ enum acpi_madt_entry_type { ACPI_MADT_ENTRY_TYPE_MSI_PIC = 0x15, ACPI_MADT_ENTRY_TYPE_BIO_PIC = 0x16, ACPI_MADT_ENTRY_TYPE_LPC_PIC = 0x17, - ACPI_MADT_ENTRY_TYPE_RESERVED = 0x18, // 0x18..0x7F + ACPI_MADT_ENTRY_TYPE_RINTC = 0x18, + ACPI_MADT_ENTRY_TYPE_IMSIC = 0x19, + 
ACPI_MADT_ENTRY_TYPE_APLIC = 0x1A, + ACPI_MADT_ENTRY_TYPE_PLIC = 0x1B, + ACPI_MADT_ENTRY_TYPE_RESERVED = 0x1C, // 0x1C..0x7F ACPI_MADT_ENTRY_TYPE_OEM = 0x80, // 0x80..0xFF }; @@ -418,6 +423,62 @@ UACPI_PACKED(struct acpi_madt_lpc_pic { }) UACPI_EXPECT_SIZEOF(struct acpi_madt_lpc_pic, 15); +UACPI_PACKED(struct acpi_madt_rintc { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 rsvd; + uacpi_u32 flags; + uacpi_u64 hart_id; + uacpi_u32 uid; + uacpi_u32 ext_intc_id; + uacpi_u64 address; + uacpi_u32 size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_rintc, 36); + +UACPI_PACKED(struct acpi_madt_imsic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 rsvd; + uacpi_u32 flags; + uacpi_u16 num_ids; + uacpi_u16 num_guest_ids; + uacpi_u8 guest_index_bits; + uacpi_u8 hart_index_bits; + uacpi_u8 group_index_bits; + uacpi_u8 group_index_shift; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_imsic, 16); + +UACPI_PACKED(struct acpi_madt_aplic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 id; + uacpi_u32 flags; + uacpi_u64 hardware_id; + uacpi_u16 idc_count; + uacpi_u16 sources_count; + uacpi_u32 gsi_base; + uacpi_u64 address; + uacpi_u32 size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_aplic, 36); + +UACPI_PACKED(struct acpi_madt_plic { + struct acpi_entry_hdr hdr; + uacpi_u8 version; + uacpi_u8 id; + uacpi_u64 hardware_id; + uacpi_u16 sources_count; + uacpi_u16 max_priority; + uacpi_u32 flags; + uacpi_u32 size; + uacpi_u64 address; + uacpi_u32 gsi_base; + +}) +UACPI_EXPECT_SIZEOF(struct acpi_madt_plic, 36); + enum acpi_srat_entry_type { ACPI_SRAT_ENTRY_TYPE_PROCESSOR_AFFINITY = 0, ACPI_SRAT_ENTRY_TYPE_MEMORY_AFFINITY = 1, @@ -942,6 +1003,70 @@ UACPI_PACKED(struct acpi_ecdt { }) UACPI_EXPECT_SIZEOF(struct acpi_ecdt, 65); +UACPI_PACKED(struct acpi_rhct_hdr { + uacpi_u16 type; + uacpi_u16 length; + uacpi_u16 revision; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_hdr, 6); + +// acpi_rhct->flags +#define ACPI_TIMER_CANNOT_WAKE_CPU (1 << 0) + +UACPI_PACKED(struct acpi_rhct { + struct acpi_sdt_hdr hdr; + uacpi_u32 flags; + uacpi_u64 timebase_frequency; + uacpi_u32 node_count; + uacpi_u32 nodes_offset; + struct acpi_rhct_hdr entries[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct, 56); + +enum acpi_rhct_entry_type { + ACPI_RHCT_ENTRY_TYPE_ISA_STRING = 0, + ACPI_RHCT_ENTRY_TYPE_CMO = 1, + ACPI_RHCT_ENTRY_TYPE_MMU = 2, + ACPI_RHCT_ENTRY_TYPE_HART_INFO = 65535, +}; + +UACPI_PACKED(struct acpi_rhct_isa_string { + struct acpi_rhct_hdr hdr; + uacpi_u16 length; + uacpi_u8 isa[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_isa_string, 8); + +UACPI_PACKED(struct acpi_rhct_cmo { + struct acpi_rhct_hdr hdr; + uacpi_u8 rsvd; + uacpi_u8 cbom_size; + uacpi_u8 cbop_size; + uacpi_u8 cboz_size; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_cmo, 10); + +enum acpi_rhct_mmu_type { + ACPI_RHCT_MMU_TYPE_SV39 = 0, + ACPI_RHCT_MMU_TYPE_SV48 = 1, + ACPI_RHCT_MMU_TYPE_SV57 = 2, +}; + +UACPI_PACKED(struct acpi_rhct_mmu { + struct acpi_rhct_hdr hdr; + uacpi_u8 rsvd; + uacpi_u8 type; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_mmu, 8); + +UACPI_PACKED(struct acpi_rhct_hart_info { + struct acpi_rhct_hdr hdr; + uacpi_u16 offset_count; + uacpi_u32 uid; + uacpi_u32 offsets[]; +}) +UACPI_EXPECT_SIZEOF(struct acpi_rhct_hart_info, 12); + #define ACPI_LARGE_ITEM (1 << 7) #define ACPI_SMALL_ITEM_NAME_IDX 3 diff --git a/drivers/bus/acpi_new/include/uacpi/context.h b/drivers/bus/acpi_new/include/uacpi/context.h index 82e5e9cd576b9..ce76c9ef02638 100644 --- a/drivers/bus/acpi_new/include/uacpi/context.h +++ 
b/drivers/bus/acpi_new/include/uacpi/context.h @@ -2,13 +2,6 @@ #include -#ifndef UACPI_DEFAULT_LOG_LEVEL - #define UACPI_DEFAULT_LOG_LEVEL UACPI_LOG_INFO -#endif - -#define UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS 30 -#define UACPI_DEFAULT_MAX_CALL_STACK_DEPTH 256 - #ifdef __cplusplus extern "C" { #endif diff --git a/drivers/bus/acpi_new/include/uacpi/event.h b/drivers/bus/acpi_new/include/uacpi/event.h index 6596e00a2028c..7d005a9976e44 100644 --- a/drivers/bus/acpi_new/include/uacpi/event.h +++ b/drivers/bus/acpi_new/include/uacpi/event.h @@ -68,7 +68,7 @@ uacpi_status uacpi_fixed_event_info( UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( uacpi_status uacpi_gpe_info( - uacpi_namespace_node *gpe_devicem, uacpi_u16 idx, + uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_event_info *out_info )) @@ -111,7 +111,7 @@ uacpi_status uacpi_install_gpe_handler( * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE */ UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( - uacpi_status uacpi_install_gpe_handler_raw( +uacpi_status uacpi_install_gpe_handler_raw( uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, uacpi_handle ctx )) @@ -129,7 +129,7 @@ uacpi_status uacpi_uninstall_gpe_handler( * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE */ UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( -uacpi_status uacpi_gpe_setup_for_wake( +uacpi_status uacpi_setup_gpe_for_wake( uacpi_namespace_node *gpe_device, uacpi_u16 idx, uacpi_namespace_node *wake_device )) @@ -143,15 +143,14 @@ uacpi_status uacpi_gpe_setup_for_wake( * NOTE: 'gpe_device' may be null for GPEs managed by \_GPE */ UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( -uacpi_status uacpi_gpe_enable_for_wake( +uacpi_status uacpi_enable_gpe_for_wake( uacpi_namespace_node *gpe_device, uacpi_u16 idx )) UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( -uacpi_status uacpi_gpe_disable_for_wake( +uacpi_status uacpi_disable_gpe_for_wake( uacpi_namespace_node *gpe_device, uacpi_u16 idx )) - /* * Finalize GPE initialization by enabling all GPEs not configured for wake and * having a matching AML handler detected. 
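For illustration, the renamed wake helpers above compose as follows. A minimal sketch against the declarations in this header; the GPE index and the wake device node are placeholders a real driver would discover itself:

#include <uacpi/event.h>

// Minimal sketch: arm a (hypothetical) wake GPE for 'wake_dev'.
static uacpi_status enable_wake_for_device(
    uacpi_namespace_node *wake_dev, uacpi_u16 gpe_idx
)
{
    uacpi_status ret;

    // A NULL 'gpe_device' selects the GPEs managed by \_GPE
    ret = uacpi_setup_gpe_for_wake(UACPI_NULL, gpe_idx, wake_dev);
    if (ret != UACPI_STATUS_OK)
        return ret;

    return uacpi_enable_gpe_for_wake(UACPI_NULL, gpe_idx);
}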
diff --git a/drivers/bus/acpi_new/include/uacpi/helpers.h b/drivers/bus/acpi_new/include/uacpi/helpers.h index f45c58386e9cc..a78cc10830861 100644 --- a/drivers/bus/acpi_new/include/uacpi/helpers.h +++ b/drivers/bus/acpi_new/include/uacpi/helpers.h @@ -6,7 +6,7 @@ #define UACPI_STATIC_ASSERT _Static_assert #endif -#define UACPI_BUILD_BUG_ON_WITH_MSG(expr, msg) UACPI_STATIC_ASSERT(!(expr), msg) +#define UACPI_BUILD_BUG_ON_WITH_MSG(expr, msg) //UACPI_STATIC_ASSERT(!(expr), msg) #define UACPI_BUILD_BUG_ON(expr) \ UACPI_BUILD_BUG_ON_WITH_MSG(expr, "BUILD BUG: " #expr " evaluated to true") diff --git a/drivers/bus/acpi_new/include/uacpi/internal/context.h b/drivers/bus/acpi_new/include/uacpi/internal/context.h index ece593a07cca0..07d87519ffa09 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/context.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/context.h @@ -50,11 +50,20 @@ struct uacpi_runtime_context { uacpi_bool has_global_lock; uacpi_handle sci_handle; #endif + uacpi_u64 opcodes_executed; + uacpi_u32 loop_timeout_seconds; uacpi_u32 max_call_stack_depth; uacpi_u32 global_lock_seq_num; - uacpi_handle *global_lock_mutex; + + /* + * These are stored here to protect against stuff like: + * - CopyObject(JUNK, \) + * - CopyObject(JUNK, \_GL) + */ + uacpi_mutex *global_lock_mutex; + uacpi_object *root_object; #ifndef UACPI_REDUCED_HARDWARE uacpi_handle *global_lock_event; diff --git a/drivers/bus/acpi_new/include/uacpi/internal/event.h b/drivers/bus/acpi_new/include/uacpi/internal/event.h index 0e786f77c14ff..40ced0db20681 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/event.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/event.h @@ -5,6 +5,10 @@ // This fixed event is internal-only, and we don't expose it in the enum #define UACPI_FIXED_EVENT_GLOBAL_LOCK 0 +UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( + uacpi_status uacpi_initialize_events_early(void) +) + UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( uacpi_status uacpi_initialize_events(void) ) @@ -12,8 +16,8 @@ UACPI_STUB_IF_REDUCED_HARDWARE( void uacpi_deinitialize_events(void) ) -UACPI_ALWAYS_OK_FOR_REDUCED_HARDWARE( - uacpi_status uacpi_events_match_post_dynamic_table_load(void) +UACPI_STUB_IF_REDUCED_HARDWARE( + void uacpi_events_match_post_dynamic_table_load(void) ) UACPI_ALWAYS_ERROR_FOR_REDUCED_HARDWARE( diff --git a/drivers/bus/acpi_new/include/uacpi/internal/interpreter.h b/drivers/bus/acpi_new/include/uacpi/internal/interpreter.h index 88c186928c791..3f180d680829f 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/interpreter.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/interpreter.h @@ -16,5 +16,5 @@ uacpi_status uacpi_osi(uacpi_handle handle, uacpi_object *retval); uacpi_status uacpi_execute_control_method( uacpi_namespace_node *scope, uacpi_control_method *method, - const uacpi_args *args, uacpi_object **ret + const uacpi_object_array *args, uacpi_object **ret ); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/io.h b/drivers/bus/acpi_new/include/uacpi/internal/io.h index 8642edf34fe8f..d9153db90c2f9 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/io.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/io.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include #include @@ -19,3 +19,13 @@ uacpi_status uacpi_read_field_unit( uacpi_status uacpi_write_field_unit( uacpi_field_unit *field, const void *src, uacpi_size size ); + +uacpi_status uacpi_system_io_read( + uacpi_io_addr address, uacpi_u8 width, uacpi_u64 *out +); +uacpi_status uacpi_system_io_write( + uacpi_io_addr address, uacpi_u8 
width, uacpi_u64 in +); + +uacpi_status uacpi_system_memory_read(void *ptr, uacpi_u8 width, uacpi_u64 *out); +uacpi_status uacpi_system_memory_write(void *ptr, uacpi_u8 width, uacpi_u64 in); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/log.h b/drivers/bus/acpi_new/include/uacpi/internal/log.h index e1ad62d6947ef..006fb73fab2b4 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/log.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/log.h @@ -18,3 +18,5 @@ void uacpi_log(uacpi_log_level, const uacpi_char*, ...); #define uacpi_info(...) uacpi_log_lvl(UACPI_LOG_INFO, __VA_ARGS__) #define uacpi_warn(...) uacpi_log_lvl(UACPI_LOG_WARN, __VA_ARGS__) #define uacpi_error(...) uacpi_log_lvl(UACPI_LOG_ERROR, __VA_ARGS__) + +void uacpi_logger_initialize(void); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/mutex.h b/drivers/bus/acpi_new/include/uacpi/internal/mutex.h index ec253564564ef..a1680bdda4ff2 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/mutex.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/mutex.h @@ -1,8 +1,78 @@ #pragma once -#include +#include +#include uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex*); -uacpi_bool uacpi_acquire_aml_mutex(uacpi_mutex*, uacpi_u16 timeout); -void uacpi_release_aml_mutex(uacpi_mutex*); +uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex*, uacpi_u16 timeout); +uacpi_status uacpi_release_aml_mutex(uacpi_mutex*); + +static inline uacpi_status uacpi_acquire_native_mutex(uacpi_handle mtx) +{ + if (uacpi_unlikely(mtx == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + return uacpi_kernel_acquire_mutex(mtx, 0xFFFF); +} + +uacpi_status uacpi_acquire_native_mutex_with_timeout( + uacpi_handle mtx, uacpi_u16 timeout +); + +static inline uacpi_status uacpi_release_native_mutex(uacpi_handle mtx) +{ + if (uacpi_unlikely(mtx == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + uacpi_kernel_release_mutex(mtx); + return UACPI_STATUS_OK; +} + +static inline uacpi_status uacpi_acquire_native_mutex_may_be_null( + uacpi_handle mtx +) +{ + if (mtx == UACPI_NULL) + return UACPI_STATUS_OK; + + return uacpi_kernel_acquire_mutex(mtx, 0xFFFF); +} + +static inline uacpi_status uacpi_release_native_mutex_may_be_null( + uacpi_handle mtx +) +{ + if (mtx == UACPI_NULL) + return UACPI_STATUS_OK; + + uacpi_kernel_release_mutex(mtx); + return UACPI_STATUS_OK; +} + +struct uacpi_recursive_lock { + uacpi_handle mutex; + uacpi_size depth; + uacpi_thread_id owner; +}; + +uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock); +uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock); + +uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock); +uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock); + +struct uacpi_rw_lock { + uacpi_handle read_mutex; + uacpi_handle write_mutex; + uacpi_size num_readers; +}; + +uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock); + +uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock); + +uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock); +uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/namespace.h b/drivers/bus/acpi_new/include/uacpi/internal/namespace.h index 86c518ecbd00f..a9cba1cbe5647 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/namespace.h +++ 
b/drivers/bus/acpi_new/include/uacpi/internal/namespace.h @@ -1,6 +1,7 @@ #pragma once #include +#include #include #include @@ -15,6 +16,12 @@ */ #define UACPI_NAMESPACE_NODE_FLAG_DANGLING (1u << 1) +/* + * This node is method-local and must not be exposed via public API as its + * lifetime is limited. + */ +#define UACPI_NAMESPACE_NODE_FLAG_TEMPORARY (1u << 2) + #define UACPI_NAMESPACE_NODE_PREDEFINED (1u << 31) typedef struct uacpi_namespace_node { @@ -33,13 +40,80 @@ void uacpi_deinitialize_namespace(void); uacpi_namespace_node *uacpi_namespace_node_alloc(uacpi_object_name name); void uacpi_namespace_node_unref(uacpi_namespace_node *node); -uacpi_status uacpi_node_install(uacpi_namespace_node *parent, uacpi_namespace_node *node); -void uacpi_node_uninstall(uacpi_namespace_node *node); + +uacpi_status uacpi_namespace_node_type_unlocked( + const uacpi_namespace_node *node, uacpi_object_type *out_type +); +uacpi_status uacpi_namespace_node_is_one_of_unlocked( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, + uacpi_bool *out +); + +uacpi_object *uacpi_namespace_node_get_object(const uacpi_namespace_node *node); + +uacpi_object *uacpi_namespace_node_get_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask +); + +uacpi_status uacpi_namespace_node_acquire_object( + const uacpi_namespace_node *node, uacpi_object **out_obj +); +uacpi_status uacpi_namespace_node_acquire_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits, + uacpi_object **out_obj +); + +uacpi_status uacpi_namespace_node_reacquire_object( + uacpi_object *obj +); +uacpi_status uacpi_namespace_node_release_object( + uacpi_object *obj +); + +uacpi_status uacpi_namespace_node_install( + uacpi_namespace_node *parent, uacpi_namespace_node *node +); +uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node); uacpi_namespace_node *uacpi_namespace_node_find_sub_node( uacpi_namespace_node *parent, uacpi_object_name name ); +enum uacpi_may_search_above_parent { + UACPI_MAY_SEARCH_ABOVE_PARENT_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_YES, +}; + +enum uacpi_permanent_only { + UACPI_PERMANENT_ONLY_NO, + UACPI_PERMANENT_ONLY_YES, +}; + +enum uacpi_should_lock { + UACPI_SHOULD_LOCK_NO, + UACPI_SHOULD_LOCK_YES, +}; + +uacpi_status uacpi_namespace_node_resolve( + uacpi_namespace_node *scope, const uacpi_char *path, enum uacpi_should_lock, + enum uacpi_may_search_above_parent, enum uacpi_permanent_only, + uacpi_namespace_node **out_node +); + +uacpi_status uacpi_namespace_do_for_each_child( + uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits, uacpi_u32 max_depth, enum uacpi_should_lock, + enum uacpi_permanent_only, void *user +); + uacpi_bool uacpi_namespace_node_is_dangling(uacpi_namespace_node *node); +uacpi_bool uacpi_namespace_node_is_temporary(uacpi_namespace_node *node); uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node); + +uacpi_status uacpi_namespace_read_lock(void); +uacpi_status uacpi_namespace_read_unlock(void); + +uacpi_status uacpi_namespace_write_lock(void); +uacpi_status uacpi_namespace_write_unlock(void); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/notify.h b/drivers/bus/acpi_new/include/uacpi/internal/notify.h index 95949df1270b4..27cb21c54500d 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/notify.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/notify.h @@ -1,9 +1,9 @@ #pragma once +#include #include 
-uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value); +uacpi_status uacpi_initialize_notify(void); +void uacpi_deinitialize_notify(void); -uacpi_handlers *uacpi_node_get_handlers( - uacpi_namespace_node *node -); +uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/opcodes.h b/drivers/bus/acpi_new/include/uacpi/internal/opcodes.h index 99c0ebc86c0b6..0f46cd84381d5 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/opcodes.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/opcodes.h @@ -347,7 +347,7 @@ UACPI_OP( \ name##Op, code, \ { \ UACPI_PARSE_OP_TRACKED_PKGLEN, \ - __VA_ARGS__ \ + ##__VA_ARGS__, \ UACPI_PARSE_OP_IF_HAS_DATA, 4, \ UACPI_PARSE_OP_RECORD_AML_PC, \ UACPI_PARSE_OP_TERM_ARG_OR_NAMED_OBJECT_OR_UNRESOLVED, \ @@ -396,7 +396,7 @@ UACPI_OP( \ UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, \ UACPI_PARSE_OP_TYPECHECK, UACPI_OBJECT_BUFFER, \ UACPI_PARSE_OP_OPERAND, \ - __VA_ARGS__ \ + ##__VA_ARGS__, \ UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, node_idx, \ UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_BUFFER_FIELD, \ @@ -544,11 +544,11 @@ UACPI_OP( \ ) \ UACPI_BUILD_PACKAGE_OP( \ Package, 0x12, 3, \ - UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 1 \ ) \ UACPI_BUILD_PACKAGE_OP( \ VarPackage, 0x13, 2, \ - UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND \ ) \ UACPI_OP( \ MethodOp, 0x14, \ @@ -1074,7 +1074,7 @@ UACPI_OP( \ { \ UACPI_PARSE_OP_TRACKED_PKGLEN, \ UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD, \ - __VA_ARGS__ \ + ##__VA_ARGS__, \ UACPI_PARSE_OP_SKIP_WITH_WARN_IF_NULL, 1, \ UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, type, \ UACPI_PARSE_OP_INVOKE_HANDLER, \ @@ -1146,7 +1146,7 @@ UACPI_OP( \ ) \ UACPI_DO_BUILD_BUFFER_FIELD_OP( \ Create, UACPI_EXT_OP(0x13), 3, \ - UACPI_PARSE_OP_OPERAND, \ + UACPI_PARSE_OP_OPERAND \ ) \ UACPI_OUT_OF_LINE_OP( \ LoadTableOp, UACPI_EXT_OP(0x1F), \ @@ -1284,12 +1284,12 @@ UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ Processor, 0x83, UACPI_OBJECT_PROCESSOR, \ UACPI_PARSE_OP_LOAD_IMM, 1, \ UACPI_PARSE_OP_LOAD_IMM, 4, \ - UACPI_PARSE_OP_LOAD_IMM, 1, \ + UACPI_PARSE_OP_LOAD_IMM, 1 \ ) \ UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ PowerRes, 0x84, UACPI_OBJECT_POWER_RESOURCE, \ UACPI_PARSE_OP_LOAD_IMM, 1, \ - UACPI_PARSE_OP_LOAD_IMM, 2, \ + UACPI_PARSE_OP_LOAD_IMM, 2 \ ) \ UACPI_BUILD_NAMED_SCOPE_OBJECT_OP( \ ThermalZone, 0x85, UACPI_OBJECT_THERMAL_ZONE \ diff --git a/drivers/bus/acpi_new/include/uacpi/internal/opregion.h b/drivers/bus/acpi_new/include/uacpi/internal/opregion.h index 3eb8f1b4b5780..fe44f66dd1966 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/opregion.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/opregion.h @@ -1,27 +1,42 @@ #pragma once -#include +#include #include +uacpi_status uacpi_initialize_opregion(void); +void uacpi_deinitialize_opregion(void); + void uacpi_trace_region_error( uacpi_namespace_node *node, uacpi_char *message, uacpi_status ret ); void uacpi_trace_region_io( - uacpi_namespace_node *node, uacpi_region_op op, + uacpi_namespace_node *node, uacpi_address_space space, uacpi_region_op op, uacpi_u64 offset, uacpi_u8 byte_size, uacpi_u64 ret ); +uacpi_status uacpi_install_address_space_handler_with_flags( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_region_handler handler, uacpi_handle handler_context, + uacpi_u16 flags +); + void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node); -uacpi_address_space_handlers 
*uacpi_node_get_address_space_handlers( - uacpi_namespace_node *node +uacpi_bool uacpi_address_space_handler_is_default( + uacpi_address_space_handler *handler ); -uacpi_status uacpi_opregion_find_and_install_handler( +uacpi_address_space_handlers *uacpi_node_get_address_space_handlers( uacpi_namespace_node *node ); -void uacpi_opregion_reg(uacpi_namespace_node *node); +uacpi_status uacpi_initialize_opregion_node(uacpi_namespace_node *node); + uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node); void uacpi_install_default_address_space_handlers(void); + +uacpi_status uacpi_dispatch_opregion_io( + uacpi_namespace_node *region_node, uacpi_u32 offset, uacpi_u8 byte_width, + uacpi_region_op op, uacpi_u64 *in_out +); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/registers.h b/drivers/bus/acpi_new/include/uacpi/internal/registers.h index 6ad28e718ff43..7db00613482f3 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/registers.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/registers.h @@ -2,6 +2,9 @@ #include +uacpi_status uacpi_ininitialize_registers(void); +void uacpi_deininitialize_registers(void); + enum uacpi_register { UACPI_REGISTER_PM1_STS = 0, UACPI_REGISTER_PM1_EN, diff --git a/drivers/bus/acpi_new/include/uacpi/internal/resources.h b/drivers/bus/acpi_new/include/uacpi/internal/resources.h index ece17d63a9d49..3d0b48ad8f406 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/resources.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/resources.h @@ -1,6 +1,6 @@ #pragma once -#include +#include #include enum uacpi_aml_resource { @@ -301,8 +301,7 @@ struct uacpi_resource_spec { const struct uacpi_resource_convert_instruction *to_aml; }; -typedef uacpi_resource_iteration_decision -(*uacpi_aml_resource_iteration_callback) ( +typedef uacpi_iteration_decision (*uacpi_aml_resource_iteration_callback)( void*, uacpi_u8 *data, uacpi_u16 resource_size, const struct uacpi_resource_spec* ); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/shareable.h b/drivers/bus/acpi_new/include/uacpi/internal/shareable.h index 78e61516f6205..e00d850db0153 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/shareable.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/shareable.h @@ -2,6 +2,10 @@ #include +struct uacpi_shareable { + uacpi_u32 reference_count; +}; + void uacpi_shareable_init(uacpi_handle); uacpi_bool uacpi_bugged_shareable(uacpi_handle); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/stdlib.h b/drivers/bus/acpi_new/include/uacpi/internal/stdlib.h index 7f574dd635fc1..18a40c1617800 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/stdlib.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/stdlib.h @@ -80,46 +80,3 @@ uacpi_u8 uacpi_bit_scan_forward(uacpi_u64); uacpi_u8 uacpi_bit_scan_backward(uacpi_u64); uacpi_u8 uacpi_popcount(uacpi_u64); - -#ifdef UACPI_TRACE_MUTEXES -#define UACPI_TRACE_MUTEX_ACQUISITION(mtx) \ - uacpi_trace("mutex %p acquired at %s:%d\n", mtx, __FILE__, __LINE__) - -#define UACPI_TRACE_MUTEX_ACQUISITION_TIMEOUT(mtx, timeout) \ - uacpi_trace("mutex %p acquisition timed out after %dms at %s:%d\n", \ - mtx, (uacpi_u16)timeout, __FILE__, __LINE__) - -#define UACPI_TRACE_MUTEX_RELEASE(mtx) \ - uacpi_trace("mutex %p released at %s:%d\n", mtx, __FILE__, __LINE__) -#else -#define UACPI_TRACE_MUTEX_ACQUISITION(mtx) -#define UACPI_TRACE_MUTEX_ACQUISITION_TIMEOUT(mtx, timeout) -#define UACPI_TRACE_MUTEX_RELEASE(mtx) -#endif - -#define UACPI_MUTEX_ACQUIRE(mtx) \ - do { \ - if 
(uacpi_unlikely(!uacpi_kernel_acquire_mutex(mtx, 0xFFFF))) { \ - uacpi_error( \ - "%s: unable to acquire mutex %p with an infinite timeout\n", \ - __FUNCTION__, mtx \ - ); \ - return UACPI_STATUS_INTERNAL_ERROR; \ - } \ - UACPI_TRACE_MUTEX_ACQUISITION(mtx); \ - } while (0) - -#define UACPI_MUTEX_ACQUIRE_WITH_TIMEOUT(mtx, timeout, ret) \ - do { \ - ret = uacpi_kernel_acquire_mutex(mtx, timeout); \ - if (ret) { \ - UACPI_TRACE_MUTEX_ACQUISITION(mtx); \ - } else { \ - UACPI_TRACE_MUTEX_ACQUISITION_TIMEOUT(mtx, timeout); \ - } \ - } while (0) - -#define UACPI_MUTEX_RELEASE(mtx) do { \ - uacpi_kernel_release_mutex(mtx); \ - UACPI_TRACE_MUTEX_RELEASE(mtx); \ - } while (0) diff --git a/drivers/bus/acpi_new/include/uacpi/internal/tables.h b/drivers/bus/acpi_new/include/uacpi/internal/tables.h index d52b6d9aa5b7c..35e1b16e48136 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/tables.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/tables.h @@ -48,11 +48,7 @@ uacpi_status uacpi_table_load_with_cause( uacpi_size idx, enum uacpi_table_load_cause cause ); -enum uacpi_table_iteration_decision { - UACPI_TABLE_ITERATION_DECISION_CONTINUE, - UACPI_TABLE_ITERATION_DECISION_BREAK, -}; -typedef enum uacpi_table_iteration_decision (*uacpi_table_iteration_callback) +typedef uacpi_iteration_decision (*uacpi_table_iteration_callback) (void *user, struct uacpi_installed_table *tbl, uacpi_size idx); uacpi_status uacpi_for_each_table( diff --git a/drivers/bus/acpi_new/include/uacpi/internal/types.h b/drivers/bus/acpi_new/include/uacpi/internal/types.h index e187cf202dd0d..bbc9f614efcb5 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/types.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/types.h @@ -2,6 +2,7 @@ #include #include +#include // object->flags field if object->type == UACPI_OBJECT_REFERENCE enum uacpi_reference_kind { @@ -18,10 +19,263 @@ enum uacpi_string_kind { UACPI_STRING_KIND_PATH, }; +typedef struct uacpi_buffer { + struct uacpi_shareable shareable; + union { + void *data; + uacpi_u8 *byte_data; + uacpi_char *text; + }; + uacpi_size size; +} uacpi_buffer; + +typedef struct uacpi_package { + struct uacpi_shareable shareable; + uacpi_object **objects; + uacpi_size count; +} uacpi_package; + +typedef struct uacpi_buffer_field { + uacpi_buffer *backing; + uacpi_size bit_index; + uacpi_u32 bit_length; + uacpi_bool force_buffer; +} uacpi_buffer_field; + +typedef struct uacpi_buffer_index { + uacpi_size idx; + uacpi_buffer *buffer; +} uacpi_buffer_index; + +typedef struct uacpi_mutex { + struct uacpi_shareable shareable; + uacpi_handle handle; + uacpi_thread_id owner; + uacpi_u16 depth; + uacpi_u8 sync_level; +} uacpi_mutex; + +typedef struct uacpi_event { + struct uacpi_shareable shareable; + uacpi_handle handle; +} uacpi_event; + +typedef struct uacpi_address_space_handler { + struct uacpi_shareable shareable; + uacpi_region_handler callback; + uacpi_handle user_context; + struct uacpi_address_space_handler *next; + struct uacpi_operation_region *regions; + uacpi_u16 space; + +#define UACPI_ADDRESS_SPACE_HANDLER_DEFAULT (1 << 0) + uacpi_u16 flags; +} uacpi_address_space_handler; + +/* + * NOTE: These are common object headers. + * Any changes to these structs must be propagated to all objects. 
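+ * (uACPI reads these fields through the generic uacpi_handlers and
+ * uacpi_address_space_handlers views declared below, which is only safe
+ * while every such object keeps an identical leading field layout.)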
+ * ============================================================== + * Common for the following objects: + * - UACPI_OBJECT_OPERATION_REGION + * - UACPI_OBJECT_PROCESSOR + * - UACPI_OBJECT_DEVICE + * - UACPI_OBJECT_THERMAL_ZONE + */ +typedef struct uacpi_address_space_handlers { + struct uacpi_shareable shareable; + uacpi_address_space_handler *head; +} uacpi_address_space_handlers; + +typedef struct uacpi_device_notify_handler { + uacpi_notify_handler callback; + uacpi_handle user_context; + struct uacpi_device_notify_handler *next; +} uacpi_device_notify_handler; + /* - * TODO: Write a note here explaining how references are currently implemented - * and how some of the edge cases are handled. + * Common for the following objects: + * - UACPI_OBJECT_PROCESSOR + * - UACPI_OBJECT_DEVICE + * - UACPI_OBJECT_THERMAL_ZONE */ +typedef struct uacpi_handlers { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_head; + uacpi_device_notify_handler *notify_head; +} uacpi_handlers; + +// This region has a corresponding _REG method that was succesfully executed +#define UACPI_OP_REGION_STATE_REG_EXECUTED (1 << 0) + +// This region was successfully attached to a handler +#define UACPI_OP_REGION_STATE_ATTACHED (1 << 1) + +typedef struct uacpi_operation_region { + struct uacpi_shareable shareable; + uacpi_address_space_handler *handler; + uacpi_handle user_context; + uacpi_u16 space; + uacpi_u8 state_flags; + uacpi_u64 offset; + uacpi_u64 length; + + // If space == TABLE_DATA + uacpi_u64 table_idx; + + // Used to link regions sharing the same handler + struct uacpi_operation_region *next; +} uacpi_operation_region; + +typedef struct uacpi_device { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; +} uacpi_device; + +typedef struct uacpi_processor { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; + uacpi_u8 id; + uacpi_u32 block_address; + uacpi_u8 block_length; +} uacpi_processor; + +typedef struct uacpi_thermal_zone { + struct uacpi_shareable shareable; + uacpi_address_space_handler *address_space_handlers; + uacpi_device_notify_handler *notify_handlers; +} uacpi_thermal_zone; + +typedef struct uacpi_power_resource { + uacpi_u8 system_level; + uacpi_u16 resource_order; +} uacpi_power_resource; + +typedef uacpi_status (*uacpi_native_call_handler)( + uacpi_handle ctx, uacpi_object *retval +); + +typedef struct uacpi_control_method { + struct uacpi_shareable shareable; + union { + uacpi_u8 *code; + uacpi_native_call_handler handler; + }; + uacpi_mutex *mutex; + uacpi_u32 size; + uacpi_u8 sync_level : 4; + uacpi_u8 args : 3; + uacpi_u8 is_serialized : 1; + uacpi_u8 named_objects_persist: 1; + uacpi_u8 native_call : 1; + uacpi_u8 owns_code : 1; +} uacpi_control_method; + +typedef enum uacpi_access_type { + UACPI_ACCESS_TYPE_ANY = 0, + UACPI_ACCESS_TYPE_BYTE = 1, + UACPI_ACCESS_TYPE_WORD = 2, + UACPI_ACCESS_TYPE_DWORD = 3, + UACPI_ACCESS_TYPE_QWORD = 4, + UACPI_ACCESS_TYPE_BUFFER = 5, +} uacpi_access_type; + +typedef enum uacpi_access_attributes { + UACPI_ACCESS_ATTRIBUTE_QUICK = 0x02, + UACPI_ACCESS_ATTRIBUTE_SEND_RECEIVE = 0x04, + UACPI_ACCESS_ATTRIBUTE_BYTE = 0x06, + UACPI_ACCESS_ATTRIBUTE_WORD = 0x08, + UACPI_ACCESS_ATTRIBUTE_BLOCK = 0x0A, + UACPI_ACCESS_ATTRIBUTE_BYTES = 0x0B, + UACPI_ACCESS_ATTRIBUTE_PROCESS_CALL = 0x0C, + UACPI_ACCESS_ATTRIBUTE_BLOCK_PROCESS_CALL = 0x0D, + 
UACPI_ACCESS_ATTRIBUTE_RAW_BYTES = 0x0E, + UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES = 0x0F, +} uacpi_access_attributes; + +typedef enum uacpi_lock_rule { + UACPI_LOCK_RULE_NO_LOCK = 0, + UACPI_LOCK_RULE_LOCK = 1, +} uacpi_lock_rule; + +typedef enum uacpi_update_rule { + UACPI_UPDATE_RULE_PRESERVE = 0, + UACPI_UPDATE_RULE_WRITE_AS_ONES = 1, + UACPI_UPDATE_RULE_WRITE_AS_ZEROES = 2, +} uacpi_update_rule; + +typedef enum uacpi_field_unit_kind { + UACPI_FIELD_UNIT_KIND_NORMAL = 0, + UACPI_FIELD_UNIT_KIND_INDEX = 1, + UACPI_FIELD_UNIT_KIND_BANK = 2, +} uacpi_field_unit_kind; + +typedef struct uacpi_field_unit { + struct uacpi_shareable shareable; + + union { + // UACPI_FIELD_UNIT_KIND_NORMAL + struct { + uacpi_namespace_node *region; + }; + + // UACPI_FIELD_UNIT_KIND_INDEX + struct { + struct uacpi_field_unit *index; + struct uacpi_field_unit *data; + }; + + // UACPI_FIELD_UNIT_KIND_BANK + struct { + uacpi_namespace_node *bank_region; + struct uacpi_field_unit *bank_selection; + uacpi_u64 bank_value; + }; + }; + + uacpi_object *connection; + + uacpi_u32 byte_offset; + uacpi_u32 bit_length; + uacpi_u8 bit_offset_within_first_byte; + uacpi_u8 access_width_bytes; + uacpi_u8 access_length; + + uacpi_u8 attributes : 4; + uacpi_u8 update_rule : 2; + uacpi_u8 kind : 2; + uacpi_u8 lock_rule : 1; +} uacpi_field_unit; + +typedef struct uacpi_object { + struct uacpi_shareable shareable; + uacpi_u8 type; + uacpi_u8 flags; + + union { + uacpi_u64 integer; + uacpi_package *package; + uacpi_buffer_field buffer_field; + uacpi_object *inner_object; + uacpi_control_method *method; + uacpi_buffer *buffer; + uacpi_mutex *mutex; + uacpi_event *event; + uacpi_buffer_index buffer_index; + uacpi_operation_region *op_region; + uacpi_device *device; + uacpi_processor *processor; + uacpi_thermal_zone *thermal_zone; + uacpi_address_space_handlers *address_space_handlers; + uacpi_handlers *handlers; + uacpi_power_resource power_resource; + uacpi_field_unit *field_unit; + }; +} uacpi_object; + +uacpi_object *uacpi_create_object(uacpi_object_type type); enum uacpi_assign_behavior { UACPI_ASSIGN_BEHAVIOR_DEEP_COPY, @@ -39,9 +293,19 @@ struct uacpi_object *uacpi_create_internal_reference( ); uacpi_object *uacpi_unwrap_internal_reference(uacpi_object *object); -uacpi_bool uacpi_package_fill(uacpi_package *pkg, uacpi_size num_elements); +enum uacpi_prealloc_objects { + UACPI_PREALLOC_OBJECTS_NO, + UACPI_PREALLOC_OBJECTS_YES, +}; + +uacpi_bool uacpi_package_fill( + uacpi_package *pkg, uacpi_size num_elements, + enum uacpi_prealloc_objects prealloc_objects +); uacpi_mutex *uacpi_create_mutex(void); void uacpi_mutex_unref(uacpi_mutex*); +void uacpi_method_unref(uacpi_control_method*); + void uacpi_address_space_handler_unref(uacpi_address_space_handler *handler); diff --git a/drivers/bus/acpi_new/include/uacpi/internal/utilities.h b/drivers/bus/acpi_new/include/uacpi/internal/utilities.h index e3d7c739da291..606ec9259d8d9 100644 --- a/drivers/bus/acpi_new/include/uacpi/internal/utilities.h +++ b/drivers/bus/acpi_new/include/uacpi/internal/utilities.h @@ -41,3 +41,5 @@ uacpi_status uacpi_string_to_integer( uacpi_bool uacpi_is_valid_nameseg(uacpi_u8 *nameseg); void uacpi_free_dynamic_string(const uacpi_char *str); + +#define UACPI_NANOSECONDS_PER_SEC (1000ull * 1000ull * 1000ull) diff --git a/drivers/bus/acpi_new/include/uacpi/kernel_api.h b/drivers/bus/acpi_new/include/uacpi/kernel_api.h index b45cf314ff1a2..9f6acf956a720 100644 --- a/drivers/bus/acpi_new/include/uacpi/kernel_api.h +++ 
b/drivers/bus/acpi_new/include/uacpi/kernel_api.h @@ -28,39 +28,7 @@ void uacpi_kernel_deinitialize(void); #endif // Returns the PHYSICAL address of the RSDP structure via *out_rsdp_address. -uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rdsp_address); - -/* - * Raw IO API, this is only used for accessing verified data from - * "safe" code (aka not indirectly invoked by the AML interpreter), - * e.g. programming FADT & FACS registers. - * - * NOTE: - * 'byte_width' is ALWAYS one of 1, 2, 4, 8. You are NOT allowed to implement - * this in terms of memcpy, as hardware expects accesses to be of the EXACT - * width. - * ------------------------------------------------------------------------- - */ -uacpi_status uacpi_kernel_raw_memory_read( - uacpi_phys_addr address, uacpi_u8 byte_width, uacpi_u64 *out_value -); -uacpi_status uacpi_kernel_raw_memory_write( - uacpi_phys_addr address, uacpi_u8 byte_width, uacpi_u64 in_value -); - -/* - * NOTE: - * 'byte_width' is ALWAYS one of 1, 2, 4. You are NOT allowed to break e.g. a - * 4-byte access into four 1-byte accesses. Hardware ALWAYS expects accesses to - * be of the exact width. - */ -uacpi_status uacpi_kernel_raw_io_read( - uacpi_io_addr address, uacpi_u8 byte_width, uacpi_u64 *out_value -); -uacpi_status uacpi_kernel_raw_io_write( - uacpi_io_addr address, uacpi_u8 byte_width, uacpi_u64 in_value -); -// ------------------------------------------------------------------------- +uacpi_status uacpi_kernel_get_rsdp(uacpi_phys_addr *out_rsdp_address); /* * NOTE: @@ -145,10 +113,10 @@ void uacpi_kernel_vlog(uacpi_log_level, const uacpi_char*, uacpi_va_list); #endif /* - * Returns the number of 100 nanosecond ticks elapsed since boot, + * Returns the number of nanosecond ticks elapsed since boot, * strictly monotonic. */ -uacpi_u64 uacpi_kernel_get_ticks(void); +uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void); /* * Spin for N microseconds. @@ -181,9 +149,21 @@ uacpi_thread_id uacpi_kernel_get_thread_id(void); /* * Try to acquire the mutex with a millisecond timeout. - * A timeout value of 0xFFFF implies infinite wait. + * + * The timeout value has the following meanings: + * 0x0000 - Attempt to acquire the mutex once, in a non-blocking manner + * 0x0001...0xFFFE - Attempt to acquire the mutex for at least 'timeout' + * milliseconds + * 0xFFFF - Infinite wait, block until the mutex is acquired + * + * The following are possible return values: + * 1. UACPI_STATUS_OK - successful acquire operation + * 2. UACPI_STATUS_TIMEOUT - timeout reached while attempting to acquire (or the + * single attempt to acquire was not successful for + * calls with timeout=0) + * 3. Any other value - signifies a host internal error and is treated as such */ -uacpi_bool uacpi_kernel_acquire_mutex(uacpi_handle, uacpi_u16); +uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle, uacpi_u16); void uacpi_kernel_release_mutex(uacpi_handle); /* @@ -281,7 +261,11 @@ uacpi_status uacpi_kernel_schedule_work( ); /* - * Blocks until all scheduled work is complete and the work queue becomes empty. + * Waits for two types of work to finish: + * 1. All in-flight interrupts installed via uacpi_kernel_install_interrupt_handler + * 2. All work scheduled via uacpi_kernel_schedule_work + * + * Note that the waits must be done in this order specifically. 
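+ * For example, an in-flight interrupt handler may itself call
+ * uacpi_kernel_schedule_work(); draining the work queue first could miss
+ * work submitted by a handler that is still running.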
 */
 uacpi_status uacpi_kernel_wait_for_work_completion(void);
diff --git a/drivers/bus/acpi_new/include/uacpi/namespace.h b/drivers/bus/acpi_new/include/uacpi/namespace.h
index d2b0927a4f4ac..3a944290e4dcc 100644
--- a/drivers/bus/acpi_new/include/uacpi/namespace.h
+++ b/drivers/bus/acpi_new/include/uacpi/namespace.h
@@ -28,14 +28,57 @@ uacpi_namespace_node *uacpi_namespace_get_predefined(
     uacpi_predefined_namespace
 );
 
-uacpi_object *uacpi_namespace_node_get_object(const uacpi_namespace_node *node);
+/*
+ * Returns UACPI_TRUE if the provided 'node' is an alias.
+ */
+uacpi_bool uacpi_namespace_node_is_alias(uacpi_namespace_node *node);
+
 uacpi_object_name uacpi_namespace_node_name(const uacpi_namespace_node *node);
 
+/*
+ * Returns the type of object stored at the namespace node.
+ *
+ * NOTE: due to the existence of the CopyObject operator in AML, the
+ * return value of this function is subject to TOCTOU bugs.
+ */
+uacpi_status uacpi_namespace_node_type(
+    const uacpi_namespace_node *node, uacpi_object_type *out_type
+);
+
+/*
+ * Returns UACPI_TRUE via 'out' if the type of the object stored at the
+ * namespace node matches the provided value, UACPI_FALSE otherwise.
+ *
+ * NOTE: due to the existence of the CopyObject operator in AML, the
+ * return value of this function is subject to TOCTOU bugs.
+ */
+uacpi_status uacpi_namespace_node_is(
+    const uacpi_namespace_node *node, uacpi_object_type type, uacpi_bool *out
+);
+
+/*
+ * Returns UACPI_TRUE via 'out' if the type of the object stored at the
+ * namespace node matches any of the type bits in the provided value,
+ * UACPI_FALSE otherwise.
+ *
+ * NOTE: due to the existence of the CopyObject operator in AML, the
+ * return value of this function is subject to TOCTOU bugs.
+ */
+uacpi_status uacpi_namespace_node_is_one_of(
+    const uacpi_namespace_node *node, uacpi_object_type_bits type_mask,
+    uacpi_bool *out
+);
+
 uacpi_size uacpi_namespace_node_depth(const uacpi_namespace_node *node);
 
-uacpi_namespace_node *uacpi_namespace_node_find(
+uacpi_namespace_node *uacpi_namespace_node_parent(
+    uacpi_namespace_node *node
+);
+
+uacpi_status uacpi_namespace_node_find(
     uacpi_namespace_node *parent,
-    const uacpi_char *path
+    const uacpi_char *path,
+    uacpi_namespace_node **out_node
 );
 
 /*
@@ -44,32 +87,43 @@ uacpi_namespace_node *uacpi_namespace_node_find(
  * only desired if resolving a namepath specified in an aml-provided object,
  * such as a package element.
  */
-uacpi_namespace_node *uacpi_namespace_node_resolve_from_aml_namepath(
+uacpi_status uacpi_namespace_node_resolve_from_aml_namepath(
     uacpi_namespace_node *scope,
-    const uacpi_char *path
+    const uacpi_char *path,
+    uacpi_namespace_node **out_node
 );
 
-typedef enum uacpi_ns_iteration_decision {
-    // Continue to the next child of this node
-    UACPI_NS_ITERATION_DECISION_CONTINUE,
-
-    /*
-     * Don't go any deeper, instead continue to the next peer of the
-     * parent node currently being iterated.
-     */
-    UACPI_NS_ITERATION_DECISION_NEXT_PEER,
+typedef uacpi_iteration_decision (*uacpi_iteration_callback) (
+    void *user, uacpi_namespace_node *node, uacpi_u32 node_depth
+);
 
-    // Abort iteration
-    UACPI_NS_ITERATION_DECISION_BREAK,
-} uacpi_ns_iteration_decision;
+#define UACPI_MAX_DEPTH_ANY 0xFFFFFFFF
 
-typedef uacpi_ns_iteration_decision
-    (*uacpi_iteration_callback)(void *user, uacpi_namespace_node *node);
+/*
+ * Depth-first iterate the namespace starting at the first child of 'parent'.
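+ *
+ * This appears to be a shorthand for the full uacpi_namespace_for_each_child()
+ * below with only a descending callback, UACPI_OBJECT_ANY_BIT as the type
+ * mask, and UACPI_MAX_DEPTH_ANY as the depth limit.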
+ */ +uacpi_status uacpi_namespace_for_each_child_simple( + uacpi_namespace_node *parent, uacpi_iteration_callback callback, void *user +); -void uacpi_namespace_for_each_node_depth_first( - uacpi_namespace_node *parent, - uacpi_iteration_callback callback, - void *user +/* + * Depth-first iterate the namespace starting at the first child of 'parent'. + * + * 'descending_callback' is invoked the first time a node is visited when + * walking down. 'ascending_callback' is invoked the second time a node is + * visited after we reach the leaf node without children and start walking up. + * Either of the callbacks may be NULL, but not both at the same time. + * + * Only nodes matching 'type_mask' are passed to the callbacks. + * + * 'max_depth' is used to limit the maximum reachable depth from 'parent', + * where 1 is only direct children of 'parent', 2 is children of first-level + * children etc. Use UACPI_MAX_DEPTH_ANY or -1 to specify infinite depth. + */ +uacpi_status uacpi_namespace_for_each_child( + uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits type_mask, uacpi_u32 max_depth, void *user ); const uacpi_char *uacpi_namespace_node_generate_absolute_path( diff --git a/drivers/bus/acpi_new/include/uacpi/platform/atomic.h b/drivers/bus/acpi_new/include/uacpi/platform/atomic.h index 4d0726b78123b..20db940142310 100644 --- a/drivers/bus/acpi_new/include/uacpi/platform/atomic.h +++ b/drivers/bus/acpi_new/include/uacpi/platform/atomic.h @@ -50,6 +50,12 @@ #define UACPI_MSVC_ATOMIC_LOAD(ptr, type, width) \ _InterlockedOr##width((type volatile*)(ptr), 0) +#define UACPI_MSVC_ATOMIC_INC(ptr, type, width) \ + _InterlockedIncrement##width((type volatile*)(ptr)) + +#define UACPI_MSVC_ATOMIC_DEC(ptr, type, width) \ + _InterlockedDecrement##width((type volatile*)(ptr)) + UACPI_MAKE_MSVC_CMPXCHG(64, __int64, 64) UACPI_MAKE_MSVC_CMPXCHG(32, long,) UACPI_MAKE_MSVC_CMPXCHG(16, short, 16) @@ -72,6 +78,14 @@ UACPI_MAKE_MSVC_CMPXCHG(16, short, 16) #define uacpi_atomic_store16(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, short, 16) #define uacpi_atomic_store32(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, long,) #define uacpi_atomic_store64(ptr, value) UACPI_MSVC_ATOMIC_STORE(ptr, value, __int64, 64) + +#define uacpi_atomic_inc16(ptr) UACPI_MSVC_ATOMIC_INC(ptr, short, 16) +#define uacpi_atomic_inc32(ptr) UACPI_MSVC_ATOMIC_INC(ptr, long,) +#define uacpi_atomic_inc64(ptr) UACPI_MSVC_ATOMIC_INC(ptr, __int64, 64) + +#define uacpi_atomic_dec16(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, short, 16) +#define uacpi_atomic_dec32(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, long,) +#define uacpi_atomic_dec64(ptr) UACPI_MSVC_ATOMIC_DEC(ptr, __int64, 64) #else #define UACPI_DO_CMPXCHG(ptr, expected, desired) \ @@ -94,6 +108,14 @@ UACPI_MAKE_MSVC_CMPXCHG(16, short, 16) #define uacpi_atomic_store16(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) #define uacpi_atomic_store32(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) #define uacpi_atomic_store64(ptr, value) __atomic_store_n(ptr, value, __ATOMIC_RELEASE) + +#define uacpi_atomic_inc16(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_inc32(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_inc64(ptr) __atomic_add_fetch(ptr, 1, __ATOMIC_ACQ_REL) + +#define uacpi_atomic_dec16(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_dec32(ptr) __atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) +#define uacpi_atomic_dec64(ptr) 
__atomic_sub_fetch(ptr, 1, __ATOMIC_ACQ_REL) #endif #if UACPI_POINTER_SIZE == 4 diff --git a/drivers/bus/acpi_new/include/uacpi/platform/config.h b/drivers/bus/acpi_new/include/uacpi/platform/config.h new file mode 100644 index 0000000000000..7af043a4a63e1 --- /dev/null +++ b/drivers/bus/acpi_new/include/uacpi/platform/config.h @@ -0,0 +1,123 @@ +#pragma once + +#ifdef UACPI_OVERRIDE_CONFIG +#include "uacpi_config.h" +#else + +#include +#include + +/* + * ======================= + * Context-related options + * ======================= + */ +#ifndef UACPI_DEFAULT_LOG_LEVEL + #define UACPI_DEFAULT_LOG_LEVEL UACPI_LOG_INFO +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_LOG_LEVEL < UACPI_LOG_ERROR || + UACPI_DEFAULT_LOG_LEVEL > UACPI_LOG_DEBUG, + "configured default log level is invalid" +); + +#ifndef UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS + #define UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS 30 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS < 1, + "configured default loop timeout is invalid (expecting at least 1 second)" +); + +#ifndef UACPI_DEFAULT_MAX_CALL_STACK_DEPTH + #define UACPI_DEFAULT_MAX_CALL_STACK_DEPTH 256 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_DEFAULT_MAX_CALL_STACK_DEPTH < 4, + "configured default max call stack depth is invalid " + "(expecting at least 4 frames)" +); + +/* + * =================== + * Kernel-api options + * =================== + */ + +/* + * Convenience initialization/deinitialization hooks that will be called by + * uACPI automatically when appropriate if compiled-in. + */ +// #define UACPI_KERNEL_INITIALIZATION + +/* + * Makes kernel api logging callbacks work with unformatted printf-style + * strings and va_args instead of a pre-formatted string. Can be useful if + * your native logging is implemented in terms of this format as well. + */ +// #define UACPI_FORMATTED_LOGGING + +/* + * Makes uacpi_kernel_free take in an additional 'size_hint' parameter, which + * contains the size of the original allocation. Note that this comes with a + * performance penalty in some cases. + */ +// #define UACPI_SIZED_FREES + +/* + * ========================= + * Platform-specific options + * ========================= + */ + +/* + * Turns uacpi_phys_addr and uacpi_io_addr into a 32-bit type, and adds extra + * code for address truncation. Needed for e.g. i686 platforms without PAE + * support. + */ +// #define UACPI_PHYS_ADDR_IS_32BITS + +/* + * Switches uACPI into reduced-hardware-only mode. Strips all full-hardware + * ACPI support code at compile-time, including the event subsystem, the global + * lock, and other full-hardware features. + */ +// #define UACPI_REDUCED_HARDWARE + +/* + * ============= + * Misc. options + * ============= + */ + +/* + * If UACPI_FORMATTED_LOGGING is not enabled, this is the maximum length of the + * pre-formatted message that is passed to the logging callback. + */ +#ifndef UACPI_PLAIN_LOG_BUFFER_SIZE + #define UACPI_PLAIN_LOG_BUFFER_SIZE 128 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_PLAIN_LOG_BUFFER_SIZE < 16, + "configured log buffer size is too small (expecting at least 16 bytes)" +); + +/* + * The size of the table descriptor inline storage. All table descriptors past + * this length will be stored in a dynamically allocated heap array. The size + * of one table descriptor is approximately 56 bytes. 
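+ * With the default length of 16 below, that works out to roughly
+ * 16 * 56 = 896 bytes of inline storage.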
+ */ +#ifndef UACPI_STATIC_TABLE_ARRAY_LEN + #define UACPI_STATIC_TABLE_ARRAY_LEN 16 +#endif + +UACPI_BUILD_BUG_ON_WITH_MSG( + UACPI_STATIC_TABLE_ARRAY_LEN < 1, + "configured static table array length is too small (expecting at least 1)" +); + +#endif diff --git a/drivers/bus/acpi_new/include/uacpi/resources.h b/drivers/bus/acpi_new/include/uacpi/resources.h index f206ec4ad13ed..2d4b4d82be34d 100644 --- a/drivers/bus/acpi_new/include/uacpi/resources.h +++ b/drivers/bus/acpi_new/include/uacpi/resources.h @@ -645,13 +645,8 @@ typedef struct uacpi_resources { } uacpi_resources; void uacpi_free_resources(uacpi_resources*); -typedef enum uacpi_resource_iteration_decision { - UACPI_RESOURCE_ITERATION_ABORT, - UACPI_RESOURCE_ITERATION_CONTINUE, -} uacpi_resource_iteration_decision; - -typedef uacpi_resource_iteration_decision - (*uacpi_resource_iteration_callback)(void *user, uacpi_resource *resource); +typedef uacpi_iteration_decision (*uacpi_resource_iteration_callback) + (void *user, uacpi_resource *resource); uacpi_status uacpi_get_current_resources( uacpi_namespace_node *device, uacpi_resources **out_resources diff --git a/drivers/bus/acpi_new/include/uacpi/tables.h b/drivers/bus/acpi_new/include/uacpi/tables.h index c1d10ca60cc39..e256d897bc7ba 100644 --- a/drivers/bus/acpi_new/include/uacpi/tables.h +++ b/drivers/bus/acpi_new/include/uacpi/tables.h @@ -7,6 +7,9 @@ extern "C" { #endif +// Forward-declared to avoid including the entire acpi.h here +struct acpi_fadt; + typedef struct uacpi_table_identifiers { uacpi_object_name signature; diff --git a/drivers/bus/acpi_new/include/uacpi/types.h b/drivers/bus/acpi_new/include/uacpi/types.h index eea14756e32a1..a79744c0f8228 100644 --- a/drivers/bus/acpi_new/include/uacpi/types.h +++ b/drivers/bus/acpi_new/include/uacpi/types.h @@ -77,6 +77,17 @@ typedef struct uacpi_pci_address { uacpi_u8 function; } uacpi_pci_address; +typedef struct uacpi_data_view { + union { + uacpi_u8 *bytes; + const uacpi_u8 *const_bytes; + + uacpi_char *text; + const uacpi_char *const_text; + }; + uacpi_size length; +} uacpi_data_view; + typedef void *uacpi_handle; typedef struct uacpi_namespace_node uacpi_namespace_node; @@ -104,71 +115,244 @@ typedef enum uacpi_object_type { } uacpi_object_type; // Type bits for API requiring a bit mask, e.g. 
uacpi_eval_typed -#define UACPI_OBJECT_INTEGER_BIT (1 << UACPI_OBJECT_INTEGER) -#define UACPI_OBJECT_STRING_BIT (1 << UACPI_OBJECT_STRING) -#define UACPI_OBJECT_BUFFER_BIT (1 << UACPI_OBJECT_BUFFER) -#define UACPI_OBJECT_PACKAGE_BIT (1 << UACPI_OBJECT_PACKAGE) -#define UACPI_OBJECT_FIELD_UNIT_BIT (1 << UACPI_OBJECT_FIELD_UNIT) -#define UACPI_OBJECT_DEVICE_BIT (1 << UACPI_OBJECT_DEVICE) -#define UACPI_OBJECT_EVENT_BIT (1 << UACPI_OBJECT_EVENT) -#define UACPI_OBJECT_METHOD_BIT (1 << UACPI_OBJECT_METHOD) -#define UACPI_OBJECT_MUTEX_BIT (1 << UACPI_OBJECT_MUTEX) -#define UACPI_OBJECT_OPERATION_REGION_BIT (1 << UACPI_OBJECT_OPERATION_REGION) -#define UACPI_OBJECT_POWER_RESOURCE_BIT (1 << UACPI_OBJECT_POWER_RESOURCE) -#define UACPI_OBJECT_PROCESSOR_BIT (1 << UACPI_OBJECT_PROCESSOR) -#define UACPI_OBJECT_THERMAL_ZONE_BIT (1 << UACPI_OBJECT_THERMAL_ZONE) -#define UACPI_OBJECT_BUFFER_FIELD_BIT (1 << UACPI_OBJECT_BUFFER_FIELD) -#define UACPI_OBJECT_DEBUG_BIT (1 << UACPI_OBJECT_DEBUG) -#define UACPI_OBJECT_REFERENCE_BIT (1 << UACPI_OBJECT_REFERENCE) -#define UACPI_OBJECT_BUFFER_INDEX_BIT (1 << UACPI_OBJECT_BUFFER_INDEX) +typedef enum uacpi_object_type_bits { + UACPI_OBJECT_INTEGER_BIT = (1 << UACPI_OBJECT_INTEGER), + UACPI_OBJECT_STRING_BIT = (1 << UACPI_OBJECT_STRING), + UACPI_OBJECT_BUFFER_BIT = (1 << UACPI_OBJECT_BUFFER), + UACPI_OBJECT_PACKAGE_BIT = (1 << UACPI_OBJECT_PACKAGE), + UACPI_OBJECT_FIELD_UNIT_BIT = (1 << UACPI_OBJECT_FIELD_UNIT), + UACPI_OBJECT_DEVICE_BIT = (1 << UACPI_OBJECT_DEVICE), + UACPI_OBJECT_EVENT_BIT = (1 << UACPI_OBJECT_EVENT), + UACPI_OBJECT_METHOD_BIT = (1 << UACPI_OBJECT_METHOD), + UACPI_OBJECT_MUTEX_BIT = (1 << UACPI_OBJECT_MUTEX), + UACPI_OBJECT_OPERATION_REGION_BIT = (1 << UACPI_OBJECT_OPERATION_REGION), + UACPI_OBJECT_POWER_RESOURCE_BIT = (1 << UACPI_OBJECT_POWER_RESOURCE), + UACPI_OBJECT_PROCESSOR_BIT = (1 << UACPI_OBJECT_PROCESSOR), + UACPI_OBJECT_THERMAL_ZONE_BIT = (1 << UACPI_OBJECT_THERMAL_ZONE), + UACPI_OBJECT_BUFFER_FIELD_BIT = (1 << UACPI_OBJECT_BUFFER_FIELD), + UACPI_OBJECT_DEBUG_BIT = (1 << UACPI_OBJECT_DEBUG), + UACPI_OBJECT_REFERENCE_BIT = (1 << UACPI_OBJECT_REFERENCE), + UACPI_OBJECT_BUFFER_INDEX_BIT = (1 << UACPI_OBJECT_BUFFER_INDEX), + UACPI_OBJECT_ANY_BIT = 0xFFFFFFFF, +} uacpi_object_type_bits; -const uacpi_char *uacpi_object_type_to_string(uacpi_object_type); typedef struct uacpi_object uacpi_object; -struct uacpi_shareable { - uacpi_u32 reference_count; -}; +void uacpi_object_ref(uacpi_object *obj); +void uacpi_object_unref(uacpi_object *obj); + +uacpi_object_type uacpi_object_get_type(uacpi_object*); +uacpi_object_type_bits uacpi_object_get_type_bit(uacpi_object*); -typedef struct uacpi_buffer { - struct uacpi_shareable shareable; - union { - void *data; - uacpi_u8 *byte_data; - uacpi_char *text; - }; - uacpi_size size; -} uacpi_buffer; +/* + * Returns UACPI_TRUE if the provided object's type matches this type. + */ +uacpi_bool uacpi_object_is(uacpi_object*, uacpi_object_type); + +/* + * Returns UACPI_TRUE if the provided object's type is one of the values + * specified in the 'type_mask' of UACPI_OBJECT_*_BIT. + */ +uacpi_bool uacpi_object_is_one_of( + uacpi_object*, uacpi_object_type_bits type_mask +); + +const uacpi_char *uacpi_object_type_to_string(uacpi_object_type); + +/* + * Create an uninitialized object. The object can be further overwritten via + * uacpi_object_assign_* to anything. + */ +uacpi_object *uacpi_object_create_uninitialized(void); + +/* + * Create an integer object with the value provided. 
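+ * As with the other uacpi_object_create_* helpers, the returned object is
+ * owned by the caller and should eventually be released via
+ * uacpi_object_unref().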
+ */ +uacpi_object *uacpi_object_create_integer(uacpi_u64); + +typedef enum uacpi_overflow_behavior { + UACPI_OVERFLOW_ALLOW = 0, + UACPI_OVERFLOW_TRUNCATE, + UACPI_OVERFLOW_DISALLOW, +} uacpi_overflow_behavior; + +/* + * Same as uacpi_object_create_integer, but introduces additional ways to + * control what happens if the provided integer is larger than 32-bits, and the + * AML code expects 32-bit integers. + * + * - UACPI_OVERFLOW_ALLOW -> do nothing, same as the vanilla helper + * - UACPI_OVERFLOW_TRUNCATE -> truncate the integer to 32-bits if it happens to + * be larger than allowed by the DSDT + * - UACPI_OVERFLOW_DISALLOW -> fail object creation with + * UACPI_STATUS_INVALID_ARGUMENT if the provided + * value happens to be too large + */ +uacpi_status uacpi_object_create_integer_safe( + uacpi_u64, uacpi_overflow_behavior, uacpi_object **out_obj +); -typedef struct uacpi_package { - struct uacpi_shareable shareable; +uacpi_status uacpi_object_assign_integer(uacpi_object*, uacpi_u64 value); +uacpi_status uacpi_object_get_integer(uacpi_object*, uacpi_u64 *out); + +/* + * Create a string/buffer object. Takes in a constant view of the data. + * + * NOTE: The data is copied to a separately allocated buffer and is not taken + * ownership of. + */ +uacpi_object *uacpi_object_create_string(uacpi_data_view); +uacpi_object *uacpi_object_create_cstring(const uacpi_char*); +uacpi_object *uacpi_object_create_buffer(uacpi_data_view); + +/* + * Returns a writable view of the data stored in the string or buffer type + * object. + */ +uacpi_status uacpi_object_get_string_or_buffer( + uacpi_object*, uacpi_data_view *out +); +uacpi_status uacpi_object_get_string(uacpi_object*, uacpi_data_view *out); +uacpi_status uacpi_object_get_buffer(uacpi_object*, uacpi_data_view *out); + +/* + * Returns UACPI_TRUE if the provided string object is actually an AML namepath. + * + * This can only be the case for package elements. If a package element is + * specified as a path to an object in AML, it's not resolved by the interpreter + * right away as it might not have been defined at that point yet, and is + * instead stored as a special string object to be resolved by client code + * when needed. + * + * Example usage: + * uacpi_namespace_node *target_node = UACPI_NULL; + * + * uacpi_object *obj = UACPI_NULL; + * uacpi_eval(scope, path, UACPI_NULL, &obj); + * + * uacpi_object_array arr; + * uacpi_object_get_package(obj, &arr); + * + * if (uacpi_object_is_aml_namepath(arr.objects[0])) { + * uacpi_object_resolve_as_aml_namepath( + * arr.objects[0], scope, &target_node + * ); + * } + */ +uacpi_bool uacpi_object_is_aml_namepath(uacpi_object*); + +/* + * Resolve an AML namepath contained in a string object. + * + * This is only applicable to objects that are package elements. See an + * explanation of how this works in the comment above the declaration of + * uacpi_object_is_aml_namepath. + * + * This is a shorthand for: + * uacpi_data_view view; + * uacpi_object_get_string(object, &view); + * + * target_node = uacpi_namespace_node_resolve_from_aml_namepath( + * scope, view.text + * ); + */ +uacpi_status uacpi_object_resolve_as_aml_namepath( + uacpi_object*, uacpi_namespace_node *scope, uacpi_namespace_node **out_node +); + +/* + * Make the provided object a string/buffer. + * Takes in a constant view of the data to be stored in the object. + * + * NOTE: The data is copied to a separately allocated buffer and is not taken + * ownership of. 
+ */ +uacpi_status uacpi_object_assign_string(uacpi_object*, uacpi_data_view in); +uacpi_status uacpi_object_assign_buffer(uacpi_object*, uacpi_data_view in); + +typedef struct uacpi_object_array { uacpi_object **objects; uacpi_size count; -} uacpi_package; - -typedef struct uacpi_buffer_field { - uacpi_buffer *backing; - uacpi_size bit_index; - uacpi_u32 bit_length; - uacpi_bool force_buffer; -} uacpi_buffer_field; - -typedef struct uacpi_buffer_index { - uacpi_size idx; - uacpi_buffer *buffer; -} uacpi_buffer_index; - -typedef struct uacpi_mutex { - struct uacpi_shareable shareable; - uacpi_handle handle; - uacpi_thread_id owner; - uacpi_u16 depth; - uacpi_u8 sync_level; -} uacpi_mutex; - -typedef struct uacpi_event { - struct uacpi_shareable shareable; - uacpi_handle handle; -} uacpi_event; +} uacpi_object_array; + +/* + * Create a package object and store all of the objects in the array inside. + * The array is allowed to be empty. + * + * NOTE: the reference count of each object is incremented before being stored + * in the object. Client code must remove all of the locally created + * references at its own discretion. + */ +uacpi_object *uacpi_object_create_package(uacpi_object_array in); + +/* + * Returns the list of objects stored in a package object. + * + * NOTE: the reference count of the objects stored inside is not incremented, + * which means destroying/overwriting the object also potentially destroys + * all of the objects stored inside unless the reference count is + * incremented by the client via uacpi_object_ref. + */ +uacpi_status uacpi_object_get_package(uacpi_object*, uacpi_object_array *out); + +/* + * Make the provided object a package and store all of the objects in the array + * inside. The array is allowed to be empty. + * + * NOTE: the reference count of each object is incremented before being stored + * in the object. Client code must remove all of the locally created + * references at its own discretion. + */ +uacpi_status uacpi_object_assign_package(uacpi_object*, uacpi_object_array in); + +/* + * Create a reference object and make it point to 'child'. + * + * NOTE: child's reference count is incremented by one. Client code must remove + * all of the locally created references at its own discretion. + */ +uacpi_object *uacpi_object_create_reference(uacpi_object *child); + +/* + * Make the provided object a reference and make it point to 'child'. + * + * NOTE: child's reference count is incremented by one. Client code must remove + * all of the locally created references at its own discretion. + */ +uacpi_status uacpi_object_assign_reference(uacpi_object*, uacpi_object *child); + +/* + * Retrieve the object pointed to by a reference object. + * + * NOTE: the reference count of the returned object is incremented by one and + * must be uacpi_object_unref'ed by the client when no longer needed. + */ +uacpi_status uacpi_object_get_dereferenced(uacpi_object*, uacpi_object **out); + +typedef struct uacpi_processor_info { + uacpi_u8 id; + uacpi_u32 block_address; + uacpi_u8 block_length; +} uacpi_processor_info; + +/* + * Returns the information about the provided processor object. + */ +uacpi_status uacpi_object_get_processor_info( + uacpi_object*, uacpi_processor_info *out +); + +typedef struct uacpi_power_resource_info { + uacpi_u8 system_level; + uacpi_u16 resource_order; +} uacpi_power_resource_info; + +/* + * Returns the information about the provided power resource object.
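A minimal sketch of building a package with the helpers above, including the reference-count bookkeeping the NOTEs call for (error checking omitted; the element values are arbitrary):

    uacpi_object *objs[2];
    uacpi_object_array arr;
    uacpi_object *pkg;

    objs[0] = uacpi_object_create_integer(42);
    objs[1] = uacpi_object_create_cstring("example");

    arr.objects = objs;
    arr.count = 2;

    pkg = uacpi_object_create_package(arr);

    /* the package now holds its own references; drop the local ones */
    uacpi_object_unref(objs[0]);
    uacpi_object_unref(objs[1]);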
+ */ +uacpi_status uacpi_object_get_power_resource_info( + uacpi_object*, uacpi_power_resource_info *out +); typedef enum uacpi_region_op { UACPI_REGION_OP_ATTACH = 1, @@ -203,51 +387,9 @@ typedef struct uacpi_region_detach_data { typedef uacpi_status (*uacpi_region_handler) (uacpi_region_op op, uacpi_handle op_data); -typedef struct uacpi_address_space_handler { - struct uacpi_shareable shareable; - uacpi_region_handler callback; - uacpi_handle user_context; - struct uacpi_address_space_handler *next; - struct uacpi_operation_region *regions; - uacpi_u16 space; -} uacpi_address_space_handler; - typedef uacpi_status (*uacpi_notify_handler) (uacpi_handle context, uacpi_namespace_node *node, uacpi_u64 value); -typedef struct uacpi_device_notify_handler { - uacpi_notify_handler callback; - uacpi_handle user_context; - struct uacpi_device_notify_handler *next; -} uacpi_device_notify_handler; - -/* - * NOTE: These are common object headers. - * Any changes to these structs must be propagated to all objects. - * ============================================================== - * Common for the following objects: - * - UACPI_OBJECT_OPERATION_REGION - * - UACPI_OBJECT_PROCESSOR - * - UACPI_OBJECT_DEVICE - * - UACPI_OBJECT_THERMAL_ZONE - */ -typedef struct uacpi_address_space_handlers { - struct uacpi_shareable shareable; - uacpi_address_space_handler *head; -} uacpi_address_space_handlers; - -/* - * Common for the following objects: - * - UACPI_OBJECT_PROCESSOR - * - UACPI_OBJECT_DEVICE - * - UACPI_OBJECT_THERMAL_ZONE - */ -typedef struct uacpi_handlers { - struct uacpi_shareable shareable; - uacpi_address_space_handler *address_space_head; - uacpi_device_notify_handler *notify_head; -} uacpi_handlers; - typedef enum uacpi_address_space { UACPI_ADDRESS_SPACE_SYSTEM_MEMORY = 0, UACPI_ADDRESS_SPACE_SYSTEM_IO = 1, @@ -268,177 +410,6 @@ typedef enum uacpi_address_space { } uacpi_address_space; const uacpi_char *uacpi_address_space_to_string(uacpi_address_space space); -// This region has a corresponding _REG method that was succesfully executed -#define UACPI_OP_REGION_STATE_REG_EXECUTED (1 << 0) - -// This region was successfully attached to a handler -#define UACPI_OP_REGION_STATE_ATTACHED (1 << 1) - -typedef struct uacpi_operation_region { - struct uacpi_shareable shareable; - uacpi_address_space_handler *handler; - uacpi_handle user_context; - uacpi_u16 space; - uacpi_u8 state_flags; - uacpi_u64 offset; - uacpi_u64 length; - - // Used to link regions sharing the same handler - struct uacpi_operation_region *next; -} uacpi_operation_region; - -typedef struct uacpi_device { - struct uacpi_shareable shareable; - uacpi_address_space_handler *address_space_handlers; - uacpi_device_notify_handler *notify_handlers; -} uacpi_device; - -typedef struct uacpi_processor { - struct uacpi_shareable shareable; - uacpi_address_space_handler *address_space_handlers; - uacpi_device_notify_handler *notify_handlers; - uacpi_u8 id; - uacpi_u32 block_address; - uacpi_u8 block_length; -} uacpi_processor; - -typedef struct uacpi_thermal_zone { - struct uacpi_shareable shareable; - uacpi_address_space_handler *address_space_handlers; - uacpi_device_notify_handler *notify_handlers; -} uacpi_thermal_zone; - -typedef struct uacpi_power_resource { - uacpi_u8 system_level; - uacpi_u16 resource_order; -} uacpi_power_resource; - -typedef uacpi_status (*uacpi_native_call_handler)( - uacpi_handle ctx, uacpi_object *retval -); - -typedef struct uacpi_control_method { - struct uacpi_shareable shareable; - union { - uacpi_u8 *code; - 
uacpi_native_call_handler handler; - }; - uacpi_mutex *mutex; - uacpi_u32 size; - uacpi_u8 sync_level : 4; - uacpi_u8 args : 3; - uacpi_u8 is_serialized : 1; - uacpi_u8 named_objects_persist: 1; - uacpi_u8 native_call : 1; -} uacpi_control_method; - -typedef enum uacpi_access_type { - UACPI_ACCESS_TYPE_ANY = 0, - UACPI_ACCESS_TYPE_BYTE = 1, - UACPI_ACCESS_TYPE_WORD = 2, - UACPI_ACCESS_TYPE_DWORD = 3, - UACPI_ACCESS_TYPE_QWORD = 4, - UACPI_ACCESS_TYPE_BUFFER = 5, -} uacpi_access_type; - -typedef enum uacpi_access_attributes { - UACPI_ACCESS_ATTRIBUTE_QUICK = 0x02, - UACPI_ACCESS_ATTRIBUTE_SEND_RECEIVE = 0x04, - UACPI_ACCESS_ATTRIBUTE_BYTE = 0x06, - UACPI_ACCESS_ATTRIBUTE_WORD = 0x08, - UACPI_ACCESS_ATTRIBUTE_BLOCK = 0x0A, - UACPI_ACCESS_ATTRIBUTE_BYTES = 0x0B, - UACPI_ACCESS_ATTRIBUTE_PROCESS_CALL = 0x0C, - UACPI_ACCESS_ATTRIBUTE_BLOCK_PROCESS_CALL = 0x0D, - UACPI_ACCESS_ATTRIBUTE_RAW_BYTES = 0x0E, - UACPI_ACCESS_ATTRIBUTE_RAW_PROCESS_BYTES = 0x0F, -} uacpi_access_attributes; - -typedef enum uacpi_lock_rule { - UACPI_LOCK_RULE_NO_LOCK = 0, - UACPI_LOCK_RULE_LOCK = 1, -} uacpi_lock_rule; - -typedef enum uacpi_update_rule { - UACPI_UPDATE_RULE_PRESERVE = 0, - UACPI_UPDATE_RULE_WRITE_AS_ONES = 1, - UACPI_UPDATE_RULE_WRITE_AS_ZEROES = 2, -} uacpi_update_rule; - -typedef enum uacpi_field_unit_kind { - UACPI_FIELD_UNIT_KIND_NORMAL = 0, - UACPI_FIELD_UNIT_KIND_INDEX = 1, - UACPI_FIELD_UNIT_KIND_BANK = 2, -} uacpi_field_unit_kind; - -typedef struct uacpi_field_unit { - struct uacpi_shareable shareable; - - union { - // UACPI_FIELD_UNIT_KIND_NORMAL - struct { - uacpi_namespace_node *region; - }; - - // UACPI_FIELD_UNIT_KIND_INDEX - struct { - struct uacpi_field_unit *index; - struct uacpi_field_unit *data; - }; - - // UACPI_FIELD_UNIT_KIND_BANK - struct { - uacpi_namespace_node *bank_region; - struct uacpi_field_unit *bank_selection; - uacpi_u64 bank_value; - }; - }; - - uacpi_object *connection; - - uacpi_u32 byte_offset; - uacpi_u32 bit_length; - uacpi_u8 bit_offset_within_first_byte; - uacpi_u8 access_width_bytes; - uacpi_u8 access_length; - - uacpi_u8 attributes : 4; - uacpi_u8 update_rule : 2; - uacpi_u8 kind : 2; - uacpi_u8 lock_rule : 1; -} uacpi_field_unit; - -typedef struct uacpi_object { - struct uacpi_shareable shareable; - uacpi_u8 type; - uacpi_u8 flags; - - union { - uacpi_u64 integer; - uacpi_package *package; - uacpi_buffer_field buffer_field; - uacpi_object *inner_object; - uacpi_control_method *method; - uacpi_buffer *buffer; - uacpi_mutex *mutex; - uacpi_event *event; - uacpi_buffer_index buffer_index; - uacpi_operation_region *op_region; - uacpi_device *device; - uacpi_processor *processor; - uacpi_thermal_zone *thermal_zone; - uacpi_address_space_handlers *address_space_handlers; - uacpi_handlers *handlers; - uacpi_power_resource power_resource; - uacpi_field_unit *field_unit; - }; -} uacpi_object; - -typedef struct uacpi_args { - uacpi_object **objects; - uacpi_size count; -} uacpi_args; - typedef union uacpi_object_name { uacpi_char text[4]; uacpi_u32 id; @@ -474,10 +445,13 @@ typedef uacpi_u32 uacpi_interrupt_ret; typedef uacpi_interrupt_ret (*uacpi_interrupt_handler)(uacpi_handle); -uacpi_object *uacpi_create_object(uacpi_object_type type); +typedef enum uacpi_iteration_decision { + UACPI_ITERATION_DECISION_CONTINUE = 0, + UACPI_ITERATION_DECISION_BREAK, -void uacpi_object_ref(uacpi_object *obj); -void uacpi_object_unref(uacpi_object *obj); + // Only applicable for uacpi_namespace_for_each_child + UACPI_ITERATION_DECISION_NEXT_PEER, +} uacpi_iteration_decision; #ifdef 
__cplusplus } diff --git a/drivers/bus/acpi_new/include/uacpi/uacpi.h b/drivers/bus/acpi_new/include/uacpi/uacpi.h index 69f95a1de36a8..9c5bebc36ad66 100644 --- a/drivers/bus/acpi_new/include/uacpi/uacpi.h +++ b/drivers/bus/acpi_new/include/uacpi/uacpi.h @@ -117,8 +117,24 @@ uacpi_init_level uacpi_get_current_init_level(void); * A value of NULL for 'parent' implies uacpi_namespace_root() relative * lookups, unless 'path' is already absolute. */ -uacpi_status uacpi_eval(uacpi_namespace_node *parent, const uacpi_char *path, - const uacpi_args *args, uacpi_object **ret); +uacpi_status uacpi_eval( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * Same as uacpi_eval() but without a return value. + */ +uacpi_status uacpi_execute( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args +); +uacpi_status uacpi_execute_simple( + uacpi_namespace_node *parent, const uacpi_char *path +); /* * Same as uacpi_eval, but the return value type is validated against @@ -126,7 +142,12 @@ uacpi_status uacpi_eval(uacpi_namespace_node *parent, const uacpi_char *path, */ uacpi_status uacpi_eval_typed( uacpi_namespace_node *parent, const uacpi_char *path, - const uacpi_args *args, uacpi_u32 ret_mask, uacpi_object **ret + const uacpi_object_array *args, uacpi_object_type_bits ret_mask, + uacpi_object **ret +); +uacpi_status uacpi_eval_simple_typed( + uacpi_namespace_node *parent, const uacpi_char *path, + uacpi_object_type_bits ret_mask, uacpi_object **ret ); /* @@ -134,8 +155,71 @@ uacpi_status uacpi_eval_typed( */ uacpi_status uacpi_eval_integer( uacpi_namespace_node *parent, const uacpi_char *path, - const uacpi_args *args, uacpi_u64 *out_value + const uacpi_object_array *args, uacpi_u64 *out_value +); +uacpi_status uacpi_eval_simple_integer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value +); + +/* + * A shorthand for uacpi_eval_typed with + * UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT + * + * Use uacpi_object_get_string_or_buffer to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_STRING_BIT. + * + * Use uacpi_object_get_string to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_BUFFER_BIT. + * + * Use uacpi_object_get_buffer to retrieve the resulting buffer data. + */ +uacpi_status uacpi_eval_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +); +uacpi_status uacpi_eval_simple_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * A shorthand for uacpi_eval_typed with UACPI_OBJECT_PACKAGE_BIT. + * + * Use uacpi_object_get_package to retrieve the resulting object array. 
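A short usage sketch of the typed evaluation shorthands above (the 'device' node is a hypothetical uacpi_namespace_node* assumed to have been looked up beforehand):

    uacpi_u64 adr;
    uacpi_object *str_obj = UACPI_NULL;

    uacpi_eval_simple_integer(device, "_ADR", &adr);

    if (uacpi_eval_simple_string(device, "_STR", &str_obj) == UACPI_STATUS_OK) {
        uacpi_data_view view;
        uacpi_object_get_string(str_obj, &view);
        /* view now refers to the returned string data */
        uacpi_object_unref(str_obj);
    }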
+ */ +uacpi_status uacpi_eval_package( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret ); +uacpi_status uacpi_eval_simple_package( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +); + +/* + * Get the bitness of the currently loaded AML code according to the DSDT. + * + * Returns either 32 or 64. + */ +uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness); /* * Helpers for entering & leaving ACPI mode. Note that ACPI mode is entered diff --git a/drivers/bus/acpi_new/include/uacpi/utilities.h b/drivers/bus/acpi_new/include/uacpi/utilities.h index e091adc47731a..49f4c2580f405 100644 --- a/drivers/bus/acpi_new/include/uacpi/utilities.h +++ b/drivers/bus/acpi_new/include/uacpi/utilities.h @@ -11,7 +11,7 @@ extern "C" { /* * Checks whether the device at 'node' matches any of the PNP ids provided in * 'list' (terminated by a UACPI_NULL). This is done by first attempting to - * match the value retruned from _HID and then the value(s) from _CID. + * match the value returned from _HID and then the value(s) from _CID. * * Note that the presence of the device (_STA) is not verified here. */ @@ -22,9 +22,9 @@ uacpi_bool uacpi_device_matches_pnp_id( /* * Find all the devices in the namespace starting at 'parent' matching the - * specified 'hids' (terminated by a UACPI_NULL). Only devices reported as - * present via _STA are checked. Any matching devices are then passed to - * the 'cb'. + * specified 'hids' (terminated by a UACPI_NULL) against any value from _HID or + * _CID. Only devices reported as present via _STA are checked. Any matching + * devices are then passed to the 'cb'. */ uacpi_status uacpi_find_devices_at( uacpi_namespace_node *parent, diff --git a/drivers/bus/acpi_new/source/default_handlers.c b/drivers/bus/acpi_new/source/default_handlers.c index f54231657b2fb..cb569a040fdb0 100644 --- a/drivers/bus/acpi_new/source/default_handlers.c +++ b/drivers/bus/acpi_new/source/default_handlers.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -62,8 +63,13 @@ static uacpi_status pci_region_attach(uacpi_region_attach_data *data) */ device = node; while (device) { - obj = uacpi_namespace_node_get_object(device); - if (obj && obj->type == UACPI_OBJECT_DEVICE) + uacpi_object_type type; + + ret = uacpi_namespace_node_type(device, &type); + if (uacpi_unlikely_error(ret)) + return ret; + + if (type == UACPI_OBJECT_DEVICE) break; device = device->parent; @@ -164,14 +170,21 @@ struct memory_region_ctx { static uacpi_status memory_region_attach(uacpi_region_attach_data *data) { struct memory_region_ctx *ctx; + uacpi_object *region_obj; uacpi_operation_region *op_region; - uacpi_status ret = UACPI_STATUS_OK; + uacpi_status ret; ctx = uacpi_kernel_alloc(sizeof(*ctx)); if (ctx == UACPI_NULL) return UACPI_STATUS_OUT_OF_MEMORY; - op_region = uacpi_namespace_node_get_object(data->region_node)->op_region; + ret = uacpi_namespace_node_acquire_object_typed( + data->region_node, UACPI_OBJECT_OPERATION_REGION_BIT, &region_obj + ); + if (uacpi_unlikely_error(ret)) + return ret; + + op_region = region_obj->op_region; ctx->size = op_region->length; // FIXME: this really shouldn't try to map everything at once @@ -182,10 +195,12 @@ static uacpi_status memory_region_attach(uacpi_region_attach_data *data) ret = UACPI_STATUS_MAPPING_FAILED; uacpi_trace_region_error(data->region_node, "unable to map", ret); uacpi_free(ctx, sizeof(*ctx)); - return ret; + goto out; } data->out_region_context = ctx; +out: +
uacpi_namespace_node_release_object(region_obj); return ret; } @@ -206,6 +221,7 @@ struct io_region_ctx { static uacpi_status io_region_attach(uacpi_region_attach_data *data) { struct io_region_ctx *ctx; + uacpi_object *region_obj; uacpi_operation_region *op_region; uacpi_status ret; @@ -213,7 +229,13 @@ static uacpi_status io_region_attach(uacpi_region_attach_data *data) if (ctx == UACPI_NULL) return UACPI_STATUS_OUT_OF_MEMORY; - op_region = uacpi_namespace_node_get_object(data->region_node)->op_region; + ret = uacpi_namespace_node_acquire_object_typed( + data->region_node, UACPI_OBJECT_OPERATION_REGION_BIT, &region_obj + ); + if (uacpi_unlikely_error(ret)) + return ret; + + op_region = region_obj->op_region; ctx->base = op_region->offset; ret = uacpi_kernel_io_map(ctx->base, op_region->length, &ctx->handle); @@ -222,10 +244,13 @@ static uacpi_status io_region_attach(uacpi_region_attach_data *data) if (uacpi_unlikely_error(ret)) { uacpi_trace_region_error( data->region_node, "unable to map an IO", ret ); uacpi_free(ctx, sizeof(*ctx)); - return ret; + goto out; } data->out_region_context = ctx; + +out: + uacpi_object_unref(region_obj); return ret; } @@ -238,50 +263,6 @@ static uacpi_status io_region_detach(uacpi_region_detach_data *data) return UACPI_STATUS_OK; } -static uacpi_status memory_read(void *ptr, uacpi_u8 width, uacpi_u64 *out) -{ - switch (width) { - case 1: - *out = *(volatile uacpi_u8*)ptr; - break; - case 2: - *out = *(volatile uacpi_u16*)ptr; - break; - case 4: - *out = *(volatile uacpi_u32*)ptr; - break; - case 8: - *out = *(volatile uacpi_u64*)ptr; - break; - default: - return UACPI_STATUS_INVALID_ARGUMENT; - } - - return UACPI_STATUS_OK; -} - -static uacpi_status memory_write(void *ptr, uacpi_u8 width, uacpi_u64 in) -{ - switch (width) { - case 1: - *(volatile uacpi_u8*)ptr = in; - break; - case 2: - *(volatile uacpi_u16*)ptr = in; - break; - case 4: - *(volatile uacpi_u32*)ptr = in; - break; - case 8: - *(volatile uacpi_u64*)ptr = in; - break; - default: - return UACPI_STATUS_INVALID_ARGUMENT; - } - - return UACPI_STATUS_OK; -} - static uacpi_status memory_region_do_rw( uacpi_region_op op, uacpi_region_rw_data *data ) @@ -292,8 +273,8 @@ static uacpi_status memory_region_do_rw( ptr = ctx->virt + (data->address - ctx->phys); return op == UACPI_REGION_OP_READ ? - memory_read(ptr, data->byte_width, &data->value) : - memory_write(ptr, data->byte_width, data->value); + uacpi_system_memory_read(ptr, data->byte_width, &data->value) : + uacpi_system_memory_write(ptr, data->byte_width, data->value); } static uacpi_status handle_memory_region(uacpi_region_op op, uacpi_handle op_data) @@ -318,8 +299,8 @@ static uacpi_status table_data_region_do_rw( void *addr = UACPI_VIRT_ADDR_TO_PTR((uacpi_virt_addr)data->offset); return op == UACPI_REGION_OP_READ ?
- memory_read(addr, data->byte_width, &data->value) : - memory_write(addr, data->byte_width, data->value); + uacpi_system_memory_read(addr, data->byte_width, &data->value) : + uacpi_system_memory_write(addr, data->byte_width, data->value); } static uacpi_status handle_table_data_region(uacpi_region_op op, uacpi_handle op_data) @@ -373,23 +354,27 @@ void uacpi_install_default_address_space_handlers(void) root = uacpi_namespace_root(); - uacpi_install_address_space_handler( + uacpi_install_address_space_handler_with_flags( root, UACPI_ADDRESS_SPACE_SYSTEM_MEMORY, - handle_memory_region, UACPI_NULL + handle_memory_region, UACPI_NULL, + UACPI_ADDRESS_SPACE_HANDLER_DEFAULT ); - uacpi_install_address_space_handler( + uacpi_install_address_space_handler_with_flags( root, UACPI_ADDRESS_SPACE_SYSTEM_IO, - handle_io_region, UACPI_NULL + handle_io_region, UACPI_NULL, + UACPI_ADDRESS_SPACE_HANDLER_DEFAULT ); - uacpi_install_address_space_handler( + uacpi_install_address_space_handler_with_flags( root, UACPI_ADDRESS_SPACE_PCI_CONFIG, - handle_pci_region, UACPI_NULL + handle_pci_region, UACPI_NULL, + UACPI_ADDRESS_SPACE_HANDLER_DEFAULT ); - uacpi_install_address_space_handler( + uacpi_install_address_space_handler_with_flags( root, UACPI_ADDRESS_SPACE_TABLE_DATA, - handle_table_data_region, UACPI_NULL + handle_table_data_region, UACPI_NULL, + UACPI_ADDRESS_SPACE_HANDLER_DEFAULT ); } diff --git a/drivers/bus/acpi_new/source/event.c b/drivers/bus/acpi_new/source/event.c index cb0b32cb776b2..48224862da2ad 100644 --- a/drivers/bus/acpi_new/source/event.c +++ b/drivers/bus/acpi_new/source/event.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #define UACPI_EVENT_DISABLED 0 @@ -14,6 +15,10 @@ #ifndef UACPI_REDUCED_HARDWARE +static uacpi_handle g_gpe_state_slock; +static struct uacpi_recursive_lock g_event_lock; +static uacpi_bool g_gpes_finalized; + struct fixed_event { uacpi_u8 enable_field; uacpi_u8 status_field; @@ -102,34 +107,61 @@ static uacpi_status set_event(uacpi_u8 event, uacpi_u8 value) uacpi_status uacpi_enable_fixed_event(uacpi_fixed_event event) { - if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX)) + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) return UACPI_STATUS_INVALID_ARGUMENT; if (uacpi_is_hardware_reduced()) return UACPI_STATUS_OK; + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + /* * Attempting to enable an event that doesn't have a handler is most likely * an error, don't allow it. 
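To illustrate the rule spelled out in the comment above, a sketch of the intended call order for fixed events (UACPI_FIXED_EVENT_POWER_BUTTON and UACPI_INTERRUPT_HANDLED are assumed from the public uACPI headers, not shown in this hunk):

    static uacpi_interrupt_ret handle_power_button(uacpi_handle ctx)
    {
        /* e.g. queue a shutdown request */
        return UACPI_INTERRUPT_HANDLED;
    }

    /* install a handler first; enabling without one fails with
       UACPI_STATUS_NO_HANDLER */
    uacpi_install_fixed_event_handler(
        UACPI_FIXED_EVENT_POWER_BUTTON, handle_power_button, UACPI_NULL
    );
    uacpi_enable_fixed_event(UACPI_FIXED_EVENT_POWER_BUTTON);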
*/ - if (uacpi_unlikely(fixed_event_handlers[event].handler == UACPI_NULL)) - return UACPI_STATUS_NO_HANDLER; + if (uacpi_unlikely(fixed_event_handlers[event].handler == UACPI_NULL)) { + ret = UACPI_STATUS_NO_HANDLER; + goto out; + } + + ret = set_event(event, UACPI_EVENT_ENABLED); - return set_event(event, UACPI_EVENT_ENABLED); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_disable_fixed_event(uacpi_fixed_event event) { - if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX)) + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) return UACPI_STATUS_INVALID_ARGUMENT; if (uacpi_is_hardware_reduced()) return UACPI_STATUS_OK; - return set_event(event, UACPI_EVENT_DISABLED); + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = set_event(event, UACPI_EVENT_DISABLED); + + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_clear_fixed_event(uacpi_fixed_event event) { - if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX)) + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) return UACPI_STATUS_INVALID_ARGUMENT; if (uacpi_is_hardware_reduced()) return UACPI_STATUS_OK; @@ -286,9 +318,9 @@ struct gpe_interrupt_ctx { uacpi_handle irq_handle; uacpi_u32 irq; }; -static struct gpe_interrupt_ctx *gpe_interrupt_head; +static struct gpe_interrupt_ctx *g_gpe_interrupt_head; -uacpi_u8 gpe_get_mask(struct gp_event *event) +static uacpi_u8 gpe_get_mask(struct gp_event *event) { return 1 << (event->idx - event->reg->base_idx); } @@ -305,9 +337,10 @@ static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state) struct gpe_register *reg = event->reg; uacpi_u64 enable_mask; uacpi_u8 event_bit; + uacpi_cpu_flags flags; event_bit = gpe_get_mask(event); - if (reg->masked_mask & event_bit) + if (state != GPE_STATE_DISABLED && (reg->masked_mask & event_bit)) return UACPI_STATUS_OK; if (state == GPE_STATE_ENABLED_CONDITIONALLY) { @@ -317,9 +350,11 @@ static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state) state = GPE_STATE_ENABLED; } + flags = uacpi_kernel_lock_spinlock(g_gpe_state_slock); + ret = uacpi_gas_read(&reg->enable, &enable_mask); if (uacpi_unlikely_error(ret)) - return ret; + goto out; switch (state) { case GPE_STATE_ENABLED: @@ -329,10 +364,14 @@ static uacpi_status set_gpe_state(struct gp_event *event, enum gpe_state state) enable_mask &= ~event_bit; break; default: - return UACPI_STATUS_INVALID_ARGUMENT; + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; } - return uacpi_gas_write(&reg->enable, enable_mask); + ret = uacpi_gas_write(&reg->enable, enable_mask); +out: + uacpi_kernel_unlock_spinlock(g_gpe_state_slock, flags); + return ret; } static uacpi_status clear_gpe(struct gp_event *event) @@ -375,20 +414,26 @@ static void async_run_gpe_handler(uacpi_handle opaque) uacpi_status ret; struct gp_event *event = opaque; + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) + goto out_no_unlock; + switch (event->handler_type) { case GPE_HANDLER_TYPE_AML_HANDLER: { uacpi_object *method_obj; - method_obj = uacpi_namespace_node_get_object(event->aml_handler); - if (uacpi_unlikely(method_obj == UACPI_NULL || - method_obj->type != UACPI_OBJECT_METHOD)) { - uacpi_error("GPE(%02X) has invalid or deleted AML handler\n", - event->idx); + method_obj =
uacpi_namespace_node_get_object_typed( + event->aml_handler, UACPI_OBJECT_METHOD_BIT + ); + if (uacpi_unlikely(method_obj == UACPI_NULL)) { + uacpi_error("GPE(%02X) AML handler gone\n", event->idx); break; } - uacpi_trace("executing GPE(%02X) handler %.4s\n", - event->idx, event->aml_handler->name.text); + uacpi_trace( + "executing GPE(%02X) handler %.4s\n", + event->idx, uacpi_namespace_node_name(event->aml_handler).text + ); ret = uacpi_execute_control_method( event->aml_handler, method_obj->method, UACPI_NULL, UACPI_NULL @@ -399,7 +444,6 @@ static void async_run_gpe_handler(uacpi_handle opaque) event->idx, event->aml_handler->name.text, uacpi_status_to_string(ret) ); - break; } break; } @@ -424,6 +468,9 @@ static void async_run_gpe_handler(uacpi_handle opaque) break; } + uacpi_namespace_write_unlock(); + +out_no_unlock: /* * We schedule the work as NOTIFICATION to make sure all other notifications * finish before this GPE is re-enabled. */ @@ -588,7 +635,7 @@ static uacpi_status find_or_create_gpe_interrupt_ctx( ) { uacpi_status ret; - struct gpe_interrupt_ctx *entry = gpe_interrupt_head; + struct gpe_interrupt_ctx *entry = g_gpe_interrupt_head; while (entry) { if (entry->irq == irq) { @@ -618,8 +665,8 @@ static uacpi_status find_or_create_gpe_interrupt_ctx( } entry->irq = irq; - entry->next = gpe_interrupt_head; - gpe_interrupt_head = entry; + entry->next = g_gpe_interrupt_head; + g_gpe_interrupt_head = entry; *out_ctx = entry; return UACPI_STATUS_OK; @@ -639,8 +686,97 @@ static void gpe_release_implicit_notify_handlers(struct gp_event *event) event->implicit_handler = UACPI_NULL; } +enum gpe_block_action +{ + GPE_BLOCK_ACTION_DISABLE_ALL, + GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME, + GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE, + GPE_BLOCK_ACTION_CLEAR_ALL, +}; + +static uacpi_status gpe_block_apply_action( + struct gpe_block *block, enum gpe_block_action action +) +{ + uacpi_status ret; + uacpi_size i; + uacpi_u8 value; + struct gpe_register *reg; + + for (i = 0; i < block->num_registers; ++i) { + reg = &block->registers[i]; + + switch (action) { + case GPE_BLOCK_ACTION_DISABLE_ALL: + value = 0; + break; + case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME: + value = reg->runtime_mask & ~reg->masked_mask; + break; + case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE: + value = reg->wake_mask; + break; + case GPE_BLOCK_ACTION_CLEAR_ALL: + ret = uacpi_gas_write(&reg->status, 0xFF); + if (uacpi_unlikely_error(ret)) + return ret; + continue; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + reg->current_mask = value; + ret = uacpi_gas_write(&reg->enable, value); + if (uacpi_unlikely_error(ret)) + return ret; + } + + return UACPI_STATUS_OK; +} + +static void gpe_block_mask_safe(struct gpe_block *block) +{ + uacpi_size i; + struct gpe_register *reg; + + for (i = 0; i < block->num_registers; ++i) { + reg = &block->registers[i]; + + // No need to flush or do anything if it's not currently enabled + if (!reg->current_mask) + continue; + + // 1. Mask the GPEs, this makes sure their state is no longer modifiable + reg->masked_mask = 0xFF; + + /* + * 2. Wait for in-flight work & IRQs to finish, these might already + * be past the respective "if (masked)" check and therefore may + * try to re-enable a masked GPE. + */ + uacpi_kernel_wait_for_work_completion(); + + /* + * 3. Now that this GPE's state is unmodifiable and we know that + * currently in-flight IRQs will see the masked state, we can + * safely disable all events knowing they won't be re-enabled by + * a racing IRQ.
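The same mask-flush-disable-flush sequence backs the public masking API; a sketch of how a driver might use it (the GPE index 0x16 is arbitrary, and uacpi_unmask_gpe is assumed to be the counterpart declared alongside uacpi_mask_gpe):

    /* NULL gpe_device selects the default \_GPE block */
    uacpi_mask_gpe(UACPI_NULL, 0x16);

    /* ...reconfigure handlers or hardware without racing the IRQ... */

    uacpi_unmask_gpe(UACPI_NULL, 0x16);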
+ */ + uacpi_gas_write(&reg->enable, 0x00); + + /* + * 4. Wait for the last possible IRQ to finish, now that this event is + * disabled. + */ + uacpi_kernel_wait_for_work_completion(); + } +} + static void uninstall_gpe_block(struct gpe_block *block) { + if (block->registers != UACPI_NULL) + gpe_block_mask_safe(block); + if (block->prev) block->prev->next = block->next; @@ -679,18 +815,6 @@ static void uninstall_gpe_block(struct gpe_block *block) } } - if (block->registers != UACPI_NULL) { - uacpi_size i; - struct gpe_register *reg; - - for (i = 0; i < block->num_registers; ++i) { - reg = &block->registers[i]; - - if (reg->current_mask) - uacpi_gas_write(&reg->enable, 0x00); - } - } - if (block->events != UACPI_NULL) { uacpi_size i; struct gp_event *event; @@ -748,23 +872,20 @@ struct gpe_match_ctx { uacpi_bool post_dynamic_table_load; }; -static uacpi_ns_iteration_decision do_match_gpe_methods( - uacpi_handle opaque, uacpi_namespace_node *node +static uacpi_iteration_decision do_match_gpe_methods( + uacpi_handle opaque, uacpi_namespace_node *node, uacpi_u32 depth ) { uacpi_status ret; struct gpe_match_ctx *ctx = opaque; struct gp_event *event; - uacpi_object *object; uacpi_u8 triggering; uacpi_u64 idx; - object = uacpi_namespace_node_get_object(node); - if (object->type != UACPI_OBJECT_METHOD) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + UACPI_UNUSED(depth); if (node->name.text[0] != '_') - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; switch (node->name.text[1]) { case 'L': @@ -774,18 +895,18 @@ static uacpi_ns_iteration_decision do_match_gpe_methods( triggering = UACPI_GPE_TRIGGERING_EDGE; break; default: - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } ret = uacpi_string_to_integer(&node->name.text[2], 2, UACPI_BASE_HEX, &idx); if (uacpi_unlikely_error(ret)) { uacpi_trace("invalid GPE method name %.4s, ignored\n", node->name.text); - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } event = gpe_from_block(ctx->block, idx); if (event == UACPI_NULL) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; switch (event->handler_type) { /* @@ -809,7 +930,7 @@ static uacpi_ns_iteration_decision do_match_gpe_methods( (uacpi_u32)idx, event->aml_handler->name.text, node->name.text ); } - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; case GPE_HANDLER_TYPE_NATIVE_HANDLER: case GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW: @@ -819,7 +940,7 @@ static uacpi_ns_iteration_decision do_match_gpe_methods( ); UACPI_FALLTHROUGH; default: - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } uacpi_trace("assigned GPE(%02X) -> %.4s\n", @@ -827,24 +948,30 @@ static uacpi_ns_iteration_decision do_match_gpe_methods( event->triggering = triggering; ctx->matched_count++; - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } -uacpi_status uacpi_events_match_post_dynamic_table_load(void) +void uacpi_events_match_post_dynamic_table_load(void) { struct gpe_match_ctx match_ctx = { .post_dynamic_table_load = UACPI_TRUE, }; - struct gpe_interrupt_ctx *irq_ctx = gpe_interrupt_head; + uacpi_namespace_write_unlock(); + + if (uacpi_unlikely_error(uacpi_recursive_lock_acquire(&g_event_lock))) + goto out; + + struct gpe_interrupt_ctx *irq_ctx = g_gpe_interrupt_head; while (irq_ctx) { match_ctx.block = irq_ctx->gpe_head; while (match_ctx.block) { -
uacpi_namespace_for_each_node_depth_first( - match_ctx.block->device_node, do_match_gpe_methods, - &match_ctx + uacpi_namespace_do_for_each_child( + match_ctx.block->device_node, do_match_gpe_methods, UACPI_NULL, + UACPI_OBJECT_METHOD_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, &match_ctx ); match_ctx.block = match_ctx.block->next; } @@ -857,7 +984,9 @@ uacpi_status uacpi_events_match_post_dynamic_table_load(void) match_ctx.matched_count); } - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + uacpi_namespace_write_lock(); } static uacpi_status create_gpe_block( @@ -938,8 +1067,10 @@ static uacpi_status create_gpe_block( block->irq_ctx->gpe_head = block; match_ctx.block = block; - uacpi_namespace_for_each_node_depth_first( - device_node, do_match_gpe_methods, &match_ctx + uacpi_namespace_do_for_each_child( + device_node, do_match_gpe_methods, UACPI_NULL, + UACPI_OBJECT_METHOD_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, &match_ctx ); uacpi_trace("initialized GPE block %.4s[%d->%d], %d AML handlers (IRQ %d)\n", @@ -952,20 +1083,15 @@ static uacpi_status create_gpe_block( return ret; } -enum gpe_block_iteration_decision { - GPE_BLOCK_ITERATION_DECISION_BREAK, - GPE_BLOCK_ITERATION_DECISION_CONTINUE, -}; - -typedef enum gpe_block_iteration_decision - (*gpe_block_iteration_callback)(struct gpe_block*, uacpi_handle); +typedef uacpi_iteration_decision (*gpe_block_iteration_callback) + (struct gpe_block*, uacpi_handle); static void for_each_gpe_block( gpe_block_iteration_callback cb, uacpi_handle handle ) { - enum gpe_block_iteration_decision decision; - struct gpe_interrupt_ctx *irq_ctx = gpe_interrupt_head; + uacpi_iteration_decision decision; + struct gpe_interrupt_ctx *irq_ctx = g_gpe_interrupt_head; struct gpe_block *block; while (irq_ctx) { @@ -973,7 +1099,7 @@ static void for_each_gpe_block( while (block) { decision = cb(block, handle); - if (decision == GPE_BLOCK_ITERATION_DECISION_BREAK) + if (decision == UACPI_ITERATION_DECISION_BREAK) return; block = block->next; @@ -990,21 +1116,21 @@ struct gpe_search_ctx { struct gp_event *out_event; }; -static enum gpe_block_iteration_decision do_find_gpe( +static uacpi_iteration_decision do_find_gpe( struct gpe_block *block, uacpi_handle opaque ) { struct gpe_search_ctx *ctx = opaque; if (block->device_node != ctx->gpe_device) - return GPE_BLOCK_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; ctx->out_block = block; ctx->out_event = gpe_from_block(block, ctx->idx); if (ctx->out_event == UACPI_NULL) - return GPE_BLOCK_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; - return GPE_BLOCK_ITERATION_DECISION_BREAK; + return UACPI_ITERATION_DECISION_BREAK; } static struct gp_event *get_gpe( @@ -1020,6 +1146,23 @@ static struct gp_event *get_gpe( return ctx.out_event; } +static void gp_event_toggle_masks(struct gp_event *event, uacpi_bool set_on) +{ + uacpi_u8 this_mask; + struct gpe_register *reg = event->reg; + + this_mask = gpe_get_mask(event); + + if (set_on) { + reg->runtime_mask |= this_mask; + reg->current_mask = reg->runtime_mask; + return; + } + + reg->runtime_mask &= ~this_mask; + reg->current_mask = reg->runtime_mask; +} + static uacpi_status gpe_remove_user(struct gp_event *event) { uacpi_status ret = UACPI_STATUS_OK; @@ -1028,12 +1171,13 @@ static uacpi_status gpe_remove_user(struct gp_event *event) return UACPI_STATUS_INVALID_ARGUMENT; if (--event->num_users == 0) { - event->reg->runtime_mask &= 
~gpe_get_mask(event); - event->reg->current_mask = event->reg->runtime_mask; + gp_event_toggle_masks(event, UACPI_FALSE); ret = set_gpe_state(event, GPE_STATE_DISABLED); - if (uacpi_unlikely_error(ret)) + if (uacpi_unlikely_error(ret)) { + gp_event_toggle_masks(event, UACPI_TRUE); event->num_users++; + } } return ret; @@ -1057,12 +1201,13 @@ static uacpi_status gpe_add_user( if (clear_if_first == EVENT_CLEAR_IF_FIRST_YES) clear_gpe(event); - event->reg->runtime_mask |= gpe_get_mask(event); - event->reg->current_mask = event->reg->runtime_mask; + gp_event_toggle_masks(event, UACPI_TRUE); ret = set_gpe_state(event, GPE_STATE_ENABLED); - if (uacpi_unlikely_error(ret)) + if (uacpi_unlikely_error(ret)) { + gp_event_toggle_masks(event, UACPI_FALSE); event->num_users--; + } } return ret; @@ -1082,95 +1227,78 @@ const uacpi_char *uacpi_gpe_triggering_to_string( } } -static uacpi_status do_install_gpe_handler( - uacpi_namespace_node *gpe_device, uacpi_u16 idx, - uacpi_gpe_triggering triggering, enum gpe_handler_type type, - uacpi_gpe_handler handler, uacpi_handle ctx -) +static uacpi_bool gpe_needs_polling(struct gp_event *event) { - struct gp_event *event; - struct gpe_native_handler *native_handler; + return event->num_users && event->triggering == UACPI_GPE_TRIGGERING_EDGE; +} - if (uacpi_unlikely(triggering > UACPI_GPE_TRIGGERING_MAX)) - return UACPI_STATUS_INVALID_ARGUMENT; +static uacpi_status gpe_mask_unmask( + struct gp_event *event, uacpi_bool should_mask +) +{ + struct gpe_register *reg; + uacpi_u8 mask; - if (gpe_device == UACPI_NULL) { - gpe_device = uacpi_namespace_get_predefined( - UACPI_PREDEFINED_NAMESPACE_GPE - ); - } + reg = event->reg; + mask = gpe_get_mask(event); - event = get_gpe(gpe_device, idx); - if (uacpi_unlikely(event == UACPI_NULL)) - return UACPI_STATUS_NOT_FOUND; + if (should_mask) { + if (reg->masked_mask & mask) + return UACPI_STATUS_INVALID_ARGUMENT; - if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER || - event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) - return UACPI_STATUS_ALREADY_EXISTS; + // 1. Mask the GPE, this makes sure its state is no longer modifiable + reg->masked_mask |= mask; - native_handler = uacpi_kernel_alloc(sizeof(*native_handler)); - if (uacpi_unlikely(native_handler == UACPI_NULL)) - return UACPI_STATUS_OUT_OF_MEMORY; + /* + * 2. Wait for in-flight work & IRQs to finish, these might already + * be past the respective "if (masked)" check and therefore may + * try to re-enable a masked GPE. + */ + uacpi_kernel_wait_for_work_completion(); - native_handler->cb = handler; - native_handler->ctx = ctx; - native_handler->previous_handler = event->any_handler; - native_handler->previous_handler_type = event->handler_type; - native_handler->previous_triggering = event->triggering; - native_handler->previously_enabled = UACPI_FALSE; + /* + * 3. Now that this GPE's state is unmodifiable and we know that currently + * in-flight IRQs will see the masked state, we can safely disable this + * event knowing it won't be re-enabled by a racing IRQ. + */ + set_gpe_state(event, GPE_STATE_DISABLED); - if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER || - event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) && - event->num_users != 0) { - native_handler->previously_enabled = UACPI_TRUE; - gpe_remove_user(event); + /* + * 4. Wait for the last possible IRQ to finish, now that this event is + * disabled.
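For context, a sketch of the client-side flow these reference-counting helpers serve: install a native handler, then enable the event (the handler signature follows the uacpi_gpe_handler typedef, and UACPI_GPE_TRIGGERING_LEVEL plus the UACPI_INTERRUPT_HANDLED return value are assumed alongside the edge constant used above):

    static uacpi_interrupt_ret my_gpe_handler(
        uacpi_handle ctx, uacpi_namespace_node *gpe_device, uacpi_u16 idx
    )
    {
        /* acknowledge the underlying device here */
        return UACPI_INTERRUPT_HANDLED; /* assumed return convention */
    }

    /* NULL selects the default \_GPE block, as in do_install_gpe_handler */
    uacpi_install_gpe_handler(
        UACPI_NULL, 0x13, UACPI_GPE_TRIGGERING_LEVEL,
        my_gpe_handler, UACPI_NULL
    );
    uacpi_enable_gpe(UACPI_NULL, 0x13);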
+ */ + uacpi_kernel_wait_for_work_completion(); - if (uacpi_unlikely(event->triggering != triggering)) { - uacpi_warn( - "GPE(%02X) user handler claims %s triggering, originally " - "configured as %s\n", idx, - uacpi_gpe_triggering_to_string(triggering), - uacpi_gpe_triggering_to_string(event->triggering) - ); - } + return UACPI_STATUS_OK; } - event->native_handler = native_handler; - event->handler_type = type; - event->triggering = triggering; - return UACPI_STATUS_OK; -} + if (!(reg->masked_mask & mask)) + return UACPI_STATUS_INVALID_ARGUMENT; -uacpi_status uacpi_install_gpe_handler( - uacpi_namespace_node *gpe_device, uacpi_u16 idx, - uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, - uacpi_handle ctx -) -{ - return do_install_gpe_handler( - gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER, - handler, ctx - ); -} + reg->masked_mask &= ~mask; + if (!event->block_interrupts && event->num_users) + set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY); -uacpi_status uacpi_install_gpe_handler_raw( - uacpi_namespace_node *gpe_device, uacpi_u16 idx, - uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, - uacpi_handle ctx -) -{ - return do_install_gpe_handler( - gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW, - handler, ctx - ); + return UACPI_STATUS_OK; } -static uacpi_bool gpe_needs_polling(struct gp_event *event) +/* + * Safely mask the event before we modify its handlers. + * + * This makes sure we can't get an IRQ in the middle of modifying this + * event's structures. + */ +static uacpi_bool gpe_mask_safe(struct gp_event *event) { - return event->num_users && event->triggering == UACPI_GPE_TRIGGERING_EDGE; + // No need to flush or do anything if it's not currently enabled + if (!(event->reg->current_mask & gpe_get_mask(event))) + return UACPI_FALSE; + + gpe_mask_unmask(event, UACPI_TRUE); + return UACPI_TRUE; } -static enum gpe_block_iteration_decision do_initialize_gpe_block( +static uacpi_iteration_decision do_initialize_gpe_block( struct gpe_block *block, uacpi_handle opaque ) { @@ -1206,23 +1334,32 @@ static enum gpe_block_iteration_decision do_initialize_gpe_block( block->base_idx, block->base_idx + block->num_events ); } - return GPE_BLOCK_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } uacpi_status uacpi_finalize_gpe_initialization(void) { - static uacpi_bool gpes_finalized = UACPI_FALSE; + uacpi_status ret; uacpi_bool poll_blocks = UACPI_FALSE; - if (gpes_finalized) - return UACPI_STATUS_OK; + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + if (g_gpes_finalized) + goto out; + + g_gpes_finalized = UACPI_TRUE; for_each_gpe_block(do_initialize_gpe_block, &poll_blocks); if (poll_blocks) - detect_gpes(gpe_interrupt_head->gpe_head); + detect_gpes(g_gpe_interrupt_head->gpe_head); - gpes_finalized = UACPI_TRUE; - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } static uacpi_status sanitize_device_and_find_gpe( @@ -1243,43 +1380,156 @@ static uacpi_status sanitize_device_and_find_gpe( return UACPI_STATUS_OK; } -uacpi_status uacpi_uninstall_gpe_handler( +static uacpi_status do_install_gpe_handler( uacpi_namespace_node *gpe_device, uacpi_u16 idx, - uacpi_gpe_handler handler + uacpi_gpe_triggering triggering, enum gpe_handler_type type, + uacpi_gpe_handler handler, uacpi_handle ctx ) { uacpi_status ret; struct gp_event *event; struct 
gpe_native_handler *native_handler; + uacpi_bool did_mask; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + if (uacpi_unlikely(triggering > UACPI_GPE_TRIGGERING_MAX)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; - if (event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER && - event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) - return UACPI_STATUS_NOT_FOUND; + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; - native_handler = event->native_handler; - if (uacpi_unlikely(native_handler->cb != handler)) - return UACPI_STATUS_INVALID_ARGUMENT; + if (event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER || + event->handler_type == GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } - event->aml_handler = native_handler->previous_handler; - event->triggering = native_handler->previous_triggering; + native_handler = uacpi_kernel_alloc(sizeof(*native_handler)); + if (uacpi_unlikely(native_handler == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } + + native_handler->cb = handler; + native_handler->ctx = ctx; + native_handler->previous_handler = event->any_handler; + native_handler->previous_handler_type = event->handler_type; + native_handler->previous_triggering = event->triggering; + native_handler->previously_enabled = UACPI_FALSE; + + did_mask = gpe_mask_safe(event); + + if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER || + event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) && + event->num_users != 0) { + native_handler->previously_enabled = UACPI_TRUE; + gpe_remove_user(event); + + if (uacpi_unlikely(event->triggering != triggering)) { + uacpi_warn( + "GPE(%02X) user handler claims %s triggering, originally " + "configured as %s\n", idx, + uacpi_gpe_triggering_to_string(triggering), + uacpi_gpe_triggering_to_string(event->triggering) + ); + } + } + + event->native_handler = native_handler; + event->handler_type = type; + event->triggering = triggering; + + if (did_mask) + gpe_mask_unmask(event, UACPI_FALSE); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} + +uacpi_status uacpi_install_gpe_handler( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, + uacpi_handle ctx +) +{ + return do_install_gpe_handler( + gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER, + handler, ctx + ); +} + +uacpi_status uacpi_install_gpe_handler_raw( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_triggering triggering, uacpi_gpe_handler handler, + uacpi_handle ctx +) +{ + return do_install_gpe_handler( + gpe_device, idx, triggering, GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW, + handler, ctx + ); +} + +uacpi_status uacpi_uninstall_gpe_handler( + uacpi_namespace_node *gpe_device, uacpi_u16 idx, + uacpi_gpe_handler handler +) +{ + uacpi_status ret; + struct gp_event *event; + struct gpe_native_handler *native_handler; + uacpi_bool did_mask; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + if (event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER && + 
event->handler_type != GPE_HANDLER_TYPE_NATIVE_HANDLER_RAW) { + ret = UACPI_STATUS_NOT_FOUND; + goto out; + } + + native_handler = event->native_handler; + if (uacpi_unlikely(native_handler->cb != handler)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + did_mask = gpe_mask_safe(event); + + event->aml_handler = native_handler->previous_handler; + event->triggering = native_handler->previous_triggering; event->handler_type = native_handler->previous_handler_type; if ((event->handler_type == GPE_HANDLER_TYPE_AML_HANDLER || event->handler_type == GPE_HANDLER_TYPE_IMPLICIT_NOTIFY) && native_handler->previously_enabled) { gpe_add_user(event, EVENT_CLEAR_IF_FIRST_NO); - - if (gpe_needs_polling(event)) - maybe_dispatch_gpe(gpe_device, event); } - uacpi_kernel_wait_for_work_completion(); uacpi_free(native_handler, sizeof(*native_handler)); - return UACPI_STATUS_OK; + + if (did_mask) + gpe_mask_unmask(event, UACPI_FALSE); + + if (gpe_needs_polling(event)) + maybe_dispatch_gpe(gpe_device, event); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_enable_gpe( @@ -1289,21 +1539,31 @@ uacpi_status uacpi_enable_gpe( uacpi_status ret; struct gp_event *event; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; - if (uacpi_unlikely(event->handler_type == GPE_HANDLER_TYPE_NONE)) - return UACPI_STATUS_NO_HANDLER; + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + if (uacpi_unlikely(event->handler_type == GPE_HANDLER_TYPE_NONE)) { + ret = UACPI_STATUS_NO_HANDLER; + goto out; + } ret = gpe_add_user(event, EVENT_CLEAR_IF_FIRST_YES); if (uacpi_unlikely_error(ret)) - return ret; + goto out; if (gpe_needs_polling(event)) maybe_dispatch_gpe(gpe_device, event); - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_disable_gpe( @@ -1313,11 +1573,20 @@ uacpi_status uacpi_disable_gpe( uacpi_status ret; struct gp_event *event; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; - return gpe_remove_user(event); + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + ret = gpe_remove_user(event); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_clear_gpe( @@ -1327,13 +1596,21 @@ uacpi_status uacpi_clear_gpe( uacpi_status ret; struct gp_event *event; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; - return clear_gpe(event); -} + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + ret = clear_gpe(event); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; +} static uacpi_status gpe_suspend_resume( uacpi_namespace_node *gpe_device, uacpi_u16 idx, enum gpe_state state @@ -1342,12 +1619,21 @@ static uacpi_status gpe_suspend_resume( uacpi_status ret; struct gp_event *event; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + 
UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + event->block_interrupts = state == GPE_STATE_DISABLED; - return set_gpe_state(event, state); + ret = set_gpe_state(event, state); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_suspend_gpe( @@ -1371,15 +1657,27 @@ uacpi_status uacpi_finish_handling_gpe( uacpi_status ret; struct gp_event *event; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + event = get_gpe(gpe_device, idx); - if (uacpi_unlikely(event == UACPI_NULL)) - return UACPI_STATUS_NOT_FOUND; + if (uacpi_unlikely(event == UACPI_NULL)) { + ret = UACPI_STATUS_NOT_FOUND; + goto out; + } + + ret = restore_gpe(event); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; - return restore_gpe(event); } static uacpi_status gpe_get_mask_unmask( @@ -1388,32 +1686,22 @@ static uacpi_status gpe_get_mask_unmask( { uacpi_status ret; struct gp_event *event; - struct gpe_register *reg; - uacpi_u8 mask; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; - reg = event->reg; - mask = gpe_get_mask(event); - - if (should_mask) { - if (reg->masked_mask & mask) - return UACPI_STATUS_INVALID_ARGUMENT; + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; - set_gpe_state(event, GPE_STATE_DISABLED); - reg->masked_mask |= mask; - return UACPI_STATUS_OK; - } + ret = gpe_mask_unmask(event, should_mask); - if (!(reg->masked_mask & mask)) - return UACPI_STATUS_INVALID_ARGUMENT; - - reg->masked_mask &= ~mask; - if (!event->block_interrupts && event->num_users) - set_gpe_state(event, GPE_STATE_ENABLED_CONDITIONALLY); - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_mask_gpe( @@ -1437,19 +1725,34 @@ uacpi_status uacpi_setup_gpe_for_wake( { uacpi_status ret; struct gp_event *event; + uacpi_bool did_mask; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); - if (uacpi_unlikely_error(ret)) - return ret; + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); if (wake_device != UACPI_NULL) { - uacpi_object *obj; + uacpi_bool is_dev = wake_device == uacpi_namespace_root(); + + if (!is_dev) { + ret = uacpi_namespace_node_is(wake_device, UACPI_OBJECT_DEVICE, &is_dev); + if (uacpi_unlikely_error(ret)) + return ret; + } - obj = uacpi_namespace_node_get_object(wake_device); - if (wake_device != uacpi_namespace_root() && - obj->type != UACPI_OBJECT_DEVICE) + if (!is_dev) return UACPI_STATUS_INVALID_ARGUMENT; + } + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + did_mask = gpe_mask_safe(event); + if (wake_device != UACPI_NULL) { switch (event->handler_type) { case 
GPE_HANDLER_TYPE_NONE: event->handler_type = GPE_HANDLER_TYPE_IMPLICIT_NOTIFY; @@ -1481,7 +1784,8 @@ uacpi_status uacpi_setup_gpe_for_wake( default: uacpi_warn("invalid GPE(%02X) handler type: %d\n", event->idx, event->handler_type); - return UACPI_STATUS_INTERNAL_ERROR; + ret = UACPI_STATUS_INTERNAL_ERROR; + goto out_unmask; } /* @@ -1495,8 +1799,10 @@ uacpi_status uacpi_setup_gpe_for_wake( implicit_handler = event->implicit_handler; while (implicit_handler) { - if (implicit_handler->device == wake_device) - return UACPI_STATUS_ALREADY_EXISTS; + if (implicit_handler->device == wake_device) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out_unmask; + } implicit_handler = implicit_handler->next; } @@ -1516,7 +1822,13 @@ uacpi_status uacpi_setup_gpe_for_wake( } event->wake = UACPI_TRUE; - return UACPI_STATUS_OK; + +out_unmask: + if (did_mask) + gpe_mask_unmask(event, UACPI_FALSE); +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } static uacpi_status gpe_enable_disable_for_wake( @@ -1528,12 +1840,20 @@ static uacpi_status gpe_enable_disable_for_wake( struct gpe_register *reg; uacpi_u8 mask; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; - if (!event->wake) - return UACPI_STATUS_INVALID_ARGUMENT; + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + + if (!event->wake) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } reg = event->reg; mask = gpe_get_mask(event); @@ -1543,7 +1863,9 @@ static uacpi_status gpe_enable_disable_for_wake( else reg->wake_mask &= mask; - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_enable_gpe_for_wake( @@ -1554,62 +1876,44 @@ uacpi_status uacpi_enable_gpe_for_wake( } uacpi_status uacpi_disable_gpe_for_wake( - uacpi_namespace_node *gpe_device, uacpi_u16 idx + uacpi_namespace_node *gpe_device, uacpi_u16 idx ) { return gpe_enable_disable_for_wake(gpe_device, idx, UACPI_FALSE); } -enum gpe_block_action { - GPE_BLOCK_ACTION_DISABLE_ALL, - GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME, - GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE, - GPE_BLOCK_ACTION_CLEAR_ALL, -}; - struct do_for_all_gpes_ctx { enum gpe_block_action action; uacpi_status ret; }; -static enum gpe_block_iteration_decision do_for_all_gpes( +static uacpi_iteration_decision do_for_all_gpes( struct gpe_block *block, uacpi_handle opaque ) { struct do_for_all_gpes_ctx *ctx = opaque; - struct gpe_register *reg; - uacpi_u8 value; - uacpi_size i; - for (i = 0; i < block->num_registers; ++i) { - reg = &block->registers[i]; + ctx->ret = gpe_block_apply_action(block, ctx->action); + if (uacpi_unlikely_error(ctx->ret)) + return UACPI_ITERATION_DECISION_BREAK; - switch (ctx->action) { - case GPE_BLOCK_ACTION_DISABLE_ALL: - value = 0; - break; - case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME: - value = reg->runtime_mask & ~reg->masked_mask; - break; - case GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE: - value = reg->wake_mask; - break; - case GPE_BLOCK_ACTION_CLEAR_ALL: - ctx->ret = uacpi_gas_write(&reg->status, 0xFF); - if (uacpi_unlikely_error(ctx->ret)) - return GPE_BLOCK_ITERATION_DECISION_BREAK; - continue; - default: - continue; - } + return UACPI_ITERATION_DECISION_CONTINUE; +} - reg->current_mask = value; - ctx->ret = uacpi_gas_write(&reg->enable, value); - if (uacpi_unlikely_error(ctx->ret)) - return
GPE_BLOCK_ITERATION_DECISION_BREAK; - } +static uacpi_status for_all_gpes_locked(struct do_for_all_gpes_ctx *ctx) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; - return GPE_BLOCK_ITERATION_DECISION_CONTINUE; + for_each_gpe_block(do_for_all_gpes, ctx); + + uacpi_recursive_lock_release(&g_event_lock); + return ctx->ret; } uacpi_status uacpi_disable_all_gpes(void) @@ -1617,9 +1921,7 @@ uacpi_status uacpi_disable_all_gpes(void) struct do_for_all_gpes_ctx ctx = { .action = GPE_BLOCK_ACTION_DISABLE_ALL, }; - - for_each_gpe_block(do_for_all_gpes, &ctx); - return ctx.ret; + return for_all_gpes_locked(&ctx); } uacpi_status uacpi_enable_all_runtime_gpes(void) @@ -1627,9 +1929,7 @@ uacpi_status uacpi_enable_all_runtime_gpes(void) struct do_for_all_gpes_ctx ctx = { .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_RUNTIME, }; - - for_each_gpe_block(do_for_all_gpes, &ctx); - return ctx.ret; + return for_all_gpes_locked(&ctx); } uacpi_status uacpi_enable_all_wake_gpes(void) @@ -1637,12 +1937,10 @@ uacpi_status uacpi_enable_all_wake_gpes(void) struct do_for_all_gpes_ctx ctx = { .action = GPE_BLOCK_ACTION_ENABLE_ALL_FOR_WAKE, }; - - for_each_gpe_block(do_for_all_gpes, &ctx); - return ctx.ret; + return for_all_gpes_locked(&ctx); } -static uacpi_status initialize_gpes() +static uacpi_status initialize_gpes(void) { uacpi_status ret; uacpi_namespace_node *gpe_node; @@ -1696,42 +1994,74 @@ static uacpi_status initialize_gpes() return UACPI_STATUS_OK; } -uacpi_status uacpi_gpe_install_block( +uacpi_status uacpi_install_gpe_block( uacpi_namespace_node *gpe_device, uacpi_u64 address, uacpi_address_space address_space, uacpi_u16 num_registers, uacpi_u32 irq ) { - uacpi_object *obj; + uacpi_status ret; + uacpi_bool is_dev; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); - obj = uacpi_namespace_node_get_object(gpe_device); - if (obj == UACPI_NULL || obj->type != UACPI_OBJECT_DEVICE) + ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev); + if (uacpi_unlikely_error(ret)) + return ret; + if (!is_dev) return UACPI_STATUS_INVALID_ARGUMENT; - return create_gpe_block( + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + if (uacpi_unlikely(get_gpe(gpe_device, 0) != UACPI_NULL)) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } + + ret = create_gpe_block( gpe_device, irq, 0, address, address_space, num_registers ); + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } -uacpi_status uacpi_gpe_uninstall_block( +uacpi_status uacpi_uninstall_gpe_block( uacpi_namespace_node *gpe_device ) { - uacpi_object *obj; + uacpi_status ret; + uacpi_bool is_dev; struct gpe_search_ctx search_ctx = { .idx = 0, .gpe_device = gpe_device, }; - obj = uacpi_namespace_node_get_object(gpe_device); - if (uacpi_unlikely(obj == UACPI_NULL || obj->type != UACPI_OBJECT_DEVICE)) + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_namespace_node_is(gpe_device, UACPI_OBJECT_DEVICE, &is_dev); + if (uacpi_unlikely_error(ret)) + return ret; + if (!is_dev) return UACPI_STATUS_INVALID_ARGUMENT; + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + for_each_gpe_block(do_find_gpe, &search_ctx); - if (search_ctx.out_block == UACPI_NULL) - return UACPI_STATUS_NOT_FOUND; + if (search_ctx.out_block == UACPI_NULL) { + ret = 
UACPI_STATUS_NOT_FOUND; + goto out; + } uninstall_gpe_block(search_ctx.out_block); - return UACPI_STATUS_OK; + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } static uacpi_interrupt_ret handle_global_lock(uacpi_handle ctx) @@ -1771,23 +2101,41 @@ static uacpi_interrupt_ret handle_sci(uacpi_handle ctx) return int_ret; } -uacpi_status uacpi_initialize_events(void) +uacpi_status uacpi_initialize_events_early(void) { uacpi_status ret; if (uacpi_is_hardware_reduced()) return UACPI_STATUS_OK; + g_gpe_state_slock = uacpi_kernel_create_spinlock(); + if (uacpi_unlikely(g_gpe_state_slock == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + ret = uacpi_recursive_lock_init(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + ret = initialize_fixed_events(); if (uacpi_unlikely_error(ret)) return ret; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_initialize_events(void) +{ + uacpi_status ret; + + if (uacpi_is_hardware_reduced()) + return UACPI_STATUS_OK; + ret = initialize_gpes(); if (uacpi_unlikely_error(ret)) return ret; ret = uacpi_kernel_install_interrupt_handler( - g_uacpi_rt_ctx.fadt.sci_int, handle_sci, gpe_interrupt_head, + g_uacpi_rt_ctx.fadt.sci_int, handle_sci, g_gpe_interrupt_head, &g_uacpi_rt_ctx.sci_handle ); if (uacpi_unlikely_error(ret)) { @@ -1827,9 +2175,11 @@ uacpi_status uacpi_initialize_events(void) void uacpi_deinitialize_events(void) { - struct gpe_interrupt_ctx *ctx, *next_ctx = gpe_interrupt_head; + struct gpe_interrupt_ctx *ctx, *next_ctx = g_gpe_interrupt_head; uacpi_size i; + g_gpes_finalized = UACPI_FALSE; + while (next_ctx) { ctx = next_ctx; next_ctx = ctx->next; @@ -1847,7 +2197,14 @@ void uacpi_deinitialize_events(void) uacpi_uninstall_fixed_event_handler(i); } - gpe_interrupt_head = UACPI_NULL; + if (g_gpe_state_slock != UACPI_NULL) { + uacpi_kernel_free_spinlock(g_gpe_state_slock); + g_gpe_state_slock = UACPI_NULL; + } + + uacpi_recursive_lock_deinit(&g_event_lock); + + g_gpe_interrupt_head = UACPI_NULL; } uacpi_status uacpi_install_fixed_event_handler( @@ -1858,15 +2215,23 @@ uacpi_status uacpi_install_fixed_event_handler( uacpi_status ret; struct fixed_event_handler *ev; - if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX)) + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) return UACPI_STATUS_INVALID_ARGUMENT; if (uacpi_is_hardware_reduced()) return UACPI_STATUS_OK; + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + ev = &fixed_event_handlers[event]; - if (ev->handler != UACPI_NULL) - return UACPI_STATUS_ALREADY_EXISTS; + if (ev->handler != UACPI_NULL) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } ev->handler = handler; ev->ctx = user; @@ -1875,10 +2240,11 @@ uacpi_status uacpi_install_fixed_event_handler( if (uacpi_unlikely_error(ret)) { ev->handler = UACPI_NULL; ev->ctx = UACPI_NULL; - return ret; } - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_uninstall_fixed_event_handler( @@ -1888,21 +2254,31 @@ uacpi_status uacpi_uninstall_fixed_event_handler( uacpi_status ret; struct fixed_event_handler *ev; - if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX)) + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) return UACPI_STATUS_INVALID_ARGUMENT; if (uacpi_is_hardware_reduced()) return UACPI_STATUS_OK; + ret = 
uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + ev = &fixed_event_handlers[event]; ret = set_event(event, UACPI_EVENT_DISABLED); if (uacpi_unlikely_error(ret)) - return ret; + goto out; + + uacpi_kernel_wait_for_work_completion(); ev->handler = UACPI_NULL; ev->ctx = UACPI_NULL; - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_fixed_event_info( @@ -1914,11 +2290,17 @@ uacpi_status uacpi_fixed_event_info( uacpi_u64 raw_value; uacpi_event_info info = 0; - if (uacpi_unlikely(event > UACPI_FIXED_EVENT_MAX)) + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(event < 0 || event > UACPI_FIXED_EVENT_MAX)) return UACPI_STATUS_INVALID_ARGUMENT; if (uacpi_is_hardware_reduced()) return UACPI_STATUS_NOT_FOUND; + ret = uacpi_recursive_lock_acquire(&g_event_lock); + if (uacpi_unlikely_error(ret)) + return ret; + if (fixed_event_handlers[event].handler != UACPI_NULL) info |= UACPI_EVENT_INFO_HAS_HANDLER; @@ -1926,18 +2308,20 @@ uacpi_status uacpi_fixed_event_info( ret = uacpi_read_register_field(ev->enable_field, &raw_value); if (uacpi_unlikely_error(ret)) - return ret; + goto out; if (raw_value) info |= UACPI_EVENT_INFO_ENABLED | UACPI_EVENT_INFO_HW_ENABLED; ret = uacpi_read_register_field(ev->status_field, &raw_value); if (uacpi_unlikely_error(ret)) - return ret; + goto out; if (raw_value) info |= UACPI_EVENT_INFO_HW_STATUS; *out_info = info; - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } uacpi_status uacpi_gpe_info( @@ -1951,10 +2335,16 @@ uacpi_status uacpi_gpe_info( uacpi_u64 raw_value; uacpi_event_info info = 0; - ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; + ret = sanitize_device_and_find_gpe(&gpe_device, idx, &event); + if (uacpi_unlikely_error(ret)) + goto out; + if (event->handler_type != GPE_HANDLER_TYPE_NONE) info |= UACPI_EVENT_INFO_HAS_HANDLER; @@ -1970,18 +2360,20 @@ uacpi_status uacpi_gpe_info( ret = uacpi_gas_read(&reg->enable, &raw_value); if (uacpi_unlikely_error(ret)) - return ret; + goto out; if (raw_value & mask) info |= UACPI_EVENT_INFO_HW_ENABLED; ret = uacpi_gas_read(&reg->status, &raw_value); if (uacpi_unlikely_error(ret)) - return ret; + goto out; if (raw_value & mask) info |= UACPI_EVENT_INFO_HW_STATUS; *out_info = info; - return UACPI_STATUS_OK; +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } #define PM1_STATUS_BITS ( \ @@ -2002,12 +2394,22 @@ uacpi_status uacpi_clear_all_events(void) .action = GPE_BLOCK_ACTION_CLEAR_ALL, }; - ret = uacpi_write_register(UACPI_REGISTER_PM1_STS, PM1_STATUS_BITS); + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + ret = uacpi_recursive_lock_acquire(&g_event_lock); if (uacpi_unlikely_error(ret)) return ret; + ret = uacpi_write_register(UACPI_REGISTER_PM1_STS, PM1_STATUS_BITS); + if (uacpi_unlikely_error(ret)) + goto out; + for_each_gpe_block(do_for_all_gpes, &ctx); - return ctx.ret; + ret = ctx.ret; + +out: + uacpi_recursive_lock_release(&g_event_lock); + return ret; } #endif diff --git a/drivers/bus/acpi_new/source/interpreter.c b/drivers/bus/acpi_new/source/interpreter.c index 8dbdfe169354c..0454f6ad7d300 100644 --- a/drivers/bus/acpi_new/source/interpreter.c +++ b/drivers/bus/acpi_new/source/interpreter.c @@ -21,7 +21,6
@@ enum item_type { ITEM_NONE = 0, ITEM_NAMESPACE_NODE, - ITEM_NAMESPACE_NODE_METHOD_LOCAL, ITEM_OBJECT, ITEM_EMPTY_OBJECT, ITEM_PACKAGE_LENGTH, @@ -332,7 +331,7 @@ struct execution_context { uacpi_u8 sync_level; }; -#define AML_READ(ptr, offset) (*(((uacpi_u8*)(code)) + offset)) +#define AML_READ(ptr, offset) (*(((uacpi_u8*)(ptr)) + offset)) static uacpi_status parse_nameseg(uacpi_u8 *cursor, uacpi_object_name *out_name) @@ -619,6 +618,10 @@ static uacpi_status resolve_name_string( out: cursor += namesegs * 4; frame->code_offset = cursor - frame->method->code; + + if (uacpi_likely_success(ret) && behavior == RESOLVE_FAIL_IF_DOESNT_EXIST) + uacpi_shareable_ref(cur_node); + *out_node = cur_node; return ret; } @@ -628,7 +631,7 @@ static uacpi_status do_install_node_item(struct call_frame *frame, { uacpi_status ret; - ret = uacpi_node_install(item->node->parent, item->node); + ret = uacpi_namespace_node_install(item->node->parent, item->node); if (uacpi_unlikely_error(ret)) return ret; @@ -660,6 +663,8 @@ static uacpi_status get_op(struct execution_context *ctx) op |= AML_READ(code, frame->code_offset++); } + g_uacpi_rt_ctx.opcodes_executed++; + ctx->cur_op = uacpi_get_op_spec(op); if (uacpi_unlikely(ctx->cur_op->properties & UACPI_OP_PROPERTY_RESERVED)) { uacpi_error( @@ -725,7 +730,7 @@ static uacpi_status handle_buffer(struct execution_context *ctx) return UACPI_STATUS_OK; } -uacpi_status handle_string(struct execution_context *ctx) +static uacpi_status handle_string(struct execution_context *ctx) { struct call_frame *frame = ctx->cur_frame; uacpi_object *obj; @@ -796,7 +801,8 @@ static uacpi_status handle_package(struct execution_context *ctx) } // 2. Create every object in the package, start as uninitialized - if (uacpi_unlikely(!uacpi_package_fill(package, num_elements))) + if (uacpi_unlikely(!uacpi_package_fill(package, num_elements, + UACPI_PREALLOC_OBJECTS_YES))) return UACPI_STATUS_OUT_OF_MEMORY; // 3. Go through every defined object and copy it into the package @@ -873,7 +879,7 @@ static uacpi_size field_byte_size(uacpi_object *obj) return uacpi_round_up_bits_to_bytes(bit_length); } -static uacpi_size sizeof_int() +static uacpi_size sizeof_int(void) { return g_uacpi_rt_ctx.is_rev1 ? 
4 : 8; } @@ -1118,10 +1124,7 @@ static uacpi_status handle_create_op_region(struct execution_context *ctx) if (uacpi_unlikely(node->object == UACPI_NULL)) return UACPI_STATUS_OUT_OF_MEMORY; - if (uacpi_opregion_find_and_install_handler(node) == UACPI_STATUS_OK && - uacpi_get_current_init_level() >= UACPI_INIT_LEVEL_NAMESPACE_LOADED) - uacpi_opregion_reg(node); - + uacpi_initialize_opregion_node(node); return UACPI_STATUS_OK; } @@ -1212,6 +1215,7 @@ static uacpi_status handle_create_data_region(struct execution_context *ctx) op_region->space = UACPI_ADDRESS_SPACE_TABLE_DATA; op_region->offset = table.virt_addr; op_region->length = table.hdr->length; + op_region->table_idx = table.index; node->object = uacpi_create_internal_reference( UACPI_REFERENCE_KIND_NAMED, obj @@ -1219,9 +1223,7 @@ static uacpi_status handle_create_data_region(struct execution_context *ctx) if (uacpi_unlikely(node->object == UACPI_NULL)) return UACPI_STATUS_OUT_OF_MEMORY; - if (uacpi_opregion_find_and_install_handler(node) == UACPI_STATUS_OK) - uacpi_opregion_reg(node); - + uacpi_initialize_opregion_node(node); return UACPI_STATUS_OK; } @@ -1269,7 +1271,7 @@ static uacpi_status do_load_table( return ret; if (is_dynamic_table_load(cause)) - ret = uacpi_events_match_post_dynamic_table_load(); + uacpi_events_match_post_dynamic_table_load(); return ret; } @@ -1278,6 +1280,7 @@ static uacpi_status handle_load_table(struct execution_context *ctx) { uacpi_status ret; struct item_array *items = &ctx->cur_op_ctx->items; + struct item *root_node_item; struct uacpi_table_identifiers table_id; uacpi_table table; uacpi_buffer *root_path, *param_path; @@ -1290,18 +1293,23 @@ static uacpi_status handle_load_table(struct execution_context *ctx) * new AML GPE handlers that might've been loaded, as well as potentially * remove the target. */ - if (item_array_size(items) == 11) { + if (item_array_size(items) == 12) { + uacpi_size idx; + + idx = item_array_at(items, 2)->immediate; + uacpi_table_unref(&(struct uacpi_table) { .index = idx }); + /* * If this load failed, remove the target that was provided via * ParameterPathString so that it doesn't get stored to. 
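+         * (Slot 2 of the item array holds the loaded table's index here, which is why the store target lives in slot 3 below.)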
*/ - if (uacpi_unlikely(item_array_at(items, 10)->obj->integer == 0)) { + if (uacpi_unlikely(item_array_at(items, 11)->obj->integer == 0)) { uacpi_object *target; - target = item_array_at(items, 2)->obj; + target = item_array_at(items, 3)->obj; if (target != UACPI_NULL) { uacpi_object_unref(target); - item_array_at(items, 2)->obj = UACPI_NULL; + item_array_at(items, 3)->obj = UACPI_NULL; } return UACPI_STATUS_OK; @@ -1313,41 +1321,53 @@ static uacpi_status handle_load_table(struct execution_context *ctx) ret = build_table_id( "LoadTable", &table_id, - item_array_at(items, 4)->obj->buffer, item_array_at(items, 5)->obj->buffer, - item_array_at(items, 6)->obj->buffer + item_array_at(items, 6)->obj->buffer, + item_array_at(items, 7)->obj->buffer ); if (uacpi_unlikely_error(ret)) return ret; - root_path = item_array_at(items, 7)->obj->buffer; - param_path = item_array_at(items, 8)->obj->buffer; + root_path = item_array_at(items, 8)->obj->buffer; + param_path = item_array_at(items, 9)->obj->buffer; + root_node_item = item_array_at(items, 0); if (root_path->size > 1) { - root_node = uacpi_namespace_node_resolve_from_aml_namepath( - ctx->cur_frame->cur_scope, root_path->text + ret = uacpi_namespace_node_resolve( + ctx->cur_frame->cur_scope, root_path->text, UACPI_SHOULD_LOCK_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_YES, UACPI_PERMANENT_ONLY_NO, + &root_node ); - if (uacpi_unlikely(root_node == UACPI_NULL)) - return table_id_error("LoadTable", "RootPathString", root_path); + if (uacpi_unlikely_error(ret)) { + table_id_error("LoadTable", "RootPathString", root_path); + if (ret == UACPI_STATUS_NOT_FOUND) + ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE; + return ret; + } } else { root_node = uacpi_namespace_root(); } - item_array_at(items, 0)->node = root_node; + root_node_item->node = root_node; + root_node_item->type = ITEM_NAMESPACE_NODE; + uacpi_shareable_ref(root_node); if (param_path->size > 1) { struct item *param_item; - param_node = uacpi_namespace_node_resolve_from_aml_namepath( - root_node, param_path->text + ret = uacpi_namespace_node_resolve( + root_node, param_path->text, UACPI_SHOULD_LOCK_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_YES, UACPI_PERMANENT_ONLY_NO, + &param_node ); - if (uacpi_unlikely(param_node == UACPI_NULL)) { - return table_id_error( - "LoadTable", "ParameterPathString", root_path - ); + if (uacpi_unlikely_error(ret)) { + table_id_error("LoadTable", "ParameterPathString", root_path); + if (ret == UACPI_STATUS_NOT_FOUND) + ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE; + return ret; } - param_item = item_array_at(items, 2); + param_item = item_array_at(items, 3); param_item->obj = param_node->object; uacpi_object_ref(param_item->obj); param_item->type = ITEM_OBJECT; @@ -1360,6 +1380,7 @@ static uacpi_status handle_load_table(struct execution_context *ctx) } uacpi_table_mark_as_loaded(table.index); + item_array_at(items, 2)->immediate = table.index; method = item_array_at(items, 1)->obj->method; prepare_table_load(table.hdr, UACPI_TABLE_LOAD_CAUSE_LOAD_TABLE_OP, method); @@ -1504,10 +1525,19 @@ static uacpi_status handle_load(struct execution_context *ctx) uacpi_status uacpi_execute_table(void *tbl, enum uacpi_table_load_cause cause) { - return do_load_table(uacpi_namespace_root(), tbl, cause); + uacpi_status ret; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = do_load_table(uacpi_namespace_root(), tbl, cause); + + uacpi_namespace_write_unlock(); + return ret; } -uacpi_u32 get_field_length(struct item *item) +static uacpi_u32
get_field_length(struct item *item) { struct package_length *pkg = &item->pkg; return pkg->end - pkg->begin; @@ -1528,7 +1558,7 @@ static uacpi_status ensure_is_a_field_unit(uacpi_namespace_node *node, obj = uacpi_namespace_node_get_object(node); if (obj->type != UACPI_OBJECT_FIELD_UNIT) { uacpi_error( - "Invalid argument: '%.4s' is not a field unit (%s)\n", + "invalid argument: '%.4s' is not a field unit (%s)\n", node->name.text, uacpi_object_type_to_string(obj->type) ); return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; @@ -1546,7 +1576,7 @@ static uacpi_status ensure_is_an_op_region(uacpi_namespace_node *node, obj = uacpi_namespace_node_get_object(node); if (obj->type != UACPI_OBJECT_OPERATION_REGION) { uacpi_error( - "Invalid argument: '%.4s' is not an operation region (%s)\n", + "invalid argument: '%.4s' is not an operation region (%s)\n", node->name.text, uacpi_object_type_to_string(obj->type) ); return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; @@ -1636,7 +1666,7 @@ static uacpi_status handle_create_field(struct execution_context *ctx) item = item_array_at(&op_ctx->items, i++); // An actual field object - if (item->type == ITEM_NAMESPACE_NODE_METHOD_LOCAL) { + if (item->type == ITEM_NAMESPACE_NODE) { uacpi_u32 length; uacpi_field_unit *field; @@ -1817,7 +1847,7 @@ static void truncate_number_if_needed(uacpi_object *obj) obj->integer &= 0xFFFFFFFF; } -static uacpi_u64 ones() +static uacpi_u64 ones(void) { return g_uacpi_rt_ctx.is_rev1 ? 0xFFFFFFFF : 0xFFFFFFFFFFFFFFFF; } @@ -1909,8 +1939,6 @@ static void update_scope(struct call_frame *frame) frame->cur_scope = block->node; } -#define TICKS_PER_SECOND (1000ull * 1000ull * 10ull) - static uacpi_status begin_block_execution(struct execution_context *ctx) { struct call_frame *cur_frame = ctx->cur_frame; @@ -1937,7 +1965,7 @@ static uacpi_status begin_block_execution(struct execution_context *ctx) if (pkg->begin == cur_frame->prev_while_code_offset) { uacpi_u64 cur_ticks; - cur_ticks = uacpi_kernel_get_ticks(); + cur_ticks = uacpi_kernel_get_nanoseconds_since_boot(); if (uacpi_unlikely(cur_ticks > block->expiration_point)) { uacpi_error("loop time out after running for %u seconds\n", @@ -1952,9 +1980,9 @@ static uacpi_status begin_block_execution(struct execution_context *ctx) * Calculate the expiration point for this loop. * If a loop is executed past this point, it will get aborted. 
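+         * For example, with a 30 second limit the loop is aborted once the monotonic clock passes its value at loop entry by 30 * UACPI_NANOSECONDS_PER_SEC.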
*/ - block->expiration_point = uacpi_kernel_get_ticks(); + block->expiration_point = uacpi_kernel_get_nanoseconds_since_boot(); block->expiration_point += - g_uacpi_rt_ctx.loop_timeout_seconds * TICKS_PER_SECOND; + g_uacpi_rt_ctx.loop_timeout_seconds * UACPI_NANOSECONDS_PER_SEC; } break; case UACPI_AML_OP_ScopeOp: @@ -2123,7 +2151,7 @@ static uacpi_status debug_store(uacpi_object *src) /* * NOTE: this function returns the parent object */ -uacpi_object *reference_unwind(uacpi_object *obj) +static uacpi_object *reference_unwind(uacpi_object *obj) { uacpi_object *parent = obj; @@ -2139,8 +2167,39 @@ uacpi_object *reference_unwind(uacpi_object *obj) return UACPI_NULL; } +static uacpi_iteration_decision opregion_try_detach_from_parent( + void *user, uacpi_namespace_node *node, uacpi_u32 node_depth +) +{ + uacpi_object *target_object = user; + UACPI_UNUSED(node_depth); + + if (node->object == target_object) { + uacpi_opregion_uninstall_handler(node); + return UACPI_ITERATION_DECISION_BREAK; + } + + return UACPI_ITERATION_DECISION_CONTINUE; +} + static void object_replace_child(uacpi_object *parent, uacpi_object *new_child) { + if (parent->flags == UACPI_REFERENCE_KIND_NAMED && + uacpi_object_is(parent->inner_object, UACPI_OBJECT_OPERATION_REGION)) { + + /* + * We're doing a CopyObject or similar to a namespace node that is an + * operation region. Try to find the parent node and manually detach + * the handler. + */ + opregion_try_detach_from_parent(parent, uacpi_namespace_root(), 0); + uacpi_namespace_do_for_each_child( + uacpi_namespace_root(), opregion_try_detach_from_parent, UACPI_NULL, + UACPI_OBJECT_OPERATION_REGION_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_NO, UACPI_PERMANENT_ONLY_NO, parent + ); + } + uacpi_object_detach_child(parent); uacpi_object_attach_child(parent, new_child); } @@ -2454,7 +2513,7 @@ static uacpi_status ensure_valid_idx(uacpi_object *obj, uacpi_size idx, return UACPI_STATUS_OK; uacpi_error( - "Invalid index %zu, %s@%p has %zu elements\n", + "invalid index %zu, %s@%p has %zu elements\n", idx, uacpi_object_type_to_string(obj->type), obj, src_size ); return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX; @@ -2535,7 +2594,7 @@ static uacpi_status handle_index(struct execution_context *ctx) } default: uacpi_error( - "Invalid argument for Index: %s, " + "invalid argument for Index: %s, " "expected String/Buffer/Package\n", uacpi_object_type_to_string(src->type) ); @@ -2794,7 +2853,7 @@ static uacpi_status handle_mid(struct execution_context *ctx) if (uacpi_unlikely(src->type != UACPI_OBJECT_STRING && src->type != UACPI_OBJECT_BUFFER)) { uacpi_error( - "Invalid argument for Mid: %s, expected String/Buffer\n", + "invalid argument for Mid: %s, expected String/Buffer\n", uacpi_object_type_to_string(src->type) ); return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; @@ -3017,7 +3076,7 @@ static uacpi_status handle_sizeof(struct execution_context *ctx) default: uacpi_error( - "Invalid argument for Sizeof: %s, " + "invalid argument for Sizeof: %s, " "expected String/Buffer/Package\n", uacpi_object_type_to_string(src->type) ); @@ -3051,7 +3110,7 @@ static uacpi_status handle_timer(struct execution_context *ctx) uacpi_object *dst; dst = item_array_at(&op_ctx->items, 0)->obj; - dst->integer = uacpi_kernel_get_ticks(); + dst->integer = uacpi_kernel_get_nanoseconds_since_boot() / 100; return UACPI_STATUS_OK; } @@ -3071,10 +3130,13 @@ static uacpi_status handle_stall_or_sleep(struct execution_context *ctx) if (time > 2000) time = 2000; + uacpi_namespace_write_unlock(); uacpi_kernel_sleep(time); 
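+            // The namespace lock was dropped above so that a long Sleep cannot starve other AML threads; it is reacquired before execution continues.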
+ uacpi_namespace_write_lock(); } else { // Spec says this must evaluate to a ByteData - time &= 0xFF; + if (time > 0xFF) + time = 0xFF; uacpi_kernel_stall(time); } @@ -3380,7 +3442,7 @@ static uacpi_status handle_create_method(struct execution_context *ctx) struct package_length *pkg; struct uacpi_namespace_node *node; struct uacpi_object *dst; - uacpi_u32 method_begin_offset; + uacpi_u32 method_begin_offset, method_size; this_method = ctx->cur_frame->method; pkg = &item_array_at(&op_ctx->items, 0)->pkg; @@ -3400,11 +3462,23 @@ static uacpi_status handle_create_method(struct execution_context *ctx) dst = item_array_at(&op_ctx->items, 4)->obj; method = dst->method; - init_method_flags(method, item_array_at(&op_ctx->items, 2)->immediate); + method_size = pkg->end - method_begin_offset; - method->code = ctx->cur_frame->method->code; - method->code += method_begin_offset; - method->size = pkg->end - method_begin_offset; + if (method_size) { + method->code = uacpi_kernel_alloc(method_size); + if (uacpi_unlikely(method->code == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + uacpi_memcpy( + method->code, + ctx->cur_frame->method->code + method_begin_offset, + method_size + ); + method->size = method_size; + method->owns_code = 1; + } + + init_method_flags(method, item_array_at(&op_ctx->items, 2)->immediate); node->object = uacpi_create_internal_reference(UACPI_REFERENCE_KIND_NAMED, dst); @@ -3452,7 +3526,7 @@ static uacpi_status handle_event_ctl(struct execution_context *ctx) ); if (uacpi_unlikely(obj->type != UACPI_OBJECT_EVENT)) { uacpi_error( - "%s: Invalid argument '%s', expected an Event object\n", + "%s: invalid argument '%s', expected an Event object\n", op_ctx->op->name, uacpi_object_type_to_string(obj->type) ); return UACPI_STATUS_AML_INCOMPATIBLE_OBJECT_TYPE; @@ -3474,7 +3548,9 @@ static uacpi_status handle_event_ctl(struct execution_context *ctx) if (timeout > 0xFFFF) timeout = 0xFFFF; + uacpi_namespace_write_unlock(); ret = uacpi_kernel_wait_for_event(obj->event->handle, timeout); + uacpi_namespace_write_lock(); /* * The return value here is inverted, we return 0 for success and Ones @@ -3530,12 +3606,14 @@ static uacpi_status handle_mutex_ctl(struct execution_context *ctx) timeout = 0xFFFF; if (uacpi_this_thread_owns_aml_mutex(obj->mutex)) { - if (uacpi_likely(uacpi_acquire_aml_mutex(obj->mutex, timeout))) + ret = uacpi_acquire_aml_mutex(obj->mutex, timeout); + if (uacpi_likely_success(ret)) *return_value = 0; break; } - if (!uacpi_acquire_aml_mutex(obj->mutex, timeout)) + ret = uacpi_acquire_aml_mutex(obj->mutex, timeout); + if (uacpi_unlikely_error(ret)) break; ret = held_mutexes_array_push(&ctx->held_mutexes, obj->mutex); @@ -3641,7 +3719,10 @@ static uacpi_status handle_firmware_request(struct execution_context *ctx) return UACPI_STATUS_INVALID_ARGUMENT; } + uacpi_namespace_write_unlock(); uacpi_kernel_handle_firmware_request(&req); + uacpi_namespace_write_lock(); + return UACPI_STATUS_OK; } @@ -3801,7 +3882,7 @@ static uacpi_status handle_create_buffer_field(struct execution_context *ctx) if (uacpi_unlikely((field->bit_index + field->bit_length) > src_buf->size * 8)) { uacpi_error( - "Invalid buffer field: bits [%zu..%zu], buffer size is %zu bytes\n", + "invalid buffer field: bits [%zu..%zu], buffer size is %zu bytes\n", field->bit_index, field->bit_index + field->bit_length, src_buf->size ); @@ -4224,6 +4305,8 @@ static uacpi_status enter_method( { uacpi_status ret = UACPI_STATUS_OK; + uacpi_shareable_ref(method); + if (!method->is_serialized) return ret; @@ -4244,8 
+4327,9 @@ static uacpi_status enter_method( } if (!uacpi_this_thread_owns_aml_mutex(method->mutex)) { - if (uacpi_unlikely(!uacpi_acquire_aml_mutex(method->mutex, 0xFFFF))) - return UACPI_STATUS_INTERNAL_ERROR; + ret = uacpi_acquire_aml_mutex(method->mutex, 0xFFFF); + if (uacpi_unlikely_error(ret)) + return ret; ret = held_mutexes_array_push(&ctx->held_mutexes, method->mutex); if (uacpi_unlikely_error(ret)) { @@ -4284,7 +4368,8 @@ static uacpi_bool pop_item(struct op_context *op_ctx) if (item->type == ITEM_OBJECT) uacpi_object_unref(item->obj); - if (item->type == ITEM_NAMESPACE_NODE_METHOD_LOCAL) + + if (item->type == ITEM_NAMESPACE_NODE) uacpi_namespace_node_unref(item->node); item_array_pop(&op_ctx->items); @@ -4313,7 +4398,7 @@ static void call_frame_clear(struct call_frame *frame) uacpi_namespace_node *node; node = *temp_namespace_node_array_last(&frame->temp_nodes); - uacpi_node_uninstall(node); + uacpi_namespace_node_uninstall(node); temp_namespace_node_array_pop(&frame->temp_nodes); } temp_namespace_node_array_clear(&frame->temp_nodes); @@ -4322,6 +4407,8 @@ static void call_frame_clear(struct call_frame *frame) uacpi_object_unref(frame->args[i]); for (i = 0; i < 8; ++i) uacpi_object_unref(frame->locals[i]); + + uacpi_method_unref(frame->method); } static uacpi_u8 parse_op_generates_item[0x100] = { @@ -4338,8 +4425,8 @@ static uacpi_u8 parse_op_generates_item[0x100] = { [UACPI_PARSE_OP_TARGET] = ITEM_EMPTY_OBJECT, [UACPI_PARSE_OP_PKGLEN] = ITEM_PACKAGE_LENGTH, [UACPI_PARSE_OP_TRACKED_PKGLEN] = ITEM_PACKAGE_LENGTH, - [UACPI_PARSE_OP_CREATE_NAMESTRING] = ITEM_NAMESPACE_NODE_METHOD_LOCAL, - [UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE_METHOD_LOCAL, + [UACPI_PARSE_OP_CREATE_NAMESTRING] = ITEM_NAMESPACE_NODE, + [UACPI_PARSE_OP_CREATE_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE, [UACPI_PARSE_OP_EXISTING_NAMESTRING] = ITEM_NAMESPACE_NODE, [UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL] = ITEM_NAMESPACE_NODE, [UACPI_PARSE_OP_EXISTING_NAMESTRING_OR_NULL_IF_LOAD] = ITEM_NAMESPACE_NODE, @@ -4897,7 +4984,7 @@ enum method_call_type { static uacpi_status prepare_method_call( struct execution_context *ctx, uacpi_namespace_node *node, uacpi_control_method *method, enum method_call_type type, - const uacpi_args *args + const uacpi_object_array *args ) { uacpi_status ret; @@ -5271,7 +5358,7 @@ static uacpi_status exec_op(struct execution_context *ctx) enum uacpi_log_level lvl = UACPI_LOG_ERROR; uacpi_status trace_ret = ret; - if (ctx->cur_frame->method->named_objects_persist) { + if (frame->method->named_objects_persist) { uacpi_bool is_ok; is_ok = op_allows_unresolved_if_load(op); @@ -5292,6 +5379,10 @@ static uacpi_status exec_op(struct execution_context *ctx) ret = UACPI_STATUS_AML_UNDEFINED_REFERENCE; } + if (behavior == RESOLVE_CREATE_LAST_NAMESEG_FAIL_IF_EXISTS && + !frame->method->named_objects_persist) + item->node->flags |= UACPI_NAMESPACE_NODE_FLAG_TEMPORARY; + break; } @@ -5517,17 +5608,16 @@ static uacpi_status exec_op(struct execution_context *ctx) static void ctx_reload_post_ret(struct execution_context *ctx) { - call_frame_clear(ctx->cur_frame); + uacpi_control_method *method = ctx->cur_frame->method; - if (ctx->cur_frame->method->is_serialized) { + if (method->is_serialized) { held_mutexes_array_remove_and_release( - &ctx->held_mutexes, - ctx->cur_frame->method->mutex, - FORCE_RELEASE_YES + &ctx->held_mutexes, method->mutex, FORCE_RELEASE_YES ); ctx->sync_level = ctx->cur_frame->prev_sync_level; } + call_frame_clear(ctx->cur_frame); 
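+    // call_frame_clear() drops the frame's method reference, so it runs only after the serialized-method mutex above has been released.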
call_frame_array_pop(&ctx->call_stack); ctx->cur_frame = call_frame_array_last(&ctx->call_stack); @@ -5615,7 +5705,7 @@ static void execution_context_release(struct execution_context *ctx) uacpi_status uacpi_execute_control_method( uacpi_namespace_node *scope, uacpi_control_method *method, - const uacpi_args *args, uacpi_object **out_obj + const uacpi_object_array *args, uacpi_object **out_obj ) { uacpi_status ret = UACPI_STATUS_OK; diff --git a/drivers/bus/acpi_new/source/io.c b/drivers/bus/acpi_new/source/io.c index f68d6c1eb7ba5..7757cf1650718 100644 --- a/drivers/bus/acpi_new/source/io.c +++ b/drivers/bus/acpi_new/source/io.c @@ -4,6 +4,7 @@ #include #include #include +#include uacpi_size uacpi_round_up_bits_to_bytes(uacpi_size bit_length) { @@ -173,74 +174,6 @@ void uacpi_write_buffer_field( do_write_misaligned_buffer_field(field, src, size); } -static uacpi_status dispatch_field_io( - uacpi_namespace_node *region_node, uacpi_u32 offset, uacpi_u8 byte_width, - uacpi_region_op op, uacpi_u64 *in_out -) -{ - uacpi_status ret; - uacpi_operation_region *region; - uacpi_address_space_handler *handler; - uacpi_u64 offset_end; - - uacpi_region_rw_data data = { - .byte_width = byte_width, - .offset = offset, - }; - - ret = uacpi_opregion_attach(region_node); - if (uacpi_unlikely_error(ret)) { - uacpi_trace_region_error( - region_node, "unable to attach", ret - ); - return ret; - } - - region = uacpi_namespace_node_get_object(region_node)->op_region; - handler = region->handler; - - offset_end = offset; - offset_end += byte_width; - data.offset += region->offset; - - if (uacpi_unlikely(region->length < offset_end || - data.offset < offset)) { - const uacpi_char *path; - - path = uacpi_namespace_node_generate_absolute_path(region_node); - uacpi_error( - "out-of-bounds access to opregion %s[0x%"UACPI_PRIX64"->" - "0x%"UACPI_PRIX64"] at 0x%"UACPI_PRIX64" (idx=%u, width=%d)\n", - path, UACPI_FMT64(region->offset), - UACPI_FMT64(region->offset + region->length), - UACPI_FMT64(data.offset), offset, byte_width - ); - uacpi_free_dynamic_string(path); - return UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX; - } - - data.handler_context = handler->user_context; - data.region_context = region->user_context; - - if (op == UACPI_REGION_OP_WRITE) { - data.value = *in_out; - uacpi_trace_region_io(region_node, op, data.offset, - byte_width, data.value); - } - - ret = handler->callback(op, &data); - if (uacpi_unlikely_error(ret)) - return ret; - - if (op == UACPI_REGION_OP_READ) { - *in_out = data.value; - uacpi_trace_region_io(region_node, op, data.offset, - byte_width, data.value); - } - - return UACPI_STATUS_OK; -} - static uacpi_status access_field_unit( uacpi_field_unit *field, uacpi_u32 offset, uacpi_region_op op, uacpi_u64 *in_out @@ -248,20 +181,13 @@ static uacpi_status access_field_unit( { uacpi_status ret = UACPI_STATUS_OK; uacpi_namespace_node *region_node; - uacpi_mutex *gl = UACPI_NULL; if (field->lock_rule) { - uacpi_namespace_node *gl_node; - uacpi_object *obj; - - gl_node = uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_GL); - obj = uacpi_namespace_node_get_object(gl_node); - - if (uacpi_likely(obj != UACPI_NULL && obj->type == UACPI_OBJECT_MUTEX)) - gl = obj->mutex; - - if (uacpi_unlikely(!uacpi_acquire_aml_mutex(gl, 0xFFFF))) - return UACPI_STATUS_INTERNAL_ERROR; + ret = uacpi_acquire_aml_mutex( + g_uacpi_rt_ctx.global_lock_mutex, 0xFFFF + ); + if (uacpi_unlikely_error(ret)) + return ret; } switch (field->kind) { @@ -283,18 +209,22 @@ static uacpi_status access_field_unit( switch (op) { case 
UACPI_REGION_OP_READ: - return uacpi_read_field_unit( + ret = uacpi_read_field_unit( field->data, in_out, field->access_width_bytes ); + break; case UACPI_REGION_OP_WRITE: - return uacpi_write_field_unit( + ret = uacpi_write_field_unit( field->data, in_out, field->access_width_bytes ); + break; default: ret = UACPI_STATUS_INVALID_ARGUMENT; - goto out; + break; } + goto out; + default: uacpi_error("invalid field unit kind %d\n", field->kind); ret = UACPI_STATUS_INVALID_ARGUMENT; @@ -303,13 +233,13 @@ static uacpi_status access_field_unit( if (uacpi_unlikely_error(ret)) goto out; - ret = dispatch_field_io( + ret = uacpi_dispatch_opregion_io( region_node, offset, field->access_width_bytes, op, in_out ); out: - if (gl != UACPI_NULL) - uacpi_release_aml_mutex(gl); + if (field->lock_rule) + uacpi_release_aml_mutex(g_uacpi_rt_ctx.global_lock_mutex); return ret; } @@ -591,13 +521,16 @@ uacpi_status uacpi_gas_read(const struct acpi_gas *gas, uacpi_u64 *out_value) uacpi_u64 address = gas->address + (index * access_byte_width); if (gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO) { - ret = uacpi_kernel_raw_io_read( - address, access_byte_width, &data - ); + ret = uacpi_system_io_read(address, access_byte_width, &data); } else { - ret = uacpi_kernel_raw_memory_read( - address, access_byte_width, &data - ); + void *virt; + + virt = uacpi_kernel_map(address, access_byte_width); + if (uacpi_unlikely(virt == UACPI_NULL)) + return UACPI_STATUS_MAPPING_FAILED; + + ret = uacpi_system_memory_read(virt, access_byte_width, &data); + uacpi_kernel_unmap(virt, access_byte_width); } if (uacpi_unlikely_error(ret)) return ret; @@ -638,13 +571,16 @@ uacpi_status uacpi_gas_write(const struct acpi_gas *gas, uacpi_u64 in_value) uacpi_u64 address = gas->address + (index * access_byte_width); if (gas->address_space_id == UACPI_ADDRESS_SPACE_SYSTEM_IO) { - ret = uacpi_kernel_raw_io_write( - address, access_byte_width, data - ); + ret = uacpi_system_io_write(address, access_byte_width, data); } else { - ret = uacpi_kernel_raw_memory_write( - address, access_byte_width, data - ); + void *virt; + + virt = uacpi_kernel_map(address, access_byte_width); + if (uacpi_unlikely(virt == UACPI_NULL)) + return UACPI_STATUS_MAPPING_FAILED; + + ret = uacpi_system_memory_write(virt, access_byte_width, data); + uacpi_kernel_unmap(virt, access_byte_width); } if (uacpi_unlikely_error(ret)) return ret; @@ -656,3 +592,81 @@ return UACPI_STATUS_OK; } + +uacpi_status uacpi_system_io_read( + uacpi_io_addr address, uacpi_u8 width, uacpi_u64 *out +) +{ + uacpi_status ret; + uacpi_handle handle; + + ret = uacpi_kernel_io_map(address, width, &handle); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_kernel_io_read(handle, 0, width, out); + uacpi_kernel_io_unmap(handle); + + return ret; +} + +uacpi_status uacpi_system_io_write( + uacpi_io_addr address, uacpi_u8 width, uacpi_u64 in +) +{ + uacpi_status ret; + uacpi_handle handle; + + ret = uacpi_kernel_io_map(address, width, &handle); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_kernel_io_write(handle, 0, width, in); + uacpi_kernel_io_unmap(handle); + + return ret; +} + +uacpi_status uacpi_system_memory_read(void *ptr, uacpi_u8 width, uacpi_u64 *out) +{ + switch (width) { + case 1: + *out = *(volatile uacpi_u8*)ptr; + break; + case 2: + *out = *(volatile uacpi_u16*)ptr; + break; + case 4: + *out = *(volatile uacpi_u32*)ptr; + break; + case 8: + *out = *(volatile uacpi_u64*)ptr; + break; 
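+    // Reject any access width other than 1, 2, 4 or 8 bytes.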
default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_system_memory_write(void *ptr, uacpi_u8 width, uacpi_u64 in) +{ + switch (width) { + case 1: + *(volatile uacpi_u8*)ptr = in; + break; + case 2: + *(volatile uacpi_u16*)ptr = in; + break; + case 4: + *(volatile uacpi_u32*)ptr = in; + break; + case 8: + *(volatile uacpi_u64*)ptr = in; + break; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } + + return UACPI_STATUS_OK; +} diff --git a/drivers/bus/acpi_new/source/mutex.c b/drivers/bus/acpi_new/source/mutex.c index 43871364970df..ee4e6fc9a3c00 100644 --- a/drivers/bus/acpi_new/source/mutex.c +++ b/drivers/bus/acpi_new/source/mutex.c @@ -4,8 +4,9 @@ #include #include #include +#include -#if UACPI_REDUCED_HARDWARE == 0 +#ifndef UACPI_REDUCED_HARDWARE #define GLOBAL_LOCK_PENDING (1 << 0) @@ -120,9 +121,31 @@ UACPI_STUB_IF_REDUCED_HARDWARE( void uacpi_release_global_lock_to_firmware(void) ) +uacpi_status uacpi_acquire_native_mutex_with_timeout( + uacpi_handle mtx, uacpi_u16 timeout +) +{ + uacpi_status ret; + + if (uacpi_unlikely(mtx == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_kernel_acquire_mutex(mtx, timeout); + if (uacpi_likely_success(ret)) + return ret; + + if (uacpi_unlikely(ret != UACPI_STATUS_TIMEOUT || timeout == 0xFFFF)) { + uacpi_error( + "unexpected status %08X (%s) while acquiring %p (timeout=%04X)\n", + ret, uacpi_status_to_string(ret), mtx, timeout + ); + } + + return ret; +} + uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq) { - uacpi_bool did_acquire; uacpi_status ret; UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); @@ -130,15 +153,15 @@ uacpi_status uacpi_acquire_global_lock(uacpi_u16 timeout, uacpi_u32 *out_seq) if (uacpi_unlikely(out_seq == UACPI_NULL)) return UACPI_STATUS_INVALID_ARGUMENT; - UACPI_MUTEX_ACQUIRE_WITH_TIMEOUT( - g_uacpi_rt_ctx.global_lock_mutex, timeout, did_acquire + ret = uacpi_acquire_native_mutex_with_timeout( + g_uacpi_rt_ctx.global_lock_mutex->handle, timeout ); - if (!did_acquire) - return UACPI_STATUS_TIMEOUT; + if (ret != UACPI_STATUS_OK) + return ret; ret = uacpi_acquire_global_lock_from_firmware(); if (uacpi_unlikely_error(ret)) { - UACPI_MUTEX_RELEASE(g_uacpi_rt_ctx.global_lock_mutex); + uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle); return ret; } @@ -160,7 +183,7 @@ uacpi_status uacpi_release_global_lock(uacpi_u32 seq) g_uacpi_rt_ctx.global_lock_acquired = UACPI_FALSE; uacpi_release_global_lock_to_firmware(); - UACPI_MUTEX_RELEASE(g_uacpi_rt_ctx.global_lock_mutex); + uacpi_release_native_mutex(g_uacpi_rt_ctx.global_lock_mutex->handle); return UACPI_STATUS_OK; } @@ -173,10 +196,10 @@ uacpi_bool uacpi_this_thread_owns_aml_mutex(uacpi_mutex *mutex) return id == uacpi_kernel_get_thread_id(); } -uacpi_bool uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout) +uacpi_status uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout) { uacpi_thread_id this_id; - uacpi_bool did_acquire; + uacpi_status ret = UACPI_STATUS_OK; this_id = uacpi_kernel_get_thread_id(); if (UACPI_ATOMIC_LOAD_THREAD_ID(&mutex->owner) == this_id) { @@ -185,40 +208,185 @@ uacpi_bool uacpi_acquire_aml_mutex(uacpi_mutex *mutex, uacpi_u16 timeout) "failing an attempt to acquire mutex @%p, too many recursive " "acquires\n", mutex ); - return UACPI_FALSE; + return UACPI_STATUS_DENIED; } mutex->depth++; - return UACPI_TRUE; + return ret; } - UACPI_MUTEX_ACQUIRE_WITH_TIMEOUT(mutex->handle, timeout, did_acquire); - 
if (!did_acquire) - return UACPI_FALSE; - - if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex) { - uacpi_status ret; + uacpi_namespace_write_unlock(); + ret = uacpi_acquire_native_mutex_with_timeout(mutex->handle, timeout); + if (ret != UACPI_STATUS_OK) + goto out; + if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle) { ret = uacpi_acquire_global_lock_from_firmware(); if (uacpi_unlikely_error(ret)) { - UACPI_MUTEX_RELEASE(mutex->handle); - return UACPI_FALSE; + uacpi_release_native_mutex(mutex->handle); + goto out; } } UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, this_id); mutex->depth = 1; - return UACPI_TRUE; + +out: + uacpi_namespace_write_lock(); + return ret; } -void uacpi_release_aml_mutex(uacpi_mutex *mutex) +uacpi_status uacpi_release_aml_mutex(uacpi_mutex *mutex) { if (mutex->depth-- > 1) - return; + return UACPI_STATUS_OK; - if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex) + if (mutex->handle == g_uacpi_rt_ctx.global_lock_mutex->handle) uacpi_release_global_lock_to_firmware(); UACPI_ATOMIC_STORE_THREAD_ID(&mutex->owner, UACPI_THREAD_ID_NONE); - UACPI_MUTEX_RELEASE(mutex->handle); + uacpi_release_native_mutex(mutex->handle); + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_recursive_lock_init(struct uacpi_recursive_lock *lock) +{ + lock->mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(lock->mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + lock->owner = UACPI_THREAD_ID_NONE; + lock->depth = 0; + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_recursive_lock_deinit(struct uacpi_recursive_lock *lock) +{ + if (uacpi_unlikely(lock->depth)) { + uacpi_warn( + "de-initializing active recursive lock %p with depth=%zu\n", + lock, lock->depth + ); + lock->depth = 0; + } + + lock->owner = UACPI_THREAD_ID_NONE; + + if (lock->mutex != UACPI_NULL) { + uacpi_kernel_free_mutex(lock->mutex); + lock->mutex = UACPI_NULL; + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_recursive_lock_acquire(struct uacpi_recursive_lock *lock) +{ + uacpi_thread_id this_id; + uacpi_status ret = UACPI_STATUS_OK; + + this_id = uacpi_kernel_get_thread_id(); + if (UACPI_ATOMIC_LOAD_THREAD_ID(&lock->owner) == this_id) { + lock->depth++; + return ret; + } + + ret = uacpi_acquire_native_mutex(lock->mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, this_id); + lock->depth = 1; + return ret; +} + +uacpi_status uacpi_recursive_lock_release(struct uacpi_recursive_lock *lock) +{ + if (lock->depth-- > 1) + return UACPI_STATUS_OK; + + UACPI_ATOMIC_STORE_THREAD_ID(&lock->owner, UACPI_THREAD_ID_NONE); + return uacpi_release_native_mutex(lock->mutex); +} + +uacpi_status uacpi_rw_lock_init(struct uacpi_rw_lock *lock) +{ + lock->read_mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(lock->read_mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + lock->write_mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(lock->write_mutex == UACPI_NULL)) { + uacpi_kernel_free_mutex(lock->read_mutex); + lock->read_mutex = UACPI_NULL; + return UACPI_STATUS_OUT_OF_MEMORY; + } + + lock->num_readers = 0; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_rw_lock_deinit(struct uacpi_rw_lock *lock) +{ + if (uacpi_unlikely(lock->num_readers)) { + uacpi_warn("de-initializing rw_lock %p with %zu active readers\n", + lock, lock->num_readers); + lock->num_readers = 0; + } + + if (lock->read_mutex != UACPI_NULL) { + uacpi_kernel_free_mutex(lock->read_mutex); + lock->read_mutex = UACPI_NULL; + } + if (lock->write_mutex != 
UACPI_NULL) { + uacpi_kernel_free_mutex(lock->write_mutex); + lock->write_mutex = UACPI_NULL; + } + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_rw_lock_read(struct uacpi_rw_lock *lock) +{ + uacpi_status ret; + + ret = uacpi_acquire_native_mutex(lock->read_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + if (lock->num_readers++ == 0) { + ret = uacpi_acquire_native_mutex(lock->write_mutex); + if (uacpi_unlikely_error(ret)) + lock->num_readers = 0; + } + + uacpi_kernel_release_mutex(lock->read_mutex); + return ret; +} + +uacpi_status uacpi_rw_unlock_read(struct uacpi_rw_lock *lock) +{ + uacpi_status ret; + + ret = uacpi_acquire_native_mutex(lock->read_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + + if (lock->num_readers-- == 1) + uacpi_release_native_mutex(lock->write_mutex); + + uacpi_kernel_release_mutex(lock->read_mutex); + return ret; +} + +uacpi_status uacpi_rw_lock_write(struct uacpi_rw_lock *lock) +{ + return uacpi_acquire_native_mutex(lock->write_mutex); +} + +uacpi_status uacpi_rw_unlock_write(struct uacpi_rw_lock *lock) +{ + return uacpi_release_native_mutex(lock->write_mutex); } diff --git a/drivers/bus/acpi_new/source/namespace.c b/drivers/bus/acpi_new/source/namespace.c index 37f25bbfe7af4..585d1df20ddbf 100644 --- a/drivers/bus/acpi_new/source/namespace.c +++ b/drivers/bus/acpi_new/source/namespace.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #define UACPI_REV_VALUE 2 @@ -31,6 +32,28 @@ predefined_namespaces[UACPI_PREDEFINED_NAMESPACE_MAX + 1] = { [UACPI_PREDEFINED_NAMESPACE_REV] = MAKE_PREDEFINED("_REV"), }; +static struct uacpi_rw_lock namespace_lock; + +uacpi_status uacpi_namespace_read_lock(void) +{ + return uacpi_rw_lock_read(&namespace_lock); +} + +uacpi_status uacpi_namespace_read_unlock(void) +{ + return uacpi_rw_unlock_read(&namespace_lock); +} + +uacpi_status uacpi_namespace_write_lock(void) +{ + return uacpi_rw_lock_write(&namespace_lock); +} + +uacpi_status uacpi_namespace_write_unlock(void) +{ + return uacpi_rw_unlock_write(&namespace_lock); +} + static uacpi_object *make_object_for_predefined( enum uacpi_predefined_namespace ns ) @@ -39,16 +62,17 @@ static uacpi_object *make_object_for_predefined( switch (ns) { case UACPI_PREDEFINED_NAMESPACE_ROOT: - obj = uacpi_create_object(UACPI_OBJECT_DEVICE); - if (uacpi_unlikely(obj == UACPI_NULL)) - return obj; - /* - * Erase the type here so that code like ObjectType(\) returns - * the spec-compliant result of 0. We still create it as device - * so that it is able to store global address space & notify handlers. + * The real root object is stored in the global context, whereas the \ + * node gets a placeholder uninitialized object instead. This is to + * protect against CopyObject(JUNK, \), so that all of the opregion and + * notify handlers are preserved if AML decides to do that. 
*/ - obj->type = UACPI_OBJECT_UNINITIALIZED; + g_uacpi_rt_ctx.root_object = uacpi_create_object(UACPI_OBJECT_DEVICE); + if (uacpi_unlikely(g_uacpi_rt_ctx.root_object == UACPI_NULL)) + return UACPI_NULL; + + obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); break; case UACPI_PREDEFINED_NAMESPACE_OS: @@ -76,8 +100,10 @@ static uacpi_object *make_object_for_predefined( case UACPI_PREDEFINED_NAMESPACE_GL: obj = uacpi_create_object(UACPI_OBJECT_MUTEX); - if (uacpi_likely(obj != UACPI_NULL)) - g_uacpi_rt_ctx.global_lock_mutex = obj->mutex->handle; + if (uacpi_likely(obj != UACPI_NULL)) { + uacpi_shareable_ref(obj->mutex); + g_uacpi_rt_ctx.global_lock_mutex = obj->mutex; + } break; case UACPI_PREDEFINED_NAMESPACE_OSI: @@ -98,12 +124,23 @@ static uacpi_object *make_object_for_predefined( return obj; } -static void free_namespace_node(uacpi_handle handle) +static void namespace_node_detach_object(uacpi_namespace_node *node) { - uacpi_namespace_node *node = handle; + uacpi_object *object; + + object = uacpi_namespace_node_get_object(node); + if (object != UACPI_NULL) { + if (object->type == UACPI_OBJECT_OPERATION_REGION) + uacpi_opregion_uninstall_handler(node); - if (node->object) uacpi_object_unref(node->object); + node->object = UACPI_NULL; + } +} + +static void free_namespace_node(uacpi_handle handle) +{ + uacpi_namespace_node *node = handle; if (uacpi_likely(!uacpi_namespace_node_is_predefined(node))) { uacpi_free(node, sizeof(*node)); @@ -122,6 +159,11 @@ uacpi_status uacpi_initialize_namespace(void) enum uacpi_predefined_namespace ns; uacpi_object *obj; uacpi_namespace_node *node; + uacpi_status ret; + + ret = uacpi_rw_lock_init(&namespace_lock); + if (uacpi_unlikely_error(ret)) + return ret; for (ns = 0; ns <= UACPI_PREDEFINED_NAMESPACE_MAX; ns++) { node = &predefined_namespaces[ns]; @@ -153,7 +195,9 @@ uacpi_status uacpi_initialize_namespace(void) uacpi_check_flag(UACPI_FLAG_NO_OSI)) continue; - uacpi_node_install(uacpi_namespace_root(), &predefined_namespaces[ns]); + uacpi_namespace_node_install( + uacpi_namespace_root(), &predefined_namespaces[ns] + ); } return UACPI_STATUS_OK; @@ -161,12 +205,14 @@ uacpi_status uacpi_initialize_namespace(void) void uacpi_deinitialize_namespace(void) { + uacpi_status ret; uacpi_namespace_node *current, *next = UACPI_NULL; - uacpi_object *obj; uacpi_u32 depth = 1; current = uacpi_namespace_root(); + ret = uacpi_namespace_write_lock(); + while (depth) { next = next == UACPI_NULL ? 
current->child : next->next; @@ -179,7 +225,7 @@ void uacpi_deinitialize_namespace(void) // Wipe the subtree while (current->child != UACPI_NULL) - uacpi_node_uninstall(current->child); + uacpi_namespace_node_uninstall(current->child); // Reset the pointers back as if this iteration never happened next = current; @@ -201,16 +247,19 @@ void uacpi_deinitialize_namespace(void) // This node has no children, move on to its peer } - /* - * Set the type back to DEVICE as that's what this node contained originally - * See make_object_for_predefined() for root for reasoning - */ - current = uacpi_namespace_root(); - obj = uacpi_namespace_node_get_object(current); - if (obj != UACPI_NULL && obj->type == UACPI_OBJECT_UNINITIALIZED) - obj->type = UACPI_OBJECT_DEVICE; - + namespace_node_detach_object(uacpi_namespace_root()); free_namespace_node(uacpi_namespace_root()); + + if (ret == UACPI_STATUS_OK) + uacpi_namespace_write_unlock(); + + uacpi_object_unref(g_uacpi_rt_ctx.root_object); + g_uacpi_rt_ctx.root_object = UACPI_NULL; + + uacpi_mutex_unref(g_uacpi_rt_ctx.global_lock_mutex); + g_uacpi_rt_ctx.global_lock_mutex = UACPI_NULL; + + uacpi_rw_lock_deinit(&namespace_lock); } uacpi_namespace_node *uacpi_namespace_root(void) @@ -248,7 +297,7 @@ void uacpi_namespace_node_unref(uacpi_namespace_node *node) uacpi_shareable_unref_and_delete_if_last(node, free_namespace_node); } -uacpi_status uacpi_node_install( +uacpi_status uacpi_namespace_node_install( uacpi_namespace_node *parent, uacpi_namespace_node *node ) @@ -277,33 +326,64 @@ uacpi_status uacpi_node_install( return UACPI_STATUS_OK; } +uacpi_bool uacpi_namespace_node_is_alias(uacpi_namespace_node *node) +{ + return node->flags & UACPI_NAMESPACE_NODE_FLAG_ALIAS; +} + uacpi_bool uacpi_namespace_node_is_dangling(uacpi_namespace_node *node) { return node->flags & UACPI_NAMESPACE_NODE_FLAG_DANGLING; } +uacpi_bool uacpi_namespace_node_is_temporary(uacpi_namespace_node *node) +{ + return node->flags & UACPI_NAMESPACE_NODE_FLAG_TEMPORARY; +} + uacpi_bool uacpi_namespace_node_is_predefined(uacpi_namespace_node *node) { return node->flags & UACPI_NAMESPACE_NODE_PREDEFINED; } -void uacpi_node_uninstall(uacpi_namespace_node *node) +uacpi_status uacpi_namespace_node_uninstall(uacpi_namespace_node *node) { uacpi_namespace_node *prev; - uacpi_object *object; if (uacpi_unlikely(uacpi_namespace_node_is_dangling(node))) { uacpi_warn("attempting to uninstall a dangling namespace node %.4s\n", node->name.text); - return; + return UACPI_STATUS_INTERNAL_ERROR; } + /* + * The way to trigger this is as follows: + * + * Method (FOO) { + * // Temporary device, will be deleted upon returning from FOO + * Device (\BAR) { + * } + * + * // + * // Load TBL where TBL is: + * // Scope (\BAR) { + * // Name (TEST, 123) + * // } + * // + * Load(TBL) + * } + * + * In the above example, TEST is a permanent node attached by bad AML to a + * temporary node created inside the FOO method at \BAR. The cleanup code + * will attempt to remove the \BAR device upon exit from FOO, but that is + * no longer possible as there's now a permanent child attached to it. 
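+     * Rather than corrupt the tree, the uninstall below is refused with UACPI_STATUS_DENIED and the node is left in place.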
+ */ if (uacpi_unlikely(node->child != UACPI_NULL)) { uacpi_warn( - "BUG: refusing to uninstall node %.4s with a child (%.4s)\n", + "refusing to uninstall node %.4s with a child (%.4s)\n", node->name.text, node->child->name.text ); - return; + return UACPI_STATUS_DENIED; } /* @@ -336,14 +416,7 @@ void uacpi_node_uninstall(uacpi_namespace_node *node) * namespace node as well as potential infinite cycles between a namespace * node and an object. */ - object = uacpi_namespace_node_get_object(node); - if (object != UACPI_NULL) { - if (object->type == UACPI_OBJECT_OPERATION_REGION) - uacpi_opregion_uninstall_handler(node); - - uacpi_object_unref(node->object); - node->object = UACPI_NULL; - } + namespace_node_detach_object(node); prev = node->parent ? node->parent->child : UACPI_NULL; @@ -358,7 +431,7 @@ void uacpi_node_uninstall(uacpi_namespace_node *node) "trying to uninstall a node %.4s (%p) not linked to any peer\n", node->name.text, node ); - return; + return UACPI_STATUS_INTERNAL_ERROR; } prev->next = node->next; @@ -366,6 +439,8 @@ void uacpi_node_uninstall(uacpi_namespace_node *node) node->flags |= UACPI_NAMESPACE_NODE_FLAG_DANGLING; uacpi_namespace_node_unref(node); + + return UACPI_STATUS_OK; } uacpi_namespace_node *uacpi_namespace_node_find_sub_node( @@ -411,17 +486,16 @@ static uacpi_object_name segment_to_name( return out_name; } -enum may_search_above_parent { - MAY_SEARCH_ABOVE_PARENT_NO, - MAY_SEARCH_ABOVE_PARENT_YES, -}; - -static uacpi_namespace_node *uacpi_namespace_node_do_find( +uacpi_status uacpi_namespace_node_resolve( uacpi_namespace_node *parent, const uacpi_char *path, - enum may_search_above_parent may_search_above_parent + enum uacpi_should_lock should_lock, + enum uacpi_may_search_above_parent may_search_above_parent, + enum uacpi_permanent_only permanent_only, + uacpi_namespace_node **out_node ) { uacpi_namespace_node *cur_node = parent; + uacpi_status ret = UACPI_STATUS_OK; const uacpi_char *cursor = path; uacpi_size bytes_left; uacpi_char prev_char = 0; @@ -432,16 +506,24 @@ static uacpi_namespace_node *uacpi_namespace_node_do_find( bytes_left = uacpi_strlen(path); + if (should_lock == UACPI_SHOULD_LOCK_YES) { + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + } + for (;;) { if (bytes_left == 0) - return cur_node; + goto out; switch (*cursor) { case '\\': single_nameseg = UACPI_FALSE; - if (prev_char == '^') - goto out_invalid_path; + if (prev_char == '^') { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } cur_node = uacpi_namespace_root(); break; @@ -449,8 +531,10 @@ static uacpi_namespace_node *uacpi_namespace_node_do_find( single_nameseg = UACPI_FALSE; // Tried to go behind root - if (uacpi_unlikely(cur_node == uacpi_namespace_root())) - goto out_invalid_path; + if (uacpi_unlikely(cur_node == uacpi_namespace_root())) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } cur_node = cur_node->parent; break; @@ -488,46 +572,72 @@ static uacpi_namespace_node *uacpi_namespace_node_do_find( cur_node = uacpi_namespace_node_find_sub_node(cur_node, nameseg); if (cur_node == UACPI_NULL) { - if (may_search_above_parent == MAY_SEARCH_ABOVE_PARENT_NO || + if (may_search_above_parent == UACPI_MAY_SEARCH_ABOVE_PARENT_NO || !single_nameseg) - return cur_node; + goto out; parent = parent->parent; while (parent) { cur_node = uacpi_namespace_node_find_sub_node(parent, nameseg); if (cur_node != UACPI_NULL) - return cur_node; + goto out; parent = parent->parent; } - return cur_node; + goto out; } } - return cur_node; +out: + if 
(uacpi_unlikely(ret == UACPI_STATUS_INVALID_ARGUMENT)) { + uacpi_warn("invalid path '%s'\n", path); + goto out_read_unlock; + } -out_invalid_path: - uacpi_warn("invalid path '%s'\n", path); - return UACPI_NULL; + if (cur_node == UACPI_NULL) { + ret = UACPI_STATUS_NOT_FOUND; + goto out_read_unlock; + } + + if (uacpi_namespace_node_is_temporary(cur_node) && + permanent_only == UACPI_PERMANENT_ONLY_YES) { + uacpi_warn("denying access to temporary namespace node '%.4s'\n", + cur_node->name.text); + ret = UACPI_STATUS_DENIED; + goto out_read_unlock; + } + + if (out_node != UACPI_NULL) + *out_node = cur_node; + +out_read_unlock: + if (should_lock == UACPI_SHOULD_LOCK_YES) + uacpi_namespace_read_unlock(); + return ret; } -uacpi_namespace_node *uacpi_namespace_node_find( - uacpi_namespace_node *parent, const uacpi_char *path +uacpi_status uacpi_namespace_node_find( + uacpi_namespace_node *parent, const uacpi_char *path, + uacpi_namespace_node **out_node ) { - return uacpi_namespace_node_do_find( - parent, path, MAY_SEARCH_ABOVE_PARENT_NO + return uacpi_namespace_node_resolve( + parent, path, UACPI_SHOULD_LOCK_YES, UACPI_MAY_SEARCH_ABOVE_PARENT_NO, + UACPI_PERMANENT_ONLY_YES, out_node ); } -uacpi_namespace_node *uacpi_namespace_node_resolve_from_aml_namepath( - uacpi_namespace_node *scope, const uacpi_char *path +uacpi_status uacpi_namespace_node_resolve_from_aml_namepath( + uacpi_namespace_node *scope, + const uacpi_char *path, + uacpi_namespace_node **out_node ) { - return uacpi_namespace_node_do_find( - scope, path, MAY_SEARCH_ABOVE_PARENT_YES + return uacpi_namespace_node_resolve( + scope, path, UACPI_SHOULD_LOCK_YES, UACPI_MAY_SEARCH_ABOVE_PARENT_YES, + UACPI_PERMANENT_ONLY_YES, out_node ); } @@ -539,26 +649,255 @@ uacpi_object *uacpi_namespace_node_get_object(const uacpi_namespace_node *node) return uacpi_unwrap_internal_reference(node->object); } +uacpi_object *uacpi_namespace_node_get_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask +) +{ + uacpi_object *obj; + + obj = uacpi_namespace_node_get_object(node); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + if (!uacpi_object_is_one_of(obj, type_mask)) + return UACPI_NULL; + + return obj; +} + +uacpi_status uacpi_namespace_node_acquire_object_typed( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, + uacpi_object **out_obj +) +{ + uacpi_status ret; + uacpi_object *obj; + + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + obj = uacpi_namespace_node_get_object(node); + + if (uacpi_unlikely(obj == UACPI_NULL) || + !uacpi_object_is_one_of(obj, type_mask)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + uacpi_object_ref(obj); + *out_obj = obj; + +out: + uacpi_namespace_read_unlock(); + return ret; +} + +uacpi_status uacpi_namespace_node_acquire_object( + const uacpi_namespace_node *node, uacpi_object **out_obj +) +{ + return uacpi_namespace_node_acquire_object_typed( + node, UACPI_OBJECT_ANY_BIT, out_obj + ); +} + +enum action { + ACTION_REACQUIRE, + ACTION_PUT, +}; + +static uacpi_status object_mutate_refcount( + uacpi_object *obj, void (*cb)(uacpi_object*) +) +{ + uacpi_status ret = UACPI_STATUS_OK; + + if (uacpi_likely(!uacpi_object_is(obj, UACPI_OBJECT_REFERENCE))) { + cb(obj); + return ret; + } + + /* + * Reference objects must be (un)referenced under at least a read lock, as + * this requires walking down the entire reference chain and dropping each + * object ref-count by 1. 
This might race with the interpreter and + * object_replace_child in case an object in the chain is CopyObject'ed + * into. + */ + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + cb(obj); + + uacpi_namespace_read_unlock(); + return ret; +} + +uacpi_status uacpi_namespace_node_reacquire_object( + uacpi_object *obj +) +{ + return object_mutate_refcount(obj, uacpi_object_ref); +} + +uacpi_status uacpi_namespace_node_release_object(uacpi_object *obj) +{ + return object_mutate_refcount(obj, uacpi_object_unref); +} + uacpi_object_name uacpi_namespace_node_name(const uacpi_namespace_node *node) { return node->name; } -void uacpi_namespace_for_each_node_depth_first( - uacpi_namespace_node *node, - uacpi_iteration_callback callback, - void *user +uacpi_status uacpi_namespace_node_type_unlocked( + const uacpi_namespace_node *node, uacpi_object_type *out_type +) +{ + uacpi_object *obj; + + if (uacpi_unlikely(node == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + obj = uacpi_namespace_node_get_object(node); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_NOT_FOUND; + + *out_type = obj->type; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_namespace_node_type( + const uacpi_namespace_node *node, uacpi_object_type *out_type ) { - uacpi_bool walking_up = UACPI_FALSE; + uacpi_status ret; + + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_namespace_node_type_unlocked(node, out_type); + + uacpi_namespace_read_unlock(); + return ret; +} + +uacpi_status uacpi_namespace_node_is_one_of_unlocked( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, uacpi_bool *out +) +{ + uacpi_object *obj; + + if (uacpi_unlikely(node == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + obj = uacpi_namespace_node_get_object(node); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_NOT_FOUND; + + *out = uacpi_object_is_one_of(obj, type_mask); + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_namespace_node_is_one_of( + const uacpi_namespace_node *node, uacpi_object_type_bits type_mask, + uacpi_bool *out +) +{ + uacpi_status ret; + + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_namespace_node_is_one_of_unlocked(node,type_mask, out); + + uacpi_namespace_read_unlock(); + return ret; +} + +uacpi_status uacpi_namespace_node_is( + const uacpi_namespace_node *node, uacpi_object_type type, uacpi_bool *out +) +{ + return uacpi_namespace_node_is_one_of( + node, 1u << type, out + ); +} + +uacpi_status uacpi_namespace_do_for_each_child( + uacpi_namespace_node *node, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits type_mask, uacpi_u32 max_depth, + enum uacpi_should_lock should_lock, + enum uacpi_permanent_only permanent_only, void *user +) +{ + uacpi_status ret = UACPI_STATUS_OK; + uacpi_iteration_decision decision; + uacpi_iteration_callback cb; + uacpi_bool walking_up = UACPI_FALSE, matches = UACPI_FALSE; uacpi_u32 depth = 1; - if (node == UACPI_NULL || node->child == UACPI_NULL) - return; + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (uacpi_unlikely(descending_callback == UACPI_NULL && + ascending_callback == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + if (uacpi_unlikely(node == UACPI_NULL || max_depth == 0)) + return UACPI_STATUS_INVALID_ARGUMENT; + + if (should_lock == UACPI_SHOULD_LOCK_YES) { + ret = 
uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + } + + if (node->child == UACPI_NULL) + goto out; node = node->child; while (depth) { + uacpi_namespace_node_is_one_of_unlocked(node, type_mask, &matches); + if (!matches) { + decision = UACPI_ITERATION_DECISION_CONTINUE; + goto do_next; + } + + if (permanent_only == UACPI_PERMANENT_ONLY_YES && + uacpi_namespace_node_is_temporary(node)) { + decision = UACPI_ITERATION_DECISION_NEXT_PEER; + goto do_next; + } + + cb = walking_up ? ascending_callback : descending_callback; + if (cb != UACPI_NULL) { + if (should_lock == UACPI_SHOULD_LOCK_YES) { + ret = uacpi_namespace_read_unlock(); + if (uacpi_unlikely_error(ret)) + return ret; + } + + decision = cb(user, node, depth); + if (decision == UACPI_ITERATION_DECISION_BREAK) + goto out; + + if (should_lock == UACPI_SHOULD_LOCK_YES) { + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + } + } else { + decision = UACPI_ITERATION_DECISION_CONTINUE; + } + + do_next: if (walking_up) { if (node->next) { node = node->next; @@ -571,23 +910,49 @@ void uacpi_namespace_for_each_node_depth_first( continue; } - switch (callback(user, node)) { - case UACPI_NS_ITERATION_DECISION_CONTINUE: - if (node->child) { + switch (decision) { + case UACPI_ITERATION_DECISION_CONTINUE: + if ((depth != max_depth) && (node->child != UACPI_NULL)) { node = node->child; depth++; continue; } UACPI_FALLTHROUGH; - case UACPI_NS_ITERATION_DECISION_NEXT_PEER: + case UACPI_ITERATION_DECISION_NEXT_PEER: walking_up = UACPI_TRUE; continue; - - case UACPI_NS_ITERATION_DECISION_BREAK: default: - return; + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; } } + +out: + if (should_lock == UACPI_SHOULD_LOCK_YES) + uacpi_namespace_read_unlock(); + return ret; +} + +uacpi_status uacpi_namespace_for_each_child_simple( + uacpi_namespace_node *parent, uacpi_iteration_callback callback, void *user +) +{ + return uacpi_namespace_do_for_each_child( + parent, callback, UACPI_NULL, UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, user + ); +} + +uacpi_status uacpi_namespace_for_each_child( + uacpi_namespace_node *parent, uacpi_iteration_callback descending_callback, + uacpi_iteration_callback ascending_callback, + uacpi_object_type_bits type_mask, uacpi_u32 max_depth, void *user +) +{ + return uacpi_namespace_do_for_each_child( + parent, descending_callback, ascending_callback, type_mask, max_depth, + UACPI_SHOULD_LOCK_YES, UACPI_PERMANENT_ONLY_YES, user + ); } uacpi_size uacpi_namespace_node_depth(const uacpi_namespace_node *node) @@ -602,6 +967,13 @@ uacpi_size uacpi_namespace_node_depth(const uacpi_namespace_node *node) return depth; } +uacpi_namespace_node *uacpi_namespace_node_parent( + uacpi_namespace_node *node +) +{ + return node->parent; +} + const uacpi_char *uacpi_namespace_node_generate_absolute_path( const uacpi_namespace_node *node ) diff --git a/drivers/bus/acpi_new/source/notify.c b/drivers/bus/acpi_new/source/notify.c index 7d913e476d1af..1a56efc99c1d7 100644 --- a/drivers/bus/acpi_new/source/notify.c +++ b/drivers/bus/acpi_new/source/notify.c @@ -2,59 +2,58 @@ #include #include #include +#include +#include #include -uacpi_handlers *uacpi_node_get_handlers( - uacpi_namespace_node *node -) +static uacpi_handle notify_mutex; + +uacpi_status uacpi_initialize_notify(void) { - uacpi_object *obj; + notify_mutex = uacpi_kernel_create_mutex(); + if (uacpi_unlikely(notify_mutex == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; - obj = 
uacpi_namespace_node_get_object(node); - if (uacpi_unlikely(obj == UACPI_NULL)) - return UACPI_NULL; - - switch (obj->type) { - default: - /* - * Even though the '\' object doesn't have its type set to - * UACPI_OBJECT_DEVICE, it is one. - * See namespace.c:make_object_for_predefined for reasoning. - */ - if (node != uacpi_namespace_root() || - obj->type != UACPI_OBJECT_UNINITIALIZED) - return UACPI_NULL; - UACPI_FALLTHROUGH; - case UACPI_OBJECT_DEVICE: - case UACPI_OBJECT_THERMAL_ZONE: - case UACPI_OBJECT_PROCESSOR: - return obj->handlers; - } + return UACPI_STATUS_OK; +} + +void uacpi_deinitialize_notify(void) +{ + if (notify_mutex != UACPI_NULL) + uacpi_kernel_free_mutex(notify_mutex); + + notify_mutex = UACPI_NULL; } struct notification_ctx { uacpi_namespace_node *node; uacpi_u64 value; - uacpi_device_notify_handler *node_handlers, *root_handlers; + uacpi_object *node_object; }; +static void free_notification_ctx(struct notification_ctx *ctx) +{ + uacpi_namespace_node_release_object(ctx->node_object); + uacpi_namespace_node_unref(ctx->node); + uacpi_free(ctx, sizeof(*ctx)); +} + static void do_notify(uacpi_handle opaque) { struct notification_ctx *ctx = opaque; uacpi_device_notify_handler *handler; uacpi_bool did_notify_root = UACPI_FALSE; - handler = ctx->node_handlers; + handler = ctx->node_object->handlers->notify_head; for (;;) { if (handler == UACPI_NULL) { if (did_notify_root) { - uacpi_namespace_node_unref(ctx->node); - uacpi_free(ctx, sizeof(*ctx)); + free_notification_ctx(ctx); return; } - handler = ctx->root_handlers; + handler = g_uacpi_rt_ctx.root_object->handlers->notify_head; did_notify_root = UACPI_TRUE; continue; } @@ -68,40 +67,49 @@ uacpi_status uacpi_notify_all(uacpi_namespace_node *node, uacpi_u64 value) { uacpi_status ret; struct notification_ctx *ctx; - uacpi_handlers *node_handlers, *root_handlers; + uacpi_object *node_object; - node_handlers = uacpi_node_get_handlers(node); - if (uacpi_unlikely(node_handlers == UACPI_NULL)) + node_object = uacpi_namespace_node_get_object_typed( + node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT | + UACPI_OBJECT_PROCESSOR_BIT + ); + if (uacpi_unlikely(node_object == UACPI_NULL)) return UACPI_STATUS_INVALID_ARGUMENT; - root_handlers = uacpi_node_get_handlers(uacpi_namespace_root()); + ret = uacpi_acquire_native_mutex(notify_mutex); + if (uacpi_unlikely_error(ret)) + return ret; - if (node_handlers->notify_head == UACPI_NULL && - root_handlers->notify_head == UACPI_NULL) - return UACPI_STATUS_NO_HANDLER; + if (node_object->handlers->notify_head == UACPI_NULL && + g_uacpi_rt_ctx.root_object->handlers->notify_head == UACPI_NULL) { + ret = UACPI_STATUS_NO_HANDLER; + goto out; + } ctx = uacpi_kernel_alloc(sizeof(*ctx)); - if (uacpi_unlikely(ctx == UACPI_NULL)) - return UACPI_STATUS_OUT_OF_MEMORY; + if (uacpi_unlikely(ctx == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } ctx->node = node; // In case this node goes out of scope uacpi_shareable_ref(node); ctx->value = value; - ctx->node_handlers = node_handlers->notify_head; - ctx->root_handlers = root_handlers->notify_head; + ctx->node_object = uacpi_namespace_node_get_object(node); + uacpi_object_ref(ctx->node_object); ret = uacpi_kernel_schedule_work(UACPI_WORK_NOTIFICATION, do_notify, ctx); if (uacpi_unlikely_error(ret)) { uacpi_warn("unable to schedule notification work: %s\n", uacpi_status_to_string(ret)); - uacpi_namespace_node_unref(node); - uacpi_free(ctx, sizeof(*ctx)); - return ret; + free_notification_ctx(ctx); } - return UACPI_STATUS_OK; +out: 
+ uacpi_release_native_mutex(notify_mutex); + return ret; } static uacpi_device_notify_handler *handler_container( @@ -125,14 +133,36 @@ uacpi_status uacpi_install_notify_handler( uacpi_handle handler_context ) { + uacpi_status ret; + uacpi_object *obj; uacpi_handlers *handlers; uacpi_device_notify_handler *new_handler; - handlers = uacpi_node_get_handlers(node); - if (uacpi_unlikely(handlers == UACPI_NULL)) - return UACPI_STATUS_INVALID_ARGUMENT; - if (handler_container(handlers, handler) != UACPI_NULL) - return UACPI_STATUS_ALREADY_EXISTS; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (node == uacpi_namespace_root()) { + obj = g_uacpi_rt_ctx.root_object; + } else { + ret = uacpi_namespace_node_acquire_object_typed( + node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT | + UACPI_OBJECT_PROCESSOR_BIT, &obj + ); + if (uacpi_unlikely_error(ret)) + return ret; + } + + ret = uacpi_acquire_native_mutex(notify_mutex); + if (uacpi_unlikely_error(ret)) + goto out_no_mutex; + + uacpi_kernel_wait_for_work_completion(); + + handlers = obj->handlers; + + if (handler_container(handlers, handler) != UACPI_NULL) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } - new_handler = uacpi_kernel_calloc(1, sizeof(*new_handler)); - if (uacpi_unlikely(new_handler == UACPI_NULL)) - return UACPI_STATUS_OUT_OF_MEMORY; + new_handler = uacpi_kernel_calloc(1, sizeof(*new_handler)); + if (uacpi_unlikely(new_handler == UACPI_NULL)) { + // Returning directly here would leak notify_mutex and the object ref + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } @@ -143,23 +173,51 @@ uacpi_status uacpi_install_notify_handler( new_handler->next = handlers->notify_head; handlers->notify_head = new_handler; - return UACPI_STATUS_OK; + +out: + uacpi_release_native_mutex(notify_mutex); +out_no_mutex: + if (node != uacpi_namespace_root()) + uacpi_object_unref(obj); + + return ret; } uacpi_status uacpi_uninstall_notify_handler( uacpi_namespace_node *node, uacpi_notify_handler handler ) { + uacpi_status ret; + uacpi_object *obj; uacpi_handlers *handlers; uacpi_device_notify_handler *containing, *prev_handler; - handlers = uacpi_node_get_handlers(node); - if (uacpi_unlikely(handlers == UACPI_NULL)) - return UACPI_STATUS_INVALID_ARGUMENT; + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + if (node == uacpi_namespace_root()) { + obj = g_uacpi_rt_ctx.root_object; + } else { + ret = uacpi_namespace_node_acquire_object_typed( + node, UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT | + UACPI_OBJECT_PROCESSOR_BIT, &obj + ); + if (uacpi_unlikely_error(ret)) + return ret; + } + + ret = uacpi_acquire_native_mutex(notify_mutex); + if (uacpi_unlikely_error(ret)) + goto out_no_mutex; + + uacpi_kernel_wait_for_work_completion(); + + handlers = obj->handlers; containing = handler_container(handlers, handler); - if (containing == UACPI_NULL) - return UACPI_STATUS_NOT_FOUND; + if (containing == UACPI_NULL) { + ret = UACPI_STATUS_NOT_FOUND; + goto out; + } prev_handler = handlers->notify_head; @@ -180,6 +238,13 @@ uacpi_status uacpi_uninstall_notify_handler( } out: - uacpi_free(containing, sizeof(*containing)); - return UACPI_STATUS_OK; + uacpi_release_native_mutex(notify_mutex); +out_no_mutex: + if (node != uacpi_namespace_root()) + uacpi_object_unref(obj); + + if (uacpi_likely_success(ret)) + uacpi_free(containing, sizeof(*containing)); + + return ret; } diff --git a/drivers/bus/acpi_new/source/opcodes.c b/drivers/bus/acpi_new/source/opcodes.c index 5fb090567a32a..89d50e53c77cb 100644 --- a/drivers/bus/acpi_new/source/opcodes.c +++ b/drivers/bus/acpi_new/source/opcodes.c @@ -151,14 +151,16 @@ uacpi_u8 uacpi_load_table_op_decode_ops[] = { // Storage for the scope pointer, this is left as 0 in case of errors UACPI_PARSE_OP_LOAD_ZERO_IMM,
UACPI_PARSE_OP_OBJECT_ALLOC_TYPED, UACPI_OBJECT_METHOD, + // Index of the table we are about to load, kept so we can unref it later + UACPI_PARSE_OP_LOAD_ZERO_IMM, // Storage for the target pointer, this is left as 0 if none was requested UACPI_PARSE_OP_LOAD_ZERO_IMM, UACPI_PARSE_OP_LOAD_INLINE_IMM, 1, 5, - UACPI_PARSE_OP_IF_NOT_NULL, 3, 5, + UACPI_PARSE_OP_IF_NOT_NULL, 4, 5, UACPI_PARSE_OP_STRING, - UACPI_PARSE_OP_IMM_DECREMENT, 3, - UACPI_PARSE_OP_JMP, 7, + UACPI_PARSE_OP_IMM_DECREMENT, 4, + UACPI_PARSE_OP_JMP, 8, UACPI_PARSE_OP_TERM_ARG_UNWRAP_INTERNAL, /* @@ -182,8 +184,8 @@ uacpi_u8 uacpi_load_table_op_decode_ops[] = { UACPI_PARSE_OP_INVOKE_HANDLER, // If we were given a target to store to, do the store - UACPI_PARSE_OP_IF_NOT_NULL, 2, 3, - UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 2, 9, + UACPI_PARSE_OP_IF_NOT_NULL, 3, 3, + UACPI_PARSE_OP_STORE_TO_TARGET_INDIRECT, 3, 10, UACPI_PARSE_OP_OBJECT_TRANSFER_TO_PREV, UACPI_PARSE_OP_END, diff --git a/drivers/bus/acpi_new/source/opregion.c b/drivers/bus/acpi_new/source/opregion.c index f3221e5d09374..dd852044c4dd3 100644 --- a/drivers/bus/acpi_new/source/opregion.c +++ b/drivers/bus/acpi_new/source/opregion.c @@ -1,25 +1,43 @@ -#include #include + +#include #include -#include #include #include #include +#include +#include + +struct uacpi_recursive_lock g_opregion_lock; + +uacpi_status uacpi_initialize_opregion(void) +{ + return uacpi_recursive_lock_init(&g_opregion_lock); +} + +void uacpi_deinitialize_opregion(void) +{ + uacpi_recursive_lock_deinit(&g_opregion_lock); +} void uacpi_trace_region_error( uacpi_namespace_node *node, uacpi_char *message, uacpi_status ret ) { - const uacpi_char *path; - uacpi_operation_region *op_region; + const uacpi_char *path, *space_string = ""; + uacpi_object *obj; path = uacpi_namespace_node_generate_absolute_path(node); - op_region = uacpi_namespace_node_get_object(node)->op_region; + + obj = uacpi_namespace_node_get_object_typed( + node, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_likely(obj != UACPI_NULL)) + space_string = uacpi_address_space_to_string(obj->op_region->space); uacpi_error( "%s (%s) operation region %s: %s\n", - message, uacpi_address_space_to_string(op_region->space), - path, uacpi_status_to_string(ret) + message, space_string, path, uacpi_status_to_string(ret) ); uacpi_free_dynamic_string(path); } @@ -27,13 +45,12 @@ void uacpi_trace_region_error( #define UACPI_TRACE_REGION_IO void uacpi_trace_region_io( - uacpi_namespace_node *node, uacpi_region_op op, + uacpi_namespace_node *node, uacpi_address_space space, uacpi_region_op op, uacpi_u64 offset, uacpi_u8 byte_size, uacpi_u64 ret ) { #ifdef UACPI_TRACE_REGION_IO const uacpi_char *path; - uacpi_operation_region *op_region; const uacpi_char *type_str; if (!uacpi_should_log(UACPI_LOG_TRACE)) @@ -50,13 +67,12 @@ void uacpi_trace_region_io( type_str = ""; } - op_region = uacpi_namespace_node_get_object(node)->op_region; path = uacpi_namespace_node_generate_absolute_path(node); uacpi_trace( "%s [%s] (%d bytes) %s[0x%016"UACPI_PRIX64"] = 0x%"UACPI_PRIX64"\n", type_str, path, byte_size, - uacpi_address_space_to_string(op_region->space), + uacpi_address_space_to_string(space), UACPI_FMT64(offset), UACPI_FMT64(ret) ); @@ -85,8 +101,22 @@ static uacpi_status region_run_reg( ) { uacpi_status ret; - uacpi_args method_args; - uacpi_object *args[2]; + uacpi_namespace_node *reg_node; + uacpi_object_array method_args; + uacpi_object *reg_obj, *args[2]; + + ret = uacpi_namespace_node_resolve( + node->parent, "_REG", UACPI_SHOULD_LOCK_NO, +
UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_NO, &reg_node + ); + if (uacpi_unlikely_error(ret)) + return ret; + + reg_obj = uacpi_namespace_node_get_object_typed( + reg_node, UACPI_OBJECT_METHOD_BIT + ); + if (uacpi_unlikely(reg_obj == UACPI_NULL)) + return UACPI_STATUS_OK; args[0] = uacpi_create_object(UACPI_OBJECT_INTEGER); if (uacpi_unlikely(args[0] == UACPI_NULL)) return UACPI_STATUS_OUT_OF_MEMORY; @@ -103,8 +133,10 @@ static uacpi_status region_run_reg( method_args.objects = args; method_args.count = 2; - ret = uacpi_eval(node->parent, "_REG", &method_args, UACPI_NULL); - if (uacpi_unlikely_error(ret && ret != UACPI_STATUS_NOT_FOUND)) + ret = uacpi_execute_control_method( + reg_node, reg_obj->method, &method_args, UACPI_NULL + ); + if (uacpi_unlikely_error(ret)) uacpi_trace_region_error(node, "error during _REG execution for", ret); uacpi_object_unref(args[0]); @@ -118,25 +150,20 @@ uacpi_address_space_handlers *uacpi_node_get_address_space_handlers( { uacpi_object *object; + if (node == uacpi_namespace_root()) + return g_uacpi_rt_ctx.root_object->address_space_handlers; + object = uacpi_namespace_node_get_object(node); if (uacpi_unlikely(object == UACPI_NULL)) return UACPI_NULL; switch (object->type) { - default: - /* - * Even though the '\' object doesn't have its type set to - * UACPI_OBJECT_DEVICE, it is one. - * See namespace.c:make_object_for_predefined for reasoning. - */ - if (node != uacpi_namespace_root() || - object->type != UACPI_OBJECT_UNINITIALIZED) - return UACPI_NULL; - UACPI_FALLTHROUGH; case UACPI_OBJECT_DEVICE: case UACPI_OBJECT_PROCESSOR: case UACPI_OBJECT_THERMAL_ZONE: return object->address_space_handlers; + default: + return UACPI_NULL; } } @@ -180,6 +207,7 @@ static uacpi_operation_region *find_previous_region_link( uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node) { + uacpi_object *obj; uacpi_operation_region *region; uacpi_address_space_handler *handler; uacpi_status ret; @@ -188,7 +216,14 @@ uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node) if (uacpi_namespace_node_is_dangling(node)) return UACPI_STATUS_NAMESPACE_NODE_DANGLING; - region = uacpi_namespace_node_get_object(node)->op_region; + obj = uacpi_namespace_node_get_object_typed( + node, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_INVALID_ARGUMENT; + + region = obj->op_region; + if (region->handler == UACPI_NULL) return UACPI_STATUS_NO_HANDLER; if (region->state_flags & UACPI_OP_REGION_STATE_ATTACHED) @@ -198,19 +233,26 @@ uacpi_status uacpi_opregion_attach(uacpi_namespace_node *node) attach_data.region_node = node; attach_data.handler_context = handler->user_context; + uacpi_object_ref(obj); + uacpi_namespace_write_unlock(); ret = handler->callback(UACPI_REGION_OP_ATTACH, &attach_data); + uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) { uacpi_trace_region_error(node, "failed to attach a handler to", ret); + uacpi_object_unref(obj); return ret; } region->state_flags |= UACPI_OP_REGION_STATE_ATTACHED; region->user_context = attach_data.out_region_context; + uacpi_object_unref(obj); return ret; } -static void region_install_handler(uacpi_namespace_node *node, - uacpi_address_space_handler *handler) +static void region_install_handler( + uacpi_namespace_node *node, uacpi_address_space_handler *handler +) { uacpi_operation_region *region; @@ -222,14 +264,29 @@ static void region_install_handler(uacpi_namespace_node *node, handler->regions = region; } -void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node) +enum unreg { +
UNREG_NO = 0, + UNREG_YES, +}; + +static void region_uninstall_handler( + uacpi_namespace_node *node, enum unreg unreg +) { + uacpi_status ret; + uacpi_object *obj; uacpi_address_space_handler *handler; uacpi_operation_region *region, *link; - region = uacpi_namespace_node_get_object(node)->op_region; - handler = region->handler; + obj = uacpi_namespace_node_get_object_typed( + node, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_unlikely(obj == UACPI_NULL)) + return; + region = obj->op_region; + + handler = region->handler; if (handler == UACPI_NULL) return; @@ -247,14 +304,20 @@ void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node) out: if (region->state_flags & UACPI_OP_REGION_STATE_ATTACHED) { - uacpi_status ret; uacpi_region_detach_data detach_data = { .region_node = node, .region_context = region->user_context, .handler_context = handler->user_context, }; + uacpi_shareable_ref(node); + uacpi_namespace_write_unlock(); + ret = handler->callback(UACPI_REGION_OP_DETACH, &detach_data); + + uacpi_namespace_write_lock(); + uacpi_namespace_node_unref(node); + if (uacpi_unlikely_error(ret)) { uacpi_trace_region_error( node, "error during handler detach for", ret @@ -262,13 +325,47 @@ void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node) } } - if (region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED) + if ((region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED) && + unreg == UNREG_YES) { region_run_reg(node, ACPI_REG_DISCONNECT); + region->state_flags &= ~UACPI_OP_REGION_STATE_REG_EXECUTED; + } uacpi_address_space_handler_unref(region->handler); region->handler = UACPI_NULL; - region->state_flags &= ~(UACPI_OP_REGION_STATE_ATTACHED | - UACPI_OP_REGION_STATE_REG_EXECUTED); + region->state_flags &= ~UACPI_OP_REGION_STATE_ATTACHED; +} + +static uacpi_status upgrade_to_opregion_lock(void) +{ + uacpi_status ret; + + /* + * Drop the namespace lock, and reacquire it after the opregion lock + * so we keep the ordering with user API. 
+ */ + uacpi_namespace_write_unlock(); + + ret = uacpi_recursive_lock_acquire(&g_opregion_lock); + uacpi_namespace_write_lock(); + return ret; +} + +void uacpi_opregion_uninstall_handler(uacpi_namespace_node *node) +{ + if (uacpi_unlikely_error(upgrade_to_opregion_lock())) + return; + + region_uninstall_handler(node, UNREG_YES); + + uacpi_recursive_lock_release(&g_opregion_lock); +} + +uacpi_bool uacpi_address_space_handler_is_default( + uacpi_address_space_handler *handler +) +{ + return handler->flags & UACPI_ADDRESS_SPACE_HANDLER_DEFAULT; } enum opregion_iter_action { @@ -281,24 +378,26 @@ struct opregion_iter_ctx { uacpi_address_space_handler *handler; }; -static enum uacpi_ns_iteration_decision do_install_or_uninstall_handler( - uacpi_handle opaque, uacpi_namespace_node *node +static uacpi_iteration_decision do_install_or_uninstall_handler( + uacpi_handle opaque, uacpi_namespace_node *node, uacpi_u32 depth ) { struct opregion_iter_ctx *ctx = opaque; uacpi_address_space_handlers *handlers; uacpi_object *object; + UACPI_UNUSED(depth); + object = uacpi_namespace_node_get_object(node); if (object->type == UACPI_OBJECT_OPERATION_REGION) { uacpi_operation_region *region = object->op_region; if (region->space != ctx->handler->space) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; if (ctx->action == OPREGION_ITER_ACTION_INSTALL) { if (region->handler) - uacpi_opregion_uninstall_handler(node); + region_uninstall_handler(node, UNREG_NO); region_install_handler(node, ctx->handler); } else { @@ -307,39 +406,24 @@ static enum uacpi_ns_iteration_decision do_install_or_uninstall_handler( node, "handler mismatch for", UACPI_STATUS_INTERNAL_ERROR ); - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } - uacpi_opregion_uninstall_handler(node); + region_uninstall_handler(node, UNREG_NO); } - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } handlers = uacpi_node_get_address_space_handlers(node); if (handlers == UACPI_NULL) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; // Device already has a handler for this space installed if (find_handler(handlers, ctx->handler->space) != UACPI_NULL) - return UACPI_NS_ITERATION_DECISION_NEXT_PEER; + return UACPI_ITERATION_DECISION_NEXT_PEER; - return UACPI_NS_ITERATION_DECISION_CONTINUE; -} - -void uacpi_opregion_reg(uacpi_namespace_node *node) -{ - uacpi_operation_region *region; - - region = uacpi_namespace_node_get_object(node)->op_region; - if (region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED) - return; - - if (!space_needs_reg(region->space)) - return; - - if (region_run_reg(node, ACPI_REG_CONNECT) == UACPI_STATUS_OK) - region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED; + return UACPI_ITERATION_DECISION_CONTINUE; } struct reg_run_ctx { @@ -349,99 +433,184 @@ struct reg_run_ctx { uacpi_size reg_errors; }; -enum uacpi_ns_iteration_decision do_run_reg( - void *opaque, uacpi_namespace_node *node +static uacpi_iteration_decision do_run_reg( + void *opaque, uacpi_namespace_node *node, uacpi_u32 depth ) { struct reg_run_ctx *ctx = opaque; - uacpi_object *object; uacpi_operation_region *region; uacpi_status ret; + uacpi_bool was_regged; - object = uacpi_namespace_node_get_object(node); - if (object->type != UACPI_OBJECT_OPERATION_REGION) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + UACPI_UNUSED(depth); - region = object->op_region; + region = 
uacpi_namespace_node_get_object(node)->op_region; - if (region->space != ctx->space || - (region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED)) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + if (region->space != ctx->space) + return UACPI_ITERATION_DECISION_CONTINUE; - if (region->handler == UACPI_NULL && - ctx->connection_code != ACPI_REG_DISCONNECT) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + was_regged = region->state_flags & UACPI_OP_REGION_STATE_REG_EXECUTED; + if (was_regged == (ctx->connection_code == ACPI_REG_CONNECT)) + return UACPI_ITERATION_DECISION_CONTINUE; ret = region_run_reg(node, ctx->connection_code); + if (ctx->connection_code == ACPI_REG_DISCONNECT) + region->state_flags &= ~UACPI_OP_REGION_STATE_REG_EXECUTED; + if (ret == UACPI_STATUS_NOT_FOUND) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; + + if (ctx->connection_code == ACPI_REG_CONNECT) + region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED; ctx->reg_executed++; if (uacpi_unlikely_error(ret)) { ctx->reg_errors++; - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } - region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED; - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } -uacpi_status uacpi_reg_all_opregions( - uacpi_namespace_node *device_node, - enum uacpi_address_space space +static uacpi_status reg_or_unreg_all_opregions( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_u8 connection_code ) { uacpi_address_space_handlers *handlers; - uacpi_address_space_handler *this_handler; + uacpi_bool is_connect; + enum uacpi_permanent_only perm_only; struct reg_run_ctx ctx = { .space = space, - .connection_code = ACPI_REG_CONNECT, + .connection_code = connection_code, }; - if (!space_needs_reg(space)) - return UACPI_STATUS_OK; - handlers = uacpi_node_get_address_space_handlers(device_node); if (uacpi_unlikely(handlers == UACPI_NULL)) return UACPI_STATUS_INVALID_ARGUMENT; - this_handler = find_handler(handlers, space); - if (uacpi_unlikely(this_handler == UACPI_NULL)) + is_connect = connection_code == ACPI_REG_CONNECT; + if (uacpi_unlikely(is_connect && + find_handler(handlers, space) == UACPI_NULL)) return UACPI_STATUS_NO_HANDLER; - uacpi_namespace_for_each_node_depth_first( - device_node, do_run_reg, &ctx + /* + * We want to unreg non-permanent opregions as well, however, + * registering them is handled separately and should not be + * done by us. + */ + perm_only = is_connect ? UACPI_PERMANENT_ONLY_YES : UACPI_PERMANENT_ONLY_NO; + + uacpi_namespace_do_for_each_child( + device_node, do_run_reg, UACPI_NULL, + UACPI_OBJECT_OPERATION_REGION_BIT, UACPI_MAX_DEPTH_ANY, + UACPI_SHOULD_LOCK_NO, perm_only, &ctx ); uacpi_trace( - "activated all '%s' opregions controlled by '%.4s', " - "%zu _REG() calls (%zu errors)\n", uacpi_address_space_to_string(space), + "%sactivated all '%s' opregions controlled by '%.4s', " + "%zu _REG() calls (%zu errors)\n", + connection_code == ACPI_REG_CONNECT ? 
"" : "de", + uacpi_address_space_to_string(space), device_node->name.text, ctx.reg_executed, ctx.reg_errors ); return UACPI_STATUS_OK; } -uacpi_status uacpi_install_address_space_handler( +static uacpi_address_space_handlers *extract_handlers( + uacpi_namespace_node *node +) +{ + uacpi_object *handlers_obj; + + if (node == uacpi_namespace_root()) + return g_uacpi_rt_ctx.root_object->address_space_handlers; + + handlers_obj = uacpi_namespace_node_get_object_typed( + node, + UACPI_OBJECT_DEVICE_BIT | UACPI_OBJECT_THERMAL_ZONE_BIT | + UACPI_OBJECT_PROCESSOR_BIT + ); + if (uacpi_unlikely(handlers_obj == UACPI_NULL)) + return UACPI_NULL; + + return handlers_obj->address_space_handlers; +} + +uacpi_status uacpi_reg_all_opregions( + uacpi_namespace_node *device_node, + enum uacpi_address_space space +) +{ + uacpi_status ret; + + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); + + if (!space_needs_reg(space)) + return UACPI_STATUS_OK; + + ret = uacpi_recursive_lock_acquire(&g_opregion_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) { + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; + } + + if (uacpi_unlikely(extract_handlers(device_node) == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + ret = reg_or_unreg_all_opregions(device_node, space, ACPI_REG_CONNECT); + +out: + uacpi_namespace_write_unlock(); + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; +} + +uacpi_status uacpi_install_address_space_handler_with_flags( uacpi_namespace_node *device_node, enum uacpi_address_space space, - uacpi_region_handler handler, uacpi_handle handler_context + uacpi_region_handler handler, uacpi_handle handler_context, + uacpi_u16 flags ) { + uacpi_status ret; uacpi_address_space_handlers *handlers; uacpi_address_space_handler *this_handler, *new_handler; struct opregion_iter_ctx iter_ctx; - handlers = uacpi_node_get_address_space_handlers(device_node); - if (uacpi_unlikely(handlers == UACPI_NULL)) - return UACPI_STATUS_INVALID_ARGUMENT; + ret = uacpi_recursive_lock_acquire(&g_opregion_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) { + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; + } + + handlers = extract_handlers(device_node); + if (uacpi_unlikely(handlers == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } this_handler = find_handler(handlers, space); - if (this_handler != UACPI_NULL) - return UACPI_STATUS_ALREADY_EXISTS; + if (this_handler != UACPI_NULL) { + ret = UACPI_STATUS_ALREADY_EXISTS; + goto out; + } new_handler = uacpi_kernel_alloc(sizeof(*new_handler)); - if (new_handler == UACPI_NULL) - return UACPI_STATUS_OUT_OF_MEMORY; + if (new_handler == UACPI_NULL) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out; + } uacpi_shareable_init(new_handler); new_handler->next = handlers->head; @@ -449,17 +618,20 @@ uacpi_status uacpi_install_address_space_handler( new_handler->user_context = handler_context; new_handler->callback = handler; new_handler->regions = UACPI_NULL; + new_handler->flags = flags; handlers->head = new_handler; iter_ctx.handler = new_handler; iter_ctx.action = OPREGION_ITER_ACTION_INSTALL; - uacpi_namespace_for_each_node_depth_first( - device_node, do_install_or_uninstall_handler, &iter_ctx + uacpi_namespace_do_for_each_child( + device_node, do_install_or_uninstall_handler, UACPI_NULL, + UACPI_OBJECT_ANY_BIT, 
UACPI_MAX_DEPTH_ANY, UACPI_SHOULD_LOCK_NO, + UACPI_PERMANENT_ONLY_YES, &iter_ctx ); if (!space_needs_reg(space)) - return UACPI_STATUS_OK; + goto out; /* * Installing an early address space handler, obviously not possible to @@ -469,20 +641,27 @@ uacpi_status uacpi_install_address_space_handler( * the namespace. */ if (g_uacpi_rt_ctx.init_level < UACPI_INIT_LEVEL_NAMESPACE_LOADED) - return UACPI_STATUS_OK; - - /* - * _REG methods for global address space handlers (installed to root) - * get called during the namespace initialization, no reason - * to call them here manually as that will be done later by init code - * anyway. Just delay that work until later. - */ - if (device_node == uacpi_namespace_root() && - g_uacpi_rt_ctx.init_level == UACPI_INIT_LEVEL_NAMESPACE_LOADED) - return UACPI_STATUS_OK; + goto out; // Init level is NAMESPACE_INITIALIZED, so we can safely run _REG now - return uacpi_reg_all_opregions(device_node, space); + ret = reg_or_unreg_all_opregions( + device_node, space, ACPI_REG_CONNECT + ); + +out: + uacpi_namespace_write_unlock(); + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; +} + +uacpi_status uacpi_install_address_space_handler( + uacpi_namespace_node *device_node, enum uacpi_address_space space, + uacpi_region_handler handler, uacpi_handle handler_context +) +{ + return uacpi_install_address_space_handler_with_flags( + device_node, space, handler, handler_context, 0 + ); } uacpi_status uacpi_uninstall_address_space_handler( @@ -490,23 +669,40 @@ uacpi_status uacpi_uninstall_address_space_handler( enum uacpi_address_space space ) { + uacpi_status ret; uacpi_address_space_handlers *handlers; - uacpi_address_space_handler *handler, *prev_handler; + uacpi_address_space_handler *handler = UACPI_NULL, *prev_handler; struct opregion_iter_ctx iter_ctx; - handlers = uacpi_node_get_address_space_handlers(device_node); - if (uacpi_unlikely(handlers == UACPI_NULL)) - return UACPI_STATUS_INVALID_ARGUMENT; + ret = uacpi_recursive_lock_acquire(&g_opregion_lock); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) { + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; + } + + handlers = extract_handlers(device_node); + if (uacpi_unlikely(handlers == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } handler = find_handler(handlers, space); - if (uacpi_unlikely(handler == UACPI_NULL)) - return UACPI_STATUS_NO_HANDLER; + if (uacpi_unlikely(handler == UACPI_NULL)) { + ret = UACPI_STATUS_NO_HANDLER; + goto out; + } iter_ctx.handler = handler; iter_ctx.action = OPREGION_ITER_ACTION_UNINSTALL; - uacpi_namespace_for_each_node_depth_first( - device_node, do_install_or_uninstall_handler, &iter_ctx + uacpi_namespace_do_for_each_child( + device_node, do_install_or_uninstall_handler, UACPI_NULL, + UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, UACPI_SHOULD_LOCK_NO, + UACPI_PERMANENT_ONLY_NO, &iter_ctx ); prev_handler = handlers->head; @@ -514,7 +710,7 @@ uacpi_status uacpi_uninstall_address_space_handler( // Are we the last linked handler? if (prev_handler == handler) { handlers->head = handler->next; - goto out; + goto out_unreg; } // Nope, we're somewhere in the middle. Do a search. 
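/*
 * [Editor's illustration -- not part of this patch] A minimal sketch of how
 * a host driver would use uacpi_install_address_space_handler() from the
 * hunk above. The ec_io callback body and the use of
 * UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER are assumptions made for the
 * example; the callback shape mirrors the handler->callback(op, &data)
 * invocations seen elsewhere in this file.
 */
static uacpi_status ec_io(uacpi_region_op op, uacpi_handle op_data)
{
    uacpi_region_rw_data *rw = op_data;

    switch (op) {
    case UACPI_REGION_OP_ATTACH:
    case UACPI_REGION_OP_DETACH:
        // No per-region state to set up or tear down in this toy handler
        return UACPI_STATUS_OK;
    case UACPI_REGION_OP_READ:
        rw->value = 0; // a real handler would read the device at rw->offset
        return UACPI_STATUS_OK;
    case UACPI_REGION_OP_WRITE:
        // ...and would forward rw->value to the device here
        return UACPI_STATUS_OK;
    default:
        return UACPI_STATUS_INVALID_ARGUMENT;
    }
}

/*
 * Installed on the root node, the handler is inherited by every matching
 * region in the tree, and _REG(connect) runs automatically once the
 * namespace is loaded, as implemented above:
 *
 *     uacpi_install_address_space_handler(
 *         uacpi_namespace_root(), UACPI_ADDRESS_SPACE_EMBEDDED_CONTROLLER,
 *         ec_io, UACPI_NULL
 *     );
 */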
@@ -527,35 +723,159 @@ uacpi_status uacpi_uninstall_address_space_handler( prev_handler = prev_handler->next; } +out_unreg: + if (space_needs_reg(space)) + reg_or_unreg_all_opregions(device_node, space, ACPI_REG_DISCONNECT); + out: - uacpi_address_space_handler_unref(handler); - return UACPI_STATUS_OK; + if (handler != UACPI_NULL) + uacpi_address_space_handler_unref(handler); + + uacpi_namespace_write_unlock(); + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; } -uacpi_status uacpi_opregion_find_and_install_handler( - uacpi_namespace_node *node -) +uacpi_status uacpi_initialize_opregion_node(uacpi_namespace_node *node) { + uacpi_status ret; uacpi_namespace_node *parent = node->parent; + uacpi_operation_region *region; uacpi_address_space_handlers *handlers; uacpi_address_space_handler *handler; - uacpi_u8 space; - space = uacpi_namespace_node_get_object(node)->op_region->space; + ret = upgrade_to_opregion_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + region = uacpi_namespace_node_get_object(node)->op_region; + ret = UACPI_STATUS_NOT_FOUND; while (parent) { handlers = uacpi_node_get_address_space_handlers(parent); if (handlers != UACPI_NULL) { - handler = find_handler(handlers, space); + handler = find_handler(handlers, region->space); if (handler != UACPI_NULL) { region_install_handler(node, handler); - return UACPI_STATUS_OK; + ret = UACPI_STATUS_OK; + break; } } parent = parent->parent; } - return UACPI_STATUS_NOT_FOUND; + if (ret != UACPI_STATUS_OK) + goto out; + if (!space_needs_reg(region->space)) + goto out; + if (uacpi_get_current_init_level() < UACPI_INIT_LEVEL_NAMESPACE_LOADED) + goto out; + + if (region_run_reg(node, ACPI_REG_CONNECT) != UACPI_STATUS_NOT_FOUND) + region->state_flags |= UACPI_OP_REGION_STATE_REG_EXECUTED; + +out: + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; +} + +uacpi_status uacpi_dispatch_opregion_io( + uacpi_namespace_node *region_node, uacpi_u32 offset, uacpi_u8 byte_width, + uacpi_region_op op, uacpi_u64 *in_out +) +{ + uacpi_status ret; + uacpi_object *obj; + uacpi_operation_region *region; + uacpi_address_space_handler *handler; + uacpi_address_space space; + uacpi_u64 offset_end; + + uacpi_region_rw_data data = { + .byte_width = byte_width, + .offset = offset, + }; + + ret = upgrade_to_opregion_lock(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_opregion_attach(region_node); + if (uacpi_unlikely_error(ret)) { + uacpi_trace_region_error( + region_node, "unable to attach", ret + ); + goto out; + } + + obj = uacpi_namespace_node_get_object_typed( + region_node, UACPI_OBJECT_OPERATION_REGION_BIT + ); + if (uacpi_unlikely(obj == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out; + } + + region = obj->op_region; + space = region->space; + handler = region->handler; + + offset_end = offset; + offset_end += byte_width; + data.offset += region->offset; + + if (uacpi_unlikely(region->length < offset_end || + data.offset < offset)) { + const uacpi_char *path; + + path = uacpi_namespace_node_generate_absolute_path(region_node); + uacpi_error( + "out-of-bounds access to opregion %s[0x%"UACPI_PRIX64"->" + "0x%"UACPI_PRIX64"] at 0x%"UACPI_PRIX64" (idx=%u, width=%d)\n", + path, UACPI_FMT64(region->offset), + UACPI_FMT64(region->offset + region->length), + UACPI_FMT64(data.offset), offset, byte_width + ); + uacpi_free_dynamic_string(path); + ret = UACPI_STATUS_AML_OUT_OF_BOUNDS_INDEX; + goto out; + } + + data.handler_context = handler->user_context; + data.region_context = region->user_context; 
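+
+    /*
+     * [Editor's note] Worked example of the bounds check above: for a region
+     * declared at offset 0x1000 with length 0x100, an 8-byte access at
+     * region-relative offset 0xFC gives offset_end = 0x104 > 0x100, so the
+     * access is rejected with AML_OUT_OF_BOUNDS_INDEX before the handler is
+     * ever invoked. The 'data.offset < offset' comparison additionally
+     * catches 64-bit wrap-around when region->offset is added in.
+     */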
+ + if (op == UACPI_REGION_OP_WRITE) { + data.value = *in_out; + uacpi_trace_region_io( + region_node, space, op, data.offset, + byte_width, data.value + ); + } + + uacpi_object_ref(obj); + uacpi_namespace_write_unlock(); + + ret = handler->callback(op, &data); + + uacpi_namespace_write_lock(); + uacpi_object_unref(obj); + + if (uacpi_unlikely_error(ret)) { + uacpi_trace_region_error(region_node, "unable to perform IO", ret); + goto out; + } + + if (op == UACPI_REGION_OP_READ) { + *in_out = data.value; + uacpi_trace_region_io( + region_node, space, op, data.offset, + byte_width, data.value + ); + } + +out: + uacpi_recursive_lock_release(&g_opregion_lock); + return ret; } diff --git a/drivers/bus/acpi_new/source/osi.c b/drivers/bus/acpi_new/source/osi.c index ad7f0ec7a1fdd..c9f379e9e754f 100644 --- a/drivers/bus/acpi_new/source/osi.c +++ b/drivers/bus/acpi_new/source/osi.c @@ -3,6 +3,7 @@ #include #include #include +#include #include struct registered_interface { @@ -172,20 +173,22 @@ uacpi_status uacpi_install_interface( ) { struct registered_interface *interface; - uacpi_status ret = UACPI_STATUS_ALREADY_EXISTS; + uacpi_status ret; uacpi_char *name_copy; uacpi_size name_size; UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); - UACPI_MUTEX_ACQUIRE(interface_mutex); + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; interface = find_interface_unlocked(name); if (interface != UACPI_NULL) { - if (interface->disabled) { + if (interface->disabled) interface->disabled = UACPI_FALSE; - ret = UACPI_STATUS_OK; - } + + ret = UACPI_STATUS_ALREADY_EXISTS; goto out; } @@ -212,25 +215,27 @@ uacpi_status uacpi_install_interface( interface->dynamic = 1; interface->next = registered_interfaces; registered_interfaces = interface; - ret = UACPI_STATUS_OK; out: - UACPI_MUTEX_RELEASE(interface_mutex); + uacpi_release_native_mutex(interface_mutex); return ret; } uacpi_status uacpi_uninstall_interface(const uacpi_char *name) { struct registered_interface *cur, *prev; - uacpi_status ret = UACPI_STATUS_NOT_FOUND; + uacpi_status ret; UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); - UACPI_MUTEX_ACQUIRE(interface_mutex); + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; cur = registered_interfaces; prev = cur; + ret = UACPI_STATUS_NOT_FOUND; while (cur) { if (uacpi_strcmp(cur->name, name) != 0) { prev = cur; @@ -245,7 +250,7 @@ uacpi_status uacpi_uninstall_interface(const uacpi_char *name) prev->next = cur->next; } - UACPI_MUTEX_RELEASE(interface_mutex); + uacpi_release_native_mutex(interface_mutex); uacpi_free_dynamic_string(cur->name); uacpi_free(cur, sizeof(*cur)); return UACPI_STATUS_OK; @@ -265,7 +270,7 @@ uacpi_status uacpi_uninstall_interface(const uacpi_char *name) break; } - UACPI_MUTEX_RELEASE(interface_mutex); + uacpi_release_native_mutex(interface_mutex); return ret; } @@ -274,21 +279,23 @@ static uacpi_status configure_host_interface( ) { struct registered_interface *interface; - uacpi_status ret = UACPI_STATUS_NOT_FOUND; + uacpi_status ret; UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); - UACPI_MUTEX_ACQUIRE(interface_mutex); + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; interface = find_host_interface_unlocked(type); - if (interface == UACPI_NULL) + if (interface == UACPI_NULL) { + ret = UACPI_STATUS_NOT_FOUND; goto out; + } interface->disabled = !enabled; - ret = 
UACPI_STATUS_OK; - out: - UACPI_MUTEX_RELEASE(interface_mutex); + uacpi_release_native_mutex(interface_mutex); return ret; } @@ -306,20 +313,22 @@ uacpi_status uacpi_set_interface_query_handler( uacpi_interface_handler handler ) { - uacpi_status ret = UACPI_STATUS_ALREADY_EXISTS; + uacpi_status ret; UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); - UACPI_MUTEX_ACQUIRE(interface_mutex); + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; - if (interface_handler != UACPI_NULL && handler != UACPI_NULL) + if (interface_handler != UACPI_NULL && handler != UACPI_NULL) { + ret = UACPI_STATUS_ALREADY_EXISTS; goto out; + } interface_handler = handler; - ret = UACPI_STATUS_OK; - out: - UACPI_MUTEX_RELEASE(interface_mutex); + uacpi_release_native_mutex(interface_mutex); return ret; } @@ -327,11 +336,14 @@ uacpi_status uacpi_bulk_configure_interfaces( uacpi_interface_action action, uacpi_interface_kind kind ) { + uacpi_status ret; struct registered_interface *interface; UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); - UACPI_MUTEX_ACQUIRE(interface_mutex); + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; interface = registered_interfaces; while (interface) { @@ -341,16 +353,19 @@ uacpi_status uacpi_bulk_configure_interfaces( interface = interface->next; } - UACPI_MUTEX_RELEASE(interface_mutex); - return UACPI_STATUS_OK; + uacpi_release_native_mutex(interface_mutex); + return ret; } uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value) { + uacpi_status ret; struct registered_interface *interface; uacpi_bool is_supported = UACPI_FALSE; - UACPI_MUTEX_ACQUIRE(interface_mutex); + ret = uacpi_acquire_native_mutex(interface_mutex); + if (uacpi_unlikely_error(ret)) + return ret; interface = find_interface_unlocked(string); if (interface == UACPI_NULL) @@ -363,7 +378,7 @@ uacpi_status uacpi_handle_osi(const uacpi_char *string, uacpi_bool *out_value) if (interface_handler) is_supported = interface_handler(string, is_supported); out: - UACPI_MUTEX_RELEASE(interface_mutex); + uacpi_release_native_mutex(interface_mutex); *out_value = is_supported; return UACPI_STATUS_OK; } diff --git a/drivers/bus/acpi_new/source/registers.c b/drivers/bus/acpi_new/source/registers.c index 5e5d293af83e4..1efe2800d9346 100644 --- a/drivers/bus/acpi_new/source/registers.c +++ b/drivers/bus/acpi_new/source/registers.c @@ -106,7 +106,7 @@ static uacpi_status read_one( return uacpi_gas_read(reg, out_value); } - return uacpi_kernel_raw_io_read(*(uacpi_u32*)reg, byte_width, out_value); + return uacpi_system_io_read(*(uacpi_u32*)reg, byte_width, out_value); } static uacpi_status write_one( @@ -123,7 +123,7 @@ static uacpi_status write_one( return uacpi_gas_write(reg, in_value); } - return uacpi_kernel_raw_io_write(*(uacpi_u32*)reg, byte_width, in_value); + return uacpi_system_io_write(*(uacpi_u32*)reg, byte_width, in_value); } static uacpi_status do_read_register( @@ -351,6 +351,25 @@ static const struct register_field fields[UACPI_REGISTER_FIELD_MAX + 1] = { }, }; +static uacpi_handle g_reg_lock; + +uacpi_status uacpi_ininitialize_registers(void) +{ + g_reg_lock = uacpi_kernel_create_spinlock(); + if (uacpi_unlikely(g_reg_lock == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + return UACPI_STATUS_OK; +} + +void uacpi_deininitialize_registers(void) +{ + if (g_reg_lock != UACPI_NULL) { + uacpi_kernel_free_spinlock(g_reg_lock); + g_reg_lock = UACPI_NULL; + } +} + 
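+
+/*
+ * [Editor's note] The spinlock added above serializes the read-modify-write
+ * in uacpi_write_register_field() below. Without it, two concurrent writers
+ * to different fields of the same register could interleave and lose an
+ * update (register and field names are illustrative only):
+ *
+ *     CPU0: data = read(PM1_CNT)            // both see SLP_TYP == 0
+ *     CPU1: data = read(PM1_CNT)
+ *     CPU0: write(PM1_CNT, data | SLP_TYP)
+ *     CPU1: write(PM1_CNT, data | SLP_EN)   // silently drops CPU0's update
+ *
+ * Holding g_reg_lock across do_read_register()/do_write_register() makes
+ * each field update atomic with respect to other register writers.
+ */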
uacpi_status uacpi_read_register_field( enum uacpi_register_field field_enum, uacpi_u64 *out_value ) @@ -383,6 +402,7 @@ uacpi_status uacpi_write_register_field( const struct register_field *field; const struct register_spec *reg; uacpi_u64 data; + uacpi_cpu_flags flags; if (uacpi_unlikely(field_idx > UACPI_REGISTER_FIELD_MAX)) return UACPI_STATUS_INVALID_ARGUMENT; @@ -392,19 +412,28 @@ uacpi_status uacpi_write_register_field( in_value = (in_value << field->offset) & field->mask; + flags = uacpi_kernel_lock_spinlock(g_reg_lock); + if (reg->kind == REGISTER_ACCESS_KIND_WRITE_TO_CLEAR) { - if (in_value == 0) - return UACPI_STATUS_OK; + if (in_value == 0) { + ret = UACPI_STATUS_OK; + goto out; + } - return do_write_register(reg, in_value); + ret = do_write_register(reg, in_value); + goto out; } ret = do_read_register(reg, &data); if (uacpi_unlikely_error(ret)) - return ret; + goto out; data &= ~field->mask; data |= in_value; - return do_write_register(reg, data); + ret = do_write_register(reg, data); + +out: + uacpi_kernel_unlock_spinlock(g_reg_lock, flags); + return ret; } diff --git a/drivers/bus/acpi_new/source/resources.c b/drivers/bus/acpi_new/source/resources.c index 4c2cb93b75122..c30d492481c10 100644 --- a/drivers/bus/acpi_new/source/resources.c +++ b/drivers/bus/acpi_new/source/resources.c @@ -4,6 +4,7 @@ #include #include #include +#include #include #define LARGE_RESOURCE_BASE (ACPI_RESOURCE_END_TAG + 1) @@ -1416,7 +1417,7 @@ uacpi_status uacpi_for_each_aml_resource( ) { uacpi_status ret; - uacpi_resource_iteration_decision decision; + uacpi_iteration_decision decision; uacpi_u8 *data; uacpi_size bytes_left; uacpi_u16 resource_size; @@ -1466,9 +1467,9 @@ uacpi_status uacpi_for_each_aml_resource( decision = cb(user, data, resource_size, spec); switch (decision) { - case UACPI_RESOURCE_ITERATION_ABORT: + case UACPI_ITERATION_DECISION_BREAK: return UACPI_STATUS_OK; - case UACPI_RESOURCE_ITERATION_CONTINUE: { + case UACPI_ITERATION_DECISION_CONTINUE: { uacpi_size total_size = resource_size; total_size += aml_resource_kind_to_header_size[spec->resource_kind]; @@ -1487,7 +1488,7 @@ uacpi_status uacpi_for_each_aml_resource( return UACPI_STATUS_NO_RESOURCE_END_TAG; } -static uacpi_resource_iteration_decision find_end( +static uacpi_iteration_decision find_end( void *opaque, uacpi_u8 *data, uacpi_u16 resource_size, const struct uacpi_resource_spec *spec ) @@ -1496,10 +1497,10 @@ static uacpi_resource_iteration_decision find_end( UACPI_UNUSED(resource_size); if (spec->type != UACPI_AML_RESOURCE_END_TAG) - return UACPI_RESOURCE_ITERATION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; *out_ptr = data; - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; } static uacpi_size native_size_for_aml_resource( @@ -1551,7 +1552,7 @@ struct resource_conversion_ctx { // Opcodes that are the same for both AML->native and native->AML #define CONVERSION_OPCODES_COMMON(native_buf) \ case UACPI_RESOURCE_CONVERT_OPCODE_END: \ - return UACPI_RESOURCE_ITERATION_CONTINUE; \ + return UACPI_ITERATION_DECISION_CONTINUE; \ \ case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_8: \ case UACPI_RESOURCE_CONVERT_OPCODE_FIELD_16: \ @@ -1613,7 +1614,7 @@ struct resource_conversion_ctx { uacpi_error("tried to execute unreachable conversion opcode\n"); \ } \ ctx->st = UACPI_STATUS_INTERNAL_ERROR; \ - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; #define PTR_AT(ptr, offset) (void*)((uacpi_u8*)(ptr) + (offset)) @@ -1628,7 +1629,7 @@ struct resource_conversion_ctx { 
uacpi_error(prefix what " is OOB: %zu > %u\n", \ (uacpi_size)offset, (uacpi_u32)aml_size + header_size); \ ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; \ - return UACPI_RESOURCE_ITERATION_ABORT; \ + return UACPI_ITERATION_DECISION_BREAK; \ } #define CHECK_AML_OFFSET_BASE(offset, what) \ @@ -1637,7 +1638,7 @@ struct resource_conversion_ctx { "invalid " what " offset: %zu, expected at least %u\n", \ (uacpi_size)offset, base_aml_size_with_header); \ ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; \ - return UACPI_RESOURCE_ITERATION_ABORT; \ + return UACPI_ITERATION_DECISION_BREAK; \ } #define CHECK_AML_OFFSET(offset, what) \ @@ -1652,7 +1653,7 @@ static uacpi_resource_type aml_serial_to_native_type( UACPI_RESOURCE_TYPE_SERIAL_I2C_CONNECTION; } -static uacpi_resource_iteration_decision do_aml_resource_to_native( +static uacpi_iteration_decision do_aml_resource_to_native( void *opaque, uacpi_u8 *data, uacpi_u16 aml_size, const struct uacpi_resource_spec *spec ) @@ -1678,7 +1679,7 @@ static uacpi_resource_iteration_decision do_aml_resource_to_native( base_aml_size_with_header += header_size; if (insns == UACPI_NULL) - return UACPI_RESOURCE_ITERATION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; for (;;) { insn = &insns[pc++]; @@ -1785,7 +1786,7 @@ static uacpi_resource_iteration_decision do_aml_resource_to_native( if (src_string[length - 1] != '\0') { uacpi_error("non-null-terminated resource source string\n"); ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; } dst_string = PTR_AT(resource_end, accumulator); @@ -1891,7 +1892,7 @@ static uacpi_resource_iteration_decision do_aml_resource_to_native( "expected at least %d\n", type_length, extra_size ); ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; } /* @@ -1935,7 +1936,7 @@ static uacpi_status aml_resources_to_native( return ctx.st; } -static uacpi_resource_iteration_decision accumulate_native_buffer_size( +static uacpi_iteration_decision accumulate_native_buffer_size( void *opaque, uacpi_u8 *data, uacpi_u16 resource_size, const struct uacpi_resource_spec *spec ) @@ -1948,11 +1949,11 @@ static uacpi_resource_iteration_decision accumulate_native_buffer_size( uacpi_error("invalid native size for aml resource: %zu\n", size_for_this); ctx->st = UACPI_STATUS_AML_INVALID_RESOURCE; - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; } ctx->size += size_for_this; - return UACPI_RESOURCE_ITERATION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } static uacpi_status eval_resource_helper( @@ -2056,7 +2057,7 @@ uacpi_status uacpi_for_each_resource( { uacpi_size bytes_left = resources->length; uacpi_resource *current = resources->entries; - uacpi_resource_iteration_decision decision; + uacpi_iteration_decision decision; while (bytes_left) { // At least the head bytes @@ -2079,7 +2080,7 @@ uacpi_status uacpi_for_each_resource( decision = cb(user, current); - if (decision == UACPI_RESOURCE_ITERATION_ABORT || + if (decision == UACPI_ITERATION_DECISION_BREAK || current->type == UACPI_RESOURCE_TYPE_END_TAG) return UACPI_STATUS_OK; @@ -2124,7 +2125,7 @@ static uacpi_size aml_size_for_native_resource( aml_size_with_header(spec); } -static uacpi_resource_iteration_decision do_native_resource_to_aml( +static uacpi_iteration_decision do_native_resource_to_aml( void *opaque, uacpi_resource *resource ) { @@ -2159,7 +2160,7 @@ static uacpi_resource_iteration_decision 
do_native_resource_to_aml( } if (insns == UACPI_NULL) - return UACPI_RESOURCE_ITERATION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; for (;;) { insn = &insns[pc++]; @@ -2234,7 +2235,7 @@ static uacpi_resource_iteration_decision do_native_resource_to_aml( length ); ctx->st = UACPI_STATUS_INVALID_ARGUMENT; - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; } uacpi_memcpy(dst_string, src_string, length); @@ -2283,7 +2284,7 @@ static uacpi_resource_iteration_decision do_native_resource_to_aml( vendor_data_length ); ctx->st = UACPI_STATUS_INVALID_ARGUMENT; - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; } /* @@ -2339,7 +2340,7 @@ static uacpi_resource_iteration_decision do_native_resource_to_aml( ctx->st = validate_aml_serial_type(serial_type); if (uacpi_unlikely_error(ctx->st)) - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; if (uacpi_unlikely(resource->type != aml_serial_to_native_type(serial_type))) { @@ -2348,7 +2349,7 @@ static uacpi_resource_iteration_decision do_native_resource_to_aml( resource->type, aml_serial_to_native_type(serial_type) ); ctx->st = UACPI_STATUS_INVALID_ARGUMENT; - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; } // Rebase the end pointer & size now that we know the serial type @@ -2403,7 +2404,7 @@ static uacpi_status native_resources_to_aml( return ctx.st; } -static uacpi_resource_iteration_decision accumulate_aml_buffer_size( +static uacpi_iteration_decision accumulate_aml_buffer_size( void *opaque, uacpi_resource *resource ) { @@ -2419,11 +2420,11 @@ static uacpi_resource_iteration_decision accumulate_aml_buffer_size( uacpi_error("invalid aml size for native resource: %zu\n", size_for_this); ctx->st = UACPI_STATUS_INVALID_ARGUMENT; - return UACPI_RESOURCE_ITERATION_ABORT; + return UACPI_ITERATION_DECISION_BREAK; } ctx->size += size_for_this; - return UACPI_RESOURCE_ITERATION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } uacpi_status uacpi_native_resources_to_aml( @@ -2484,7 +2485,7 @@ uacpi_status uacpi_set_resources( { uacpi_status ret; uacpi_object *res_template; - uacpi_args args; + uacpi_object_array args; ret = uacpi_native_resources_to_aml(resources, &res_template); if (uacpi_unlikely_error(ret)) diff --git a/drivers/bus/acpi_new/source/shareable.c b/drivers/bus/acpi_new/source/shareable.c index 5fe69f93e7677..9587990809771 100644 --- a/drivers/bus/acpi_new/source/shareable.c +++ b/drivers/bus/acpi_new/source/shareable.c @@ -1,4 +1,5 @@ #include +#include #define BUGGED_REFCOUNT 0xFFFFFFFF @@ -13,15 +14,15 @@ uacpi_bool uacpi_bugged_shareable(uacpi_handle handle) struct uacpi_shareable *shareable = handle; if (uacpi_unlikely(shareable->reference_count == 0)) - shareable->reference_count = BUGGED_REFCOUNT; + uacpi_make_shareable_bugged(shareable); - return shareable->reference_count == BUGGED_REFCOUNT; + return uacpi_atomic_load32(&shareable->reference_count) == BUGGED_REFCOUNT; } void uacpi_make_shareable_bugged(uacpi_handle handle) { struct uacpi_shareable *shareable = handle; - shareable->reference_count = BUGGED_REFCOUNT; + uacpi_atomic_store32(&shareable->reference_count, BUGGED_REFCOUNT); } uacpi_u32 uacpi_shareable_ref(uacpi_handle handle) @@ -31,7 +32,7 @@ uacpi_u32 uacpi_shareable_ref(uacpi_handle handle) if (uacpi_unlikely(uacpi_bugged_shareable(shareable))) return BUGGED_REFCOUNT; - return shareable->reference_count++; + return uacpi_atomic_inc32(&shareable->reference_count) - 1; } uacpi_u32 
uacpi_shareable_unref(uacpi_handle handle) @@ -41,7 +42,7 @@ uacpi_u32 uacpi_shareable_unref(uacpi_handle handle) if (uacpi_unlikely(uacpi_bugged_shareable(shareable))) return BUGGED_REFCOUNT; - return shareable->reference_count--; + return uacpi_atomic_dec32(&shareable->reference_count) + 1; } void uacpi_shareable_unref_and_delete_if_last( @@ -61,5 +62,5 @@ void uacpi_shareable_unref_and_delete_if_last( uacpi_u32 uacpi_shareable_refcount(uacpi_handle handle) { struct uacpi_shareable *shareable = handle; - return shareable->reference_count; + return uacpi_atomic_load32(&shareable->reference_count); } diff --git a/drivers/bus/acpi_new/source/sleep.c b/drivers/bus/acpi_new/source/sleep.c index bca0938ed7335..1148c099ba009 100644 --- a/drivers/bus/acpi_new/source/sleep.c +++ b/drivers/bus/acpi_new/source/sleep.c @@ -268,7 +268,7 @@ static uacpi_status eval_sleep_helper( ) { uacpi_object *arg; - uacpi_args args; + uacpi_object_array args; uacpi_status ret; arg = uacpi_create_object(UACPI_OBJECT_INTEGER); @@ -550,9 +550,7 @@ uacpi_status uacpi_reboot(void) * For SystemIO we don't do any checking, and we ignore bit width * because that's what NT does. */ - ret = uacpi_kernel_raw_io_write( - reset_reg->address, 1, fadt->reset_value - ); + ret = uacpi_system_io_write(reset_reg->address, 1, fadt->reset_value); break; case UACPI_ADDRESS_SPACE_SYSTEM_MEMORY: ret = uacpi_write_register(UACPI_REGISTER_RESET, fadt->reset_value); diff --git a/drivers/bus/acpi_new/source/stdlib.c b/drivers/bus/acpi_new/source/stdlib.c index 992c0d8e41742..37cfc516f276e 100644 --- a/drivers/bus/acpi_new/source/stdlib.c +++ b/drivers/bus/acpi_new/source/stdlib.c @@ -1,5 +1,6 @@ #include #include +#include #ifndef uacpi_memcpy void *uacpi_memcpy(void *dest, const void *src, size_t count) @@ -116,6 +117,7 @@ struct fmt_spec { uacpi_u8 uppercase : 1; uacpi_u8 left_justify : 1; uacpi_u8 alternate_form : 1; + uacpi_u8 has_precision : 1; uacpi_char pad_char; uacpi_char prepend_char; uacpi_u64 min_width; @@ -394,7 +396,6 @@ uacpi_i32 uacpi_vsnprintf( struct fmt_spec fm = { .pad_char = ' ', .base = 10, - .precision = 0xFFFFFFFFFFFFFFFF, }; next_conversion = find_next_conversion(fmt, &next_offset); @@ -441,6 +442,8 @@ uacpi_i32 uacpi_vsnprintf( } if (consume(&fmt, ".")) { + fm.has_precision = UACPI_TRUE; + if (consume(&fmt, "*")) { fm.precision = uacpi_va_arg(vlist, int); } else { @@ -461,9 +464,10 @@ uacpi_i32 uacpi_vsnprintf( const uacpi_char *string = uacpi_va_arg(vlist, uacpi_char*); uacpi_size i; - for (i = 0; i < fm.precision && string[i]; ++i) + for (i = 0; (!fm.has_precision || i < fm.precision) && string[i]; ++i) write_one(&fb_state, string[i]); - + while (fm.has_precision && (i++ < fm.precision)) + write_one(&fb_state, fm.pad_char); continue; } @@ -662,15 +666,6 @@ uacpi_u8 uacpi_popcount(uacpi_u64 value) #ifndef UACPI_FORMATTED_LOGGING -#ifndef UACPI_PLAIN_LOG_BUFFER_SIZE - #define UACPI_PLAIN_LOG_BUFFER_SIZE 128 -#endif - -UACPI_BUILD_BUG_ON_WITH_MSG( - UACPI_PLAIN_LOG_BUFFER_SIZE < 16, - "configured log buffer size is too small (expecting at least 16 bytes)" -); - void uacpi_log(uacpi_log_level lvl, const uacpi_char *str, ...) 
{ uacpi_char buf[UACPI_PLAIN_LOG_BUFFER_SIZE]; diff --git a/drivers/bus/acpi_new/source/tables.c b/drivers/bus/acpi_new/source/tables.c index 49b45b05bc5cc..f5a178f715286 100644 --- a/drivers/bus/acpi_new/source/tables.c +++ b/drivers/bus/acpi_new/source/tables.c @@ -2,30 +2,16 @@ #include #include #include - -#ifndef UACPI_STATIC_TABLE_ARRAY_LEN - #define UACPI_STATIC_TABLE_ARRAY_LEN 16 -#endif +#include +#include DYNAMIC_ARRAY_WITH_INLINE_STORAGE( table_array, struct uacpi_installed_table, UACPI_STATIC_TABLE_ARRAY_LEN ) DYNAMIC_ARRAY_WITH_INLINE_STORAGE_IMPL( - table_array, struct uacpi_installed_table, + table_array, struct uacpi_installed_table, static ) -#define UACPI_MUTEX_ACQUIRE_IF_EXISTS(mtx) \ - do { \ - if ((mtx) != UACPI_NULL) \ - UACPI_MUTEX_ACQUIRE(mtx); \ - } while (0) - -#define UACPI_MUTEX_RELEASE_IF_EXISTS(mtx) \ - do { \ - if ((mtx) != UACPI_NULL) \ - UACPI_MUTEX_RELEASE(mtx); \ - } while (0) - static struct table_array tables; static uacpi_bool early_table_access; static uacpi_table_installation_handler installation_handler; @@ -44,6 +30,38 @@ UACPI_PACKED(struct uacpi_rxsdt { uacpi_u8 ptr_bytes[]; }) +static void dump_table_header( + uacpi_phys_addr phys_addr, void *hdr +) +{ + struct acpi_sdt_hdr *sdt = hdr; + + if (uacpi_signatures_match(hdr, ACPI_FACS_SIGNATURE)) { + uacpi_info( + "FACS 0x%016"UACPI_PRIX64" %08X\n", UACPI_FMT64(phys_addr), + sdt->length + ); + return; + } + + if (!uacpi_memcmp(hdr, ACPI_RSDP_SIGNATURE, sizeof(ACPI_RSDP_SIGNATURE) - 1)) { + struct acpi_rsdp *rsdp = hdr; + + uacpi_info( + "RSDP 0x%016"UACPI_PRIX64" %08X v%02X (%.6s)\n", + UACPI_FMT64(phys_addr), rsdp->length, rsdp->revision, + rsdp->oemid + ); + return; + } + + uacpi_info( + "%.4s 0x%016"UACPI_PRIX64" %08X v%02X (%.6s %.8s)\n", + sdt->signature, UACPI_FMT64(phys_addr), sdt->length, sdt->revision, + sdt->oemid, sdt->oem_table_id + ); +} + static uacpi_status initialize_from_rxsdt(uacpi_phys_addr rxsdt_addr, uacpi_size entry_size) { @@ -56,6 +74,8 @@ static uacpi_status initialize_from_rxsdt(uacpi_phys_addr rxsdt_addr, if (rxsdt == UACPI_NULL) return UACPI_STATUS_MAPPING_FAILED; + dump_table_header(rxsdt_addr, rxsdt); + ret = uacpi_check_table_signature(rxsdt, entry_size == 8 ? 
ACPI_XSDT_SIGNATURE : ACPI_RSDT_SIGNATURE); if (uacpi_unlikely_error(ret)) @@ -120,6 +140,8 @@ static uacpi_status initialize_from_rsdp(void) if (rsdp == UACPI_NULL) return UACPI_STATUS_MAPPING_FAILED; + dump_table_header(rsdp_phys, rsdp); + if (rsdp->revision > 1 && rsdp->xsdt_addr && !uacpi_check_flag(UACPI_FLAG_BAD_XSDT)) { @@ -153,6 +175,8 @@ uacpi_status uacpi_setup_early_table_access( if (uacpi_unlikely(buffer_size < sizeof(struct uacpi_installed_table))) return UACPI_STATUS_INVALID_ARGUMENT; + uacpi_logger_initialize(); + tables.dynamic_storage = temporary_buffer; tables.dynamic_capacity = buffer_size / sizeof(struct uacpi_installed_table); early_table_access = UACPI_TRUE; @@ -164,7 +188,7 @@ uacpi_status uacpi_setup_early_table_access( return ret; } -static enum uacpi_table_iteration_decision warn_if_early_referenced( +static uacpi_iteration_decision warn_if_early_referenced( void *user, struct uacpi_installed_table *tbl, uacpi_size idx ) { @@ -177,7 +201,7 @@ static enum uacpi_table_iteration_decision warn_if_early_referenced( ); } - return UACPI_TABLE_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } uacpi_status uacpi_initialize_tables(void) @@ -290,14 +314,11 @@ uacpi_status uacpi_set_table_installation_handler( uacpi_table_installation_handler handler ) { - uacpi_status ret = UACPI_STATUS_OK; + uacpi_status ret; - /* - * The mutex might not exist yet because uacpi_initialize_tables might not - * have been called at this point, allow that possibility since the user - * might want to install this handler early. - */ - UACPI_MUTEX_ACQUIRE_IF_EXISTS(table_mutex); + ret = uacpi_acquire_native_mutex_may_be_null(table_mutex); + if (uacpi_unlikely_error(ret)) + return ret; if (installation_handler != UACPI_NULL && handler != UACPI_NULL) goto out; @@ -305,7 +326,7 @@ uacpi_status uacpi_set_table_installation_handler( installation_handler = handler; out: - UACPI_MUTEX_RELEASE_IF_EXISTS(table_mutex); + uacpi_release_native_mutex_may_be_null(table_mutex); return ret; } @@ -539,7 +560,10 @@ static uacpi_status verify_and_install_table( if (uacpi_unlikely_error(ret)) return ret; + dump_table_header(phys_addr, hdr); + uacpi_memcpy(&table->hdr, hdr, sizeof(*hdr)); + table->reference_count = 0; table->phys_addr = phys_addr; table->ptr = virt_addr; table->flags = flags; @@ -548,7 +572,7 @@ static uacpi_status verify_and_install_table( if (out_table == UACPI_NULL) return UACPI_STATUS_OK; - table->reference_count = 1; + table->reference_count++; out_table->ptr = virt_addr; out_table->index = idx; return UACPI_STATUS_OK; @@ -662,11 +686,14 @@ uacpi_status uacpi_table_install_physical_with_origin( { uacpi_status ret; - UACPI_MUTEX_ACQUIRE_IF_EXISTS(table_mutex); + ret = uacpi_acquire_native_mutex_may_be_null(table_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + ret = table_install_physical_with_origin_unlocked( phys, origin, UACPI_NULL, out_table ); - UACPI_MUTEX_RELEASE_IF_EXISTS(table_mutex); + uacpi_release_native_mutex_may_be_null(table_mutex); return ret; } @@ -727,10 +754,13 @@ uacpi_status uacpi_table_install_with_origin( { uacpi_status ret; - UACPI_MUTEX_ACQUIRE_IF_EXISTS(table_mutex); + ret = uacpi_acquire_native_mutex_may_be_null(table_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + ret = table_install_with_origin_unlocked(virt, origin, out_table); - UACPI_MUTEX_RELEASE_IF_EXISTS(table_mutex); + uacpi_release_native_mutex_may_be_null(table_mutex); return ret; } @@ -760,14 +790,17 @@ uacpi_status uacpi_for_each_table( uacpi_size base_idx, 
uacpi_table_iteration_callback cb, void *user ) { + uacpi_status ret; uacpi_size idx; struct uacpi_installed_table *tbl; - enum uacpi_table_iteration_decision ret; + uacpi_iteration_decision dec; if (!early_table_access) UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); - UACPI_MUTEX_ACQUIRE_IF_EXISTS(table_mutex); + ret = uacpi_acquire_native_mutex_may_be_null(table_mutex); + if (uacpi_unlikely_error(ret)) + return ret; for (idx = base_idx; idx < table_array_size(&tables); ++idx) { tbl = table_array_at(&tables, idx); @@ -775,13 +808,13 @@ uacpi_status uacpi_for_each_table( if (tbl->flags & UACPI_TABLE_INVALID) continue; - ret = cb(user, tbl, idx); - if (ret == UACPI_TABLE_ITERATION_DECISION_BREAK) + dec = cb(user, tbl, idx); + if (dec == UACPI_ITERATION_DECISION_BREAK) break; } - UACPI_MUTEX_RELEASE_IF_EXISTS(table_mutex); - return UACPI_STATUS_OK; + uacpi_release_native_mutex_may_be_null(table_mutex); + return ret; } enum search_type { @@ -800,7 +833,7 @@ struct table_search_ctx { uacpi_status status; }; -static enum uacpi_table_iteration_decision do_search_tables( +static uacpi_iteration_decision do_search_tables( void *user, struct uacpi_installed_table *tbl, uacpi_size idx ) { @@ -813,27 +846,27 @@ static enum uacpi_table_iteration_decision do_search_tables( const uacpi_table_identifiers *id = ctx->id; if (!uacpi_signatures_match(&id->signature, tbl->hdr.signature)) - return UACPI_TABLE_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; if (id->oemid[0] != '\0' && uacpi_memcmp(id->oemid, tbl->hdr.oemid, sizeof(id->oemid)) != 0) - return UACPI_TABLE_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; if (id->oem_table_id[0] != '\0' && uacpi_memcmp(id->oem_table_id, tbl->hdr.oem_table_id, sizeof(id->oem_table_id)) != 0) - return UACPI_TABLE_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; break; } case SEARCH_TYPE_MATCH: if (!ctx->match_cb(tbl)) - return UACPI_TABLE_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; break; default: ctx->status = UACPI_STATUS_INVALID_ARGUMENT; - return UACPI_TABLE_ITERATION_DECISION_BREAK; + return UACPI_ITERATION_DECISION_BREAK; } ret = table_ref_unlocked(tbl); @@ -842,7 +875,7 @@ static enum uacpi_table_iteration_decision do_search_tables( out_table->ptr = tbl->ptr; out_table->index = idx; ctx->status = ret; - return UACPI_TABLE_ITERATION_DECISION_BREAK; + return UACPI_ITERATION_DECISION_BREAK; } /* @@ -850,10 +883,10 @@ static enum uacpi_table_iteration_decision do_search_tables( * existed and go on with the search. 
*/ if (ret == UACPI_STATUS_BAD_CHECKSUM) - return UACPI_TABLE_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; ctx->status = ret; - return UACPI_TABLE_ITERATION_DECISION_BREAK; + return UACPI_ITERATION_DECISION_BREAK; } uacpi_status uacpi_table_match( @@ -963,13 +996,16 @@ struct table_ctl_request { static uacpi_status table_ctl(uacpi_size idx, struct table_ctl_request *req) { - uacpi_status ret = UACPI_STATUS_OK; + uacpi_status ret; struct uacpi_installed_table *tbl; if (!early_table_access) UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); - UACPI_MUTEX_ACQUIRE_IF_EXISTS(table_mutex); + ret = uacpi_acquire_native_mutex_may_be_null(table_mutex); + if (uacpi_unlikely_error(ret)) + return ret; + if (uacpi_unlikely(table_array_size(&tables) <= idx)) { uacpi_error( "requested invalid table index %zu (%zu tables installed)\n", @@ -1029,7 +1065,7 @@ static uacpi_status table_ctl(uacpi_size idx, struct table_ctl_request *req) tbl->flags &= ~req->clear; out: - UACPI_MUTEX_RELEASE_IF_EXISTS(table_mutex); + uacpi_release_native_mutex_may_be_null(table_mutex); return ret; } @@ -1049,19 +1085,11 @@ uacpi_status uacpi_table_load_with_cause( if (uacpi_unlikely_error(ret)) return ret; - /* - * FIXME: - * The reference to the table is leaked intentionally as any created - * methods inside still reference the virtual mapping here. - * - * There are two solutions I can think of: - * 1. Allocate a heap buffer for method code and copy it there, then the - * methods no longer need to execute tables after the first pass. - * 2. Make methods explicitly take references to the table they're a part - * of. This would allows us to drop the leaked reference here after the - * table load. - */ - return uacpi_execute_table(req.out_tbl, cause); + ret = uacpi_execute_table(req.out_tbl, cause); + + req.type = TABLE_CTL_PUT; + table_ctl(idx, &req); + return ret; } uacpi_status uacpi_table_load(uacpi_size idx) diff --git a/drivers/bus/acpi_new/source/types.c b/drivers/bus/acpi_new/source/types.c index b0d225a74f438..29f77d70652d2 100644 --- a/drivers/bus/acpi_new/source/types.c +++ b/drivers/bus/acpi_new/source/types.c @@ -5,6 +5,7 @@ #include #include #include +#include #include const uacpi_char *uacpi_object_type_to_string(uacpi_object_type type) @@ -118,26 +119,38 @@ static uacpi_bool empty_buffer_or_string_alloc(uacpi_object *object) return buffer_alloc(object, 0); } -uacpi_bool uacpi_package_fill(uacpi_package *pkg, uacpi_size num_elements) +uacpi_bool uacpi_package_fill( + uacpi_package *pkg, uacpi_size num_elements, + enum uacpi_prealloc_objects prealloc_objects +) { uacpi_size i; + if (uacpi_unlikely(num_elements == 0)) + return UACPI_TRUE; + pkg->objects = uacpi_kernel_calloc(num_elements, sizeof(uacpi_handle)); if (uacpi_unlikely(pkg->objects == UACPI_NULL)) return UACPI_FALSE; pkg->count = num_elements; - for (i = 0; i < num_elements; ++i) { - pkg->objects[i] = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); - if (uacpi_unlikely(pkg->objects[i] == UACPI_NULL)) - return UACPI_FALSE; + if (prealloc_objects == UACPI_PREALLOC_OBJECTS_YES) { + for (i = 0; i < num_elements; ++i) { + pkg->objects[i] = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + + if (uacpi_unlikely(pkg->objects[i] == UACPI_NULL)) + return UACPI_FALSE; + } } return UACPI_TRUE; } -static uacpi_bool package_alloc(uacpi_object *obj, uacpi_size initial_size) +static uacpi_bool package_alloc( + uacpi_object *obj, uacpi_size initial_size, + enum uacpi_prealloc_objects prealloc +) { uacpi_package *pkg; @@ 
-147,11 +160,9 @@ static uacpi_bool package_alloc(uacpi_object *obj, uacpi_size initial_size) uacpi_shareable_init(pkg); - if (initial_size) { - if (uacpi_unlikely(!uacpi_package_fill(pkg, initial_size))) { - uacpi_free(pkg, sizeof(*pkg)); - return UACPI_FALSE; - } + if (uacpi_unlikely(!uacpi_package_fill(pkg, initial_size, prealloc))) { + uacpi_free(pkg, sizeof(*pkg)); + return UACPI_FALSE; } obj->package = pkg; @@ -160,7 +171,7 @@ static uacpi_bool package_alloc(uacpi_object *obj, uacpi_size initial_size) static uacpi_bool empty_package_alloc(uacpi_object *object) { - return package_alloc(object, 0); + return package_alloc(object, 0, UACPI_PREALLOC_OBJECTS_NO); } uacpi_mutex *uacpi_create_mutex(void) @@ -548,6 +559,8 @@ static void free_op_region(uacpi_handle handle) ); } + if (op_region->space == UACPI_ADDRESS_SPACE_TABLE_DATA) + uacpi_table_unref(&(struct uacpi_table) { .index = op_region->table_idx }); uacpi_free(op_region, sizeof(*op_region)); } @@ -612,9 +625,16 @@ static void free_method(uacpi_handle handle) method->mutex, free_mutex ); + if (!method->native_call && method->owns_code) + uacpi_free(method->code, method->size); uacpi_free(method, sizeof(*method)); } +void uacpi_method_unref(uacpi_control_method *method) +{ + uacpi_shareable_unref_and_delete_if_last(method, free_method); +} + static void free_object_storage(uacpi_object *obj) { switch (obj->type) { @@ -631,8 +651,7 @@ static void free_object_storage(uacpi_object *obj) free_buffer); break; case UACPI_OBJECT_METHOD: - uacpi_shareable_unref_and_delete_if_last(obj->method, - free_method); + uacpi_method_unref(obj->method); break; case UACPI_OBJECT_PACKAGE: uacpi_shareable_unref_and_delete_if_last(obj->package, @@ -692,14 +711,7 @@ static void make_chain_bugged(uacpi_object *obj) void uacpi_object_ref(uacpi_object *obj) { - uacpi_object *this_obj = obj; - while (obj) { - if (uacpi_unlikely(uacpi_bugged_shareable(obj))) { - make_chain_bugged(this_obj); - return; - } - uacpi_shareable_ref(obj); if (obj->type == UACPI_OBJECT_REFERENCE) @@ -728,25 +740,15 @@ static void free_chain(uacpi_object *obj) void uacpi_object_unref(uacpi_object *obj) { uacpi_object *this_obj = obj; - uacpi_u32 parent_refcount; if (!obj) return; - parent_refcount = obj->shareable.reference_count; - while (obj) { - if (uacpi_unlikely(uacpi_bugged_shareable(obj))) { - make_chain_bugged(this_obj); + if (uacpi_unlikely(uacpi_bugged_shareable(obj))) return; - } - if (uacpi_unlikely(uacpi_shareable_refcount(obj) < parent_refcount)) { - make_chain_bugged(this_obj); - return; - } - - parent_refcount = uacpi_shareable_unref(obj); + uacpi_shareable_unref(obj); if (obj->type == UACPI_OBJECT_REFERENCE) { obj = obj->inner_object; @@ -819,7 +821,8 @@ static uacpi_status deep_copy_package_no_recurse( uacpi_size i; uacpi_package *dst_package; - if (uacpi_unlikely(!package_alloc(dst, src->count))) + if (uacpi_unlikely(!package_alloc(dst, src->count, + UACPI_PREALLOC_OBJECTS_YES))) return UACPI_STATUS_OUT_OF_MEMORY; dst->type = UACPI_OBJECT_PACKAGE; @@ -954,6 +957,386 @@ void uacpi_object_detach_child(uacpi_object *parent) uacpi_object_unref(child); } +uacpi_object_type uacpi_object_get_type(uacpi_object *obj) +{ + return obj->type; +} + +uacpi_object_type_bits uacpi_object_get_type_bit(uacpi_object *obj) +{ + return (1u << obj->type); +} + +uacpi_bool uacpi_object_is(uacpi_object *obj, uacpi_object_type type) +{ + return obj->type == type; +} + +uacpi_bool uacpi_object_is_one_of( + uacpi_object *obj, uacpi_object_type_bits type_mask +) +{ + return 
(uacpi_object_get_type_bit(obj) & type_mask) != 0; +} + +#define TYPE_CHECK_USER_OBJ_RET(obj, type_bits, ret) \ + do { \ + if (uacpi_unlikely(obj == UACPI_NULL || \ + !uacpi_object_is_one_of(obj, type_bits))) \ + return ret; \ + } while (0) + +#define TYPE_CHECK_USER_OBJ(obj, type_bits) \ + TYPE_CHECK_USER_OBJ_RET(obj, type_bits, UACPI_STATUS_INVALID_ARGUMENT) + +#define ENSURE_VALID_USER_OBJ_RET(obj, ret) \ + do { \ + if (uacpi_unlikely(obj == UACPI_NULL)) \ + return ret; \ + } while (0) + +#define ENSURE_VALID_USER_OBJ(obj) \ + ENSURE_VALID_USER_OBJ_RET(obj, UACPI_STATUS_INVALID_ARGUMENT) + +uacpi_status uacpi_object_get_integer(uacpi_object *obj, uacpi_u64 *out) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_INTEGER_BIT); + + *out = obj->integer; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_assign_integer(uacpi_object *obj, uacpi_u64 value) +{ + ENSURE_VALID_USER_OBJ(obj); + + return uacpi_object_assign(obj, &(uacpi_object) { + .type = UACPI_OBJECT_INTEGER, + .integer = value, + }, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY); +} + +static uacpi_status uacpi_object_do_get_string_or_buffer( + uacpi_object *obj, uacpi_data_view *out, uacpi_u32 mask +) +{ + TYPE_CHECK_USER_OBJ(obj, mask); + + out->bytes = obj->buffer->data; + out->length = obj->buffer->size; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_get_string_or_buffer( + uacpi_object *obj, uacpi_data_view *out +) +{ + return uacpi_object_do_get_string_or_buffer( + obj, out, UACPI_OBJECT_STRING_BIT | UACPI_OBJECT_BUFFER_BIT + ); +} + +uacpi_status uacpi_object_get_string(uacpi_object *obj, uacpi_data_view *out) +{ + return uacpi_object_do_get_string_or_buffer( + obj, out, UACPI_OBJECT_STRING_BIT + ); +} + +uacpi_status uacpi_object_get_buffer(uacpi_object *obj, uacpi_data_view *out) +{ + return uacpi_object_do_get_string_or_buffer( + obj, out, UACPI_OBJECT_BUFFER_BIT + ); +} + +uacpi_bool uacpi_object_is_aml_namepath(uacpi_object *obj) +{ + TYPE_CHECK_USER_OBJ_RET(obj, UACPI_OBJECT_STRING_BIT, UACPI_FALSE); + return obj->flags == UACPI_STRING_KIND_PATH; +} + +uacpi_status uacpi_object_resolve_as_aml_namepath( + uacpi_object *obj, uacpi_namespace_node *scope, + uacpi_namespace_node **out_node +) +{ + uacpi_status ret; + uacpi_namespace_node *node; + + if (!uacpi_object_is_aml_namepath(obj)) + return UACPI_STATUS_INVALID_ARGUMENT; + + ret = uacpi_namespace_node_resolve_from_aml_namepath( + scope, obj->buffer->text, &node + ); + if (uacpi_likely_success(ret)) + *out_node = node; + return ret; +} + +static uacpi_status uacpi_object_do_assign_buffer( + uacpi_object *obj, uacpi_data_view in, uacpi_object_type type +) +{ + uacpi_status ret; + uacpi_object tmp_obj = { + .type = type, + }; + uacpi_size dst_buf_size = in.length; + + ENSURE_VALID_USER_OBJ(obj); + + if (type == UACPI_OBJECT_STRING && (in.length == 0 || + in.const_bytes[in.length - 1] != 0x00)) + dst_buf_size++; + + ret = buffer_alloc_and_store( + &tmp_obj, dst_buf_size, in.const_bytes, in.length + ); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_object_assign( + obj, &tmp_obj, UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY + ); + uacpi_shareable_unref_and_delete_if_last(tmp_obj.buffer, free_buffer); + + return ret; +} + +uacpi_status uacpi_object_assign_string(uacpi_object *obj, uacpi_data_view in) +{ + return uacpi_object_do_assign_buffer(obj, in, UACPI_OBJECT_STRING); +} + +uacpi_status uacpi_object_assign_buffer(uacpi_object *obj, uacpi_data_view in) +{ + return uacpi_object_do_assign_buffer(obj, in, UACPI_OBJECT_BUFFER); +} + +uacpi_object 
*uacpi_object_create_uninitialized(void) +{ + return uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); +} + +uacpi_status uacpi_object_create_integer_safe( + uacpi_u64 value, uacpi_overflow_behavior behavior, uacpi_object **out_obj +) +{ + uacpi_status ret; + uacpi_u8 bitness; + uacpi_object *obj; + + ret = uacpi_get_aml_bitness(&bitness); + if (uacpi_unlikely_error(ret)) + return ret; + + switch (behavior) { + case UACPI_OVERFLOW_TRUNCATE: + case UACPI_OVERFLOW_DISALLOW: + if (bitness == 32 && value > 0xFFFFFFFF) { + if (behavior == UACPI_OVERFLOW_DISALLOW) + return UACPI_STATUS_INVALID_ARGUMENT; + + value &= 0xFFFFFFFF; + } + UACPI_FALLTHROUGH; + case UACPI_OVERFLOW_ALLOW: + obj = uacpi_object_create_integer(value); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_STATUS_OUT_OF_MEMORY; + + *out_obj = obj; + return ret; + default: + return UACPI_STATUS_INVALID_ARGUMENT; + } +} + +uacpi_object *uacpi_object_create_integer(uacpi_u64 value) +{ + uacpi_object *obj; + + obj = uacpi_create_object(UACPI_OBJECT_INTEGER); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + obj->integer = value; + return obj; +} + +static uacpi_object *uacpi_object_do_create_string_or_buffer( + uacpi_data_view view, uacpi_object_type type +) +{ + uacpi_status ret; + uacpi_object *obj; + + obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_NULL; + + ret = uacpi_object_do_assign_buffer(obj, view, type); + if (uacpi_unlikely_error(ret)) { + uacpi_object_unref(obj); + return UACPI_NULL; + } + + return obj; +} + +uacpi_object *uacpi_object_create_string(uacpi_data_view view) +{ + return uacpi_object_do_create_string_or_buffer(view, UACPI_OBJECT_STRING); +} + +uacpi_object *uacpi_object_create_buffer(uacpi_data_view view) +{ + return uacpi_object_do_create_string_or_buffer(view, UACPI_OBJECT_BUFFER); +} + +uacpi_object *uacpi_object_create_cstring(const uacpi_char *str) +{ + return uacpi_object_create_string((uacpi_data_view) { + .const_text = str, + .length = uacpi_strlen(str) + 1, + }); +} + +uacpi_status uacpi_object_get_package( + uacpi_object *obj, uacpi_object_array *out +) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_PACKAGE_BIT); + + out->objects = obj->package->objects; + out->count = obj->package->count; + return UACPI_STATUS_OK; +} + +uacpi_object *uacpi_object_create_reference(uacpi_object *child) +{ + uacpi_object *obj; + + ENSURE_VALID_USER_OBJ_RET(child, UACPI_NULL); + + obj = uacpi_create_object(UACPI_OBJECT_REFERENCE); + if (uacpi_unlikely(obj == UACPI_NULL)) + return UACPI_NULL; + + uacpi_object_attach_child(obj, child); + obj->flags = UACPI_REFERENCE_KIND_ARG; + + return obj; +} + +uacpi_status uacpi_object_assign_reference( + uacpi_object *obj, uacpi_object *child +) +{ + uacpi_status ret; + + ENSURE_VALID_USER_OBJ(obj); + ENSURE_VALID_USER_OBJ(child); + + // First clear out the object + ret = uacpi_object_assign( + obj, &(uacpi_object) { .type = UACPI_OBJECT_UNINITIALIZED }, + UACPI_ASSIGN_BEHAVIOR_DEEP_COPY + ); + if (uacpi_unlikely_error(ret)) + return ret; + + obj->type = UACPI_OBJECT_REFERENCE; + uacpi_object_attach_child(obj, child); + obj->flags = UACPI_REFERENCE_KIND_ARG; + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_get_dereferenced( + uacpi_object *obj, uacpi_object **out +) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_REFERENCE_BIT); + + *out = obj->inner_object; + uacpi_shareable_ref(*out); + + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_get_processor_info( + uacpi_object *obj, 
uacpi_processor_info *out +) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_PROCESSOR_BIT); + + out->id = obj->processor->id; + out->block_address = obj->processor->block_address; + out->block_length = obj->processor->block_length; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_get_power_resource_info( + uacpi_object *obj, uacpi_power_resource_info *out +) +{ + TYPE_CHECK_USER_OBJ(obj, UACPI_OBJECT_POWER_RESOURCE_BIT); + + out->system_level = obj->power_resource.system_level; + out->resource_order = obj->power_resource.resource_order; + return UACPI_STATUS_OK; +} + +uacpi_status uacpi_object_assign_package( + uacpi_object *obj, uacpi_object_array in +) +{ + uacpi_status ret; + uacpi_size i; + uacpi_object tmp_obj = { + .type = UACPI_OBJECT_PACKAGE, + }; + + ENSURE_VALID_USER_OBJ(obj); + + if (uacpi_unlikely(!package_alloc(&tmp_obj, in.count, + UACPI_PREALLOC_OBJECTS_NO))) + return UACPI_STATUS_OUT_OF_MEMORY; + + obj->type = UACPI_OBJECT_PACKAGE; + + for (i = 0; i < in.count; ++i) { + tmp_obj.package->objects[i] = in.objects[i]; + uacpi_object_ref(tmp_obj.package->objects[i]); + } + + ret = uacpi_object_assign(obj, &tmp_obj, UACPI_ASSIGN_BEHAVIOR_SHALLOW_COPY); + uacpi_shareable_unref_and_delete_if_last(tmp_obj.package, free_package); + + return ret; +} + +uacpi_object *uacpi_object_create_package(uacpi_object_array in) +{ + uacpi_status ret; + uacpi_object *obj; + + obj = uacpi_object_create_uninitialized(); + if (uacpi_unlikely(obj == UACPI_NULL)) + return obj; + + ret = uacpi_object_assign_package(obj, in); + if (uacpi_unlikely_error(ret)) { + uacpi_object_unref(obj); + return UACPI_NULL; + } + + return obj; +} + uacpi_status uacpi_object_assign(uacpi_object *dst, uacpi_object *src, enum uacpi_assign_behavior behavior) { diff --git a/drivers/bus/acpi_new/source/uacpi.c b/drivers/bus/acpi_new/source/uacpi.c index 3d43e3f6986ae..e35555ae0ec73 100644 --- a/drivers/bus/acpi_new/source/uacpi.c +++ b/drivers/bus/acpi_new/source/uacpi.c @@ -1,5 +1,6 @@ #include #include +#include #include #include @@ -10,7 +11,9 @@ #include #include #include +#include #include +#include struct uacpi_runtime_context g_uacpi_rt_ctx = { 0 }; @@ -19,6 +22,9 @@ void uacpi_state_reset(void) uacpi_deinitialize_namespace(); uacpi_deinitialize_interfaces(); uacpi_deinitialize_events(); + uacpi_deinitialize_notify(); + uacpi_deinitialize_opregion(); + uacpi_deininitialize_registers(); uacpi_deinitialize_tables(); #ifndef UACPI_REDUCED_HARDWARE @@ -43,6 +49,14 @@ void uacpi_context_set_log_level(uacpi_log_level lvl) g_uacpi_rt_ctx.log_level = lvl; } +void uacpi_logger_initialize(void) +{ + if (g_uacpi_rt_ctx.log_level != 0) + return; + + uacpi_context_set_log_level(UACPI_DEFAULT_LOG_LEVEL); +} + void uacpi_context_set_loop_timeout(uacpi_u32 seconds) { if (seconds == 0) @@ -274,8 +288,8 @@ uacpi_status uacpi_initialize(uacpi_u64 flags) g_uacpi_rt_ctx.s0_sleep_typ_b = UACPI_SLEEP_TYP_INVALID; g_uacpi_rt_ctx.flags = flags; - if (g_uacpi_rt_ctx.log_level == 0) - uacpi_context_set_log_level(UACPI_DEFAULT_LOG_LEVEL); + uacpi_logger_initialize(); + if (g_uacpi_rt_ctx.loop_timeout_seconds == 0) uacpi_context_set_loop_timeout(UACPI_DEFAULT_LOOP_TIMEOUT_SECONDS); if (g_uacpi_rt_ctx.max_call_stack_depth == 0) @@ -285,6 +299,18 @@ uacpi_status uacpi_initialize(uacpi_u64 flags) if (uacpi_unlikely_error(ret)) goto out_fatal_error; + ret = uacpi_ininitialize_registers(); + if (uacpi_unlikely_error(ret)) + return ret; + + ret = uacpi_initialize_events_early(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + + ret = 
uacpi_initialize_opregion(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + ret = uacpi_initialize_interfaces(); if (uacpi_unlikely_error(ret)) goto out_fatal_error; @@ -293,6 +319,10 @@ uacpi_status uacpi_initialize(uacpi_u64 flags) if (uacpi_unlikely_error(ret)) goto out_fatal_error; + ret = uacpi_initialize_notify(); + if (uacpi_unlikely_error(ret)) + goto out_fatal_error; + uacpi_install_default_address_space_handlers(); if (!uacpi_check_flag(UACPI_FLAG_NO_ACPI_MODE)) { @@ -317,9 +347,8 @@ static void trace_table_load_failure( { uacpi_log_lvl( lvl, - "failed to load '%.4s' (OEM ID '%.6s' OEM Table ID '%.8s'): %s\n", - tbl->signature, tbl->oemid, tbl->oem_table_id, - uacpi_status_to_string(ret) + "failed to load "UACPI_PRI_TBL_HDR": %s\n", + UACPI_FMT_TBL_HDR(tbl), uacpi_status_to_string(ret) ); } @@ -332,10 +361,16 @@ static uacpi_bool match_ssdt_or_psdt(struct uacpi_installed_table *tbl) uacpi_signatures_match(tbl->hdr.signature, ACPI_PSDT_SIGNATURE); } +static uacpi_u64 elapsed_ms(uacpi_u64 begin_ns, uacpi_u64 end_ns) +{ + return (end_ns - begin_ns) / (1000ull * 1000ull); +} + uacpi_status uacpi_namespace_load(void) { struct uacpi_table tbl; uacpi_status ret; + uacpi_u64 begin_ts, end_ts; struct table_load_stats st = { 0 }; uacpi_size cur_index; @@ -347,6 +382,8 @@ uacpi_status uacpi_namespace_load(void) goto out_fatal_error; #endif + begin_ts = uacpi_kernel_get_nanoseconds_since_boot(); + ret = uacpi_table_find_by_signature(ACPI_DSDT_SIGNATURE, &tbl); if (uacpi_unlikely_error(ret)) { uacpi_error("unable to find DSDT: %s\n", uacpi_status_to_string(ret)); @@ -365,7 +402,7 @@ uacpi_status uacpi_namespace_load(void) ret = uacpi_table_match(cur_index, match_ssdt_or_psdt, &tbl); if (ret != UACPI_STATUS_OK) { if (uacpi_unlikely(ret != UACPI_STATUS_NOT_FOUND)) - return ret; + goto out_fatal_error; break; } @@ -379,23 +416,35 @@ uacpi_status uacpi_namespace_load(void) uacpi_table_unref(&tbl); } + end_ts = uacpi_kernel_get_nanoseconds_since_boot(); + if (uacpi_unlikely(st.failure_counter != 0)) { uacpi_info( - "loaded & executed %u AML blob%s (%u error%s)\n", st.load_counter, - st.load_counter > 1 ? "s" : "", st.failure_counter, + "loaded %u AML blob%s in %"UACPI_PRIu64"ms (%u error%s)\n", + st.load_counter, st.load_counter > 1 ? "s" : "", + UACPI_FMT64(elapsed_ms(begin_ts, end_ts)), st.failure_counter, st.failure_counter > 1 ? "s" : "" ); } else { + uacpi_u64 ops = g_uacpi_rt_ctx.opcodes_executed; + uacpi_u64 ops_per_sec = ops * UACPI_NANOSECONDS_PER_SEC; + + if (uacpi_likely(end_ts > begin_ts)) + ops_per_sec /= end_ts - begin_ts; + uacpi_info( - "successfully loaded & executed %u AML blob%s\n", st.load_counter, - st.load_counter > 1 ? "s" : "" + "successfully loaded %u AML blob%s, %"UACPI_PRIu64" ops in " + "%"UACPI_PRIu64"ms (avg %"UACPI_PRIu64"/s)\n", + st.load_counter, st.load_counter > 1 ? 
"s" : "", + UACPI_FMT64(ops), UACPI_FMT64(elapsed_ms(begin_ts, end_ts)), + UACPI_FMT64(ops_per_sec) ); } ret = uacpi_initialize_events(); if (uacpi_unlikely_error(ret)) { - uacpi_warn("event initialization failed: %s\n", - uacpi_status_to_string(ret)); + uacpi_error("event initialization failed: %s\n", + uacpi_status_to_string(ret)); goto out_fatal_error; } @@ -414,7 +463,6 @@ struct ns_init_context { uacpi_size sta_errors; uacpi_size devices; uacpi_size thermal_zones; - uacpi_size processors; }; static void ini_eval(struct ns_init_context *ctx, uacpi_namespace_node *node) @@ -448,48 +496,42 @@ static uacpi_status sta_eval( return ret; } -static enum uacpi_ns_iteration_decision do_sta_ini( - void *opaque, uacpi_namespace_node *node +static uacpi_iteration_decision do_sta_ini( + void *opaque, uacpi_namespace_node *node, uacpi_u32 depth ) { struct ns_init_context *ctx = opaque; uacpi_status ret; + uacpi_object_type type = UACPI_OBJECT_UNINITIALIZED; uacpi_u32 sta_ret; - uacpi_bool is_sb; - uacpi_object *obj; - // We don't care about aliases - if (node->flags & UACPI_NAMESPACE_NODE_FLAG_ALIAS) - return UACPI_NS_ITERATION_DECISION_NEXT_PEER; - - is_sb = node == uacpi_namespace_get_predefined( - UACPI_PREDEFINED_NAMESPACE_SB - ); + UACPI_UNUSED(depth); - obj = uacpi_namespace_node_get_object(node); - if (node != uacpi_namespace_root() && !is_sb) { - switch (obj->type) { - case UACPI_OBJECT_DEVICE: - ctx->devices++; - break; - case UACPI_OBJECT_THERMAL_ZONE: - ctx->thermal_zones++; - break; - case UACPI_OBJECT_PROCESSOR: - ctx->processors++; - break; - default: - return UACPI_NS_ITERATION_DECISION_CONTINUE; - } + // We don't care about aliases + if (uacpi_namespace_node_is_alias(node)) + return UACPI_ITERATION_DECISION_NEXT_PEER; + + ret = uacpi_namespace_node_type(node, &type); + switch (type) { + case UACPI_OBJECT_DEVICE: + case UACPI_OBJECT_PROCESSOR: + ctx->devices++; + break; + case UACPI_OBJECT_THERMAL_ZONE: + ctx->thermal_zones++; + break; + default: + if (node != uacpi_namespace_get_predefined(UACPI_PREDEFINED_NAMESPACE_TZ)) + return UACPI_ITERATION_DECISION_CONTINUE; } ret = sta_eval(ctx, node, &sta_ret); if (uacpi_unlikely_error(ret)) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; if (!(sta_ret & ACPI_STA_RESULT_DEVICE_PRESENT)) { if (!(sta_ret & ACPI_STA_RESULT_DEVICE_FUNCTIONING)) - return UACPI_NS_ITERATION_DECISION_NEXT_PEER; + return UACPI_ITERATION_DECISION_NEXT_PEER; /* * ACPI 6.5 specification: @@ -500,19 +542,19 @@ static enum uacpi_ns_iteration_decision do_sta_ini( * should continue enumeration below a device whose _STA returns this * bit combination. 
*/ - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } - if (node != uacpi_namespace_root() && !is_sb) - ini_eval(ctx, node); + ini_eval(ctx, node); - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; } uacpi_status uacpi_namespace_initialize(void) { struct ns_init_context ctx = { 0 }; uacpi_namespace_node *root; + uacpi_u64 begin_ts, end_ts; uacpi_address_space_handlers *handlers; uacpi_address_space_handler *handler; uacpi_status ret = UACPI_STATUS_OK; @@ -533,6 +575,8 @@ uacpi_status uacpi_namespace_initialize(void) root = uacpi_namespace_root(); + begin_ts = uacpi_kernel_get_nanoseconds_since_boot(); + // Step 1 - Execute \_INI ini_eval(&ctx, root); @@ -550,18 +594,26 @@ uacpi_status uacpi_namespace_initialize(void) handler = handlers->head; while (handler) { - uacpi_reg_all_opregions(root, handler->space); + if (uacpi_address_space_handler_is_default(handler)) + uacpi_reg_all_opregions(root, handler->space); + handler = handler->next; } } // Step 4 - Run all other _STA and _INI methods - uacpi_namespace_for_each_node_depth_first(root, do_sta_ini, &ctx); + uacpi_namespace_for_each_child( + root, do_sta_ini, UACPI_NULL, + UACPI_OBJECT_ANY_BIT, UACPI_MAX_DEPTH_ANY, &ctx + ); + + end_ts = uacpi_kernel_get_nanoseconds_since_boot(); uacpi_info( - "namespace initialization done: " - "%zu devices, %zu thermal zones, %zu processors\n", - ctx.devices, ctx.thermal_zones, ctx.processors + "namespace initialization done in %"UACPI_PRIu64"ms: " + "%zu devices, %zu thermal zones\n", + UACPI_FMT64(elapsed_ms(begin_ts, end_ts)), + ctx.devices, ctx.thermal_zones ); uacpi_trace( @@ -580,35 +632,104 @@ uacpi_status uacpi_namespace_initialize(void) return ret; } -uacpi_status -uacpi_eval(uacpi_namespace_node *parent, const uacpi_char *path, - const uacpi_args *args, uacpi_object **ret) +uacpi_status uacpi_eval( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **out_obj +) { struct uacpi_namespace_node *node; + uacpi_control_method *method; uacpi_object *obj; + uacpi_status ret = UACPI_STATUS_INVALID_ARGUMENT; - if (parent == UACPI_NULL && path == UACPI_NULL) - return UACPI_STATUS_INVALID_ARGUMENT; + if (uacpi_unlikely(parent == UACPI_NULL && path == UACPI_NULL)) + return ret; + + ret = uacpi_namespace_read_lock(); + if (uacpi_unlikely_error(ret)) + return ret; if (path != UACPI_NULL) { - node = uacpi_namespace_node_find(parent, path); - if (node == UACPI_NULL) - return UACPI_STATUS_NOT_FOUND; + ret = uacpi_namespace_node_resolve( + parent, path, UACPI_SHOULD_LOCK_NO, + UACPI_MAY_SEARCH_ABOVE_PARENT_NO, UACPI_PERMANENT_ONLY_YES, + &node + ); + if (uacpi_unlikely_error(ret)) + goto out_read_unlock; } else { node = parent; } obj = uacpi_namespace_node_get_object(node); + if (uacpi_unlikely(obj == UACPI_NULL)) { + ret = UACPI_STATUS_INVALID_ARGUMENT; + goto out_read_unlock; + } + if (obj->type != UACPI_OBJECT_METHOD) { - if (uacpi_likely(ret != UACPI_NULL)) { - *ret = obj; - uacpi_object_ref(obj); + uacpi_object *new_obj; + + if (uacpi_unlikely(out_obj == UACPI_NULL)) + goto out_read_unlock; + + new_obj = uacpi_create_object(UACPI_OBJECT_UNINITIALIZED); + if (uacpi_unlikely(new_obj == UACPI_NULL)) { + ret = UACPI_STATUS_OUT_OF_MEMORY; + goto out_read_unlock; } - return UACPI_STATUS_OK; + ret = uacpi_object_assign( + new_obj, obj, UACPI_ASSIGN_BEHAVIOR_DEEP_COPY + ); + if (uacpi_unlikely_error(ret)) { + uacpi_object_unref(new_obj); + goto out_read_unlock; + } + *out_obj = 
new_obj; + + out_read_unlock: + uacpi_namespace_read_unlock(); + return ret; } - return uacpi_execute_control_method(node, obj->method, args, ret); + method = obj->method; + uacpi_shareable_ref(method); + uacpi_namespace_read_unlock(); + + // Upgrade to a write-lock since we're about to run a method + ret = uacpi_namespace_write_lock(); + if (uacpi_unlikely_error(ret)) + goto out_no_write_lock; + + ret = uacpi_execute_control_method(node, method, args, out_obj); + uacpi_namespace_write_unlock(); + +out_no_write_lock: + uacpi_method_unref(method); + return ret; +} + +uacpi_status uacpi_eval_simple( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval(parent, path, UACPI_NULL, ret); +} + +uacpi_status uacpi_execute( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args +) +{ + return uacpi_eval(parent, path, args, UACPI_NULL); +} + +uacpi_status uacpi_execute_simple( + uacpi_namespace_node *parent, const uacpi_char *path +) +{ + return uacpi_eval(parent, path, UACPI_NULL, UACPI_NULL); } #define TRACE_BAD_RET(path_fmt, type, ...) \ @@ -626,7 +747,7 @@ uacpi_eval(uacpi_namespace_node *parent, const uacpi_char *path, static void trace_invalid_return_type( uacpi_namespace_node *parent, const uacpi_char *path, - uacpi_u32 expected_mask, uacpi_object_type actual_type + uacpi_object_type_bits expected_mask, uacpi_object_type actual_type ) { const uacpi_char *abs_path; @@ -658,7 +779,8 @@ static void trace_invalid_return_type( uacpi_status uacpi_eval_typed( uacpi_namespace_node *parent, const uacpi_char *path, - const uacpi_args *args, uacpi_u32 ret_mask, uacpi_object **out_obj + const uacpi_object_array *args, uacpi_object_type_bits ret_mask, + uacpi_object **out_obj ) { uacpi_status ret; @@ -685,9 +807,17 @@ uacpi_status uacpi_eval_typed( return UACPI_STATUS_OK; } +uacpi_status uacpi_eval_simple_typed( + uacpi_namespace_node *parent, const uacpi_char *path, + uacpi_object_type_bits ret_mask, uacpi_object **ret +) +{ + return uacpi_eval_typed(parent, path, UACPI_NULL, ret_mask, ret); +} + uacpi_status uacpi_eval_integer( uacpi_namespace_node *parent, const uacpi_char *path, - const uacpi_args *args, uacpi_u64 *out_value + const uacpi_object_array *args, uacpi_u64 *out_value ) { uacpi_object *int_obj; @@ -704,3 +834,98 @@ uacpi_status uacpi_eval_integer( return UACPI_STATUS_OK; } + +uacpi_status uacpi_eval_simple_integer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_u64 *out_value +) +{ + return uacpi_eval_integer(parent, path, UACPI_NULL, out_value); +} + +uacpi_status uacpi_eval_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, args, + UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT, + ret + ); +} + +uacpi_status uacpi_eval_simple_buffer_or_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, UACPI_NULL, + UACPI_OBJECT_BUFFER_BIT | UACPI_OBJECT_STRING_BIT, + ret + ); +} + +uacpi_status uacpi_eval_string( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, args, UACPI_OBJECT_STRING_BIT, ret + ); +} + +uacpi_status uacpi_eval_simple_string( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, UACPI_NULL, 
UACPI_OBJECT_STRING_BIT, ret + ); +} + +uacpi_status uacpi_eval_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, args, UACPI_OBJECT_BUFFER_BIT, ret + ); +} + +uacpi_status uacpi_eval_simple_buffer( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, UACPI_NULL, UACPI_OBJECT_BUFFER_BIT, ret + ); +} + +uacpi_status uacpi_eval_package( + uacpi_namespace_node *parent, const uacpi_char *path, + const uacpi_object_array *args, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, args, UACPI_OBJECT_PACKAGE_BIT, ret + ); +} + +uacpi_status uacpi_eval_simple_package( + uacpi_namespace_node *parent, const uacpi_char *path, uacpi_object **ret +) +{ + return uacpi_eval_typed( + parent, path, UACPI_NULL, UACPI_OBJECT_PACKAGE_BIT, ret + ); +} + +uacpi_status uacpi_get_aml_bitness(uacpi_u8 *out_bitness) +{ + UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_SUBSYSTEM_INITIALIZED); + + *out_bitness = g_uacpi_rt_ctx.is_rev1 ? 32 : 64; + return UACPI_STATUS_OK; +} diff --git a/drivers/bus/acpi_new/source/utilities.c b/drivers/bus/acpi_new/source/utilities.c index ab00c86903e9e..33da1c12bd303 100644 --- a/drivers/bus/acpi_new/source/utilities.c +++ b/drivers/bus/acpi_new/source/utilities.c @@ -1,10 +1,11 @@ #include #include +#include #include #include #include -#include +#include void uacpi_eisa_id_to_string(uacpi_u32 id, uacpi_char *out_string) { @@ -927,33 +928,26 @@ struct device_find_ctx { uacpi_iteration_callback cb; }; -enum uacpi_ns_iteration_decision find_one_device( - void *opaque, uacpi_namespace_node *node +static uacpi_iteration_decision find_one_device( + void *opaque, uacpi_namespace_node *node, uacpi_u32 depth ) { struct device_find_ctx *ctx = opaque; uacpi_status ret; uacpi_u32 flags; - uacpi_object *obj; - - obj = uacpi_namespace_node_get_object(node); - if (uacpi_unlikely(obj == UACPI_NULL)) - return UACPI_NS_ITERATION_DECISION_CONTINUE; - if (obj->type != UACPI_OBJECT_DEVICE) - return UACPI_NS_ITERATION_DECISION_CONTINUE; if (!uacpi_device_matches_pnp_id(node, ctx->target_hids)) - return UACPI_NS_ITERATION_DECISION_CONTINUE; + return UACPI_ITERATION_DECISION_CONTINUE; ret = uacpi_eval_sta(node, &flags); if (uacpi_unlikely_error(ret)) - return UACPI_NS_ITERATION_DECISION_NEXT_PEER; + return UACPI_ITERATION_DECISION_NEXT_PEER; if (!(flags & ACPI_STA_RESULT_DEVICE_PRESENT) && !(flags & ACPI_STA_RESULT_DEVICE_FUNCTIONING)) - return UACPI_NS_ITERATION_DECISION_NEXT_PEER; + return UACPI_ITERATION_DECISION_NEXT_PEER; - return ctx->cb(ctx->user, node); + return ctx->cb(ctx->user, node, depth); } @@ -970,8 +964,10 @@ uacpi_status uacpi_find_devices_at( .cb = cb, }; - uacpi_namespace_for_each_node_depth_first(parent, find_one_device, &ctx); - return UACPI_STATUS_OK; + return uacpi_namespace_for_each_child( + parent, find_one_device, UACPI_NULL, UACPI_OBJECT_DEVICE_BIT, + UACPI_MAX_DEPTH_ANY, &ctx + ); } uacpi_status uacpi_find_devices( @@ -989,7 +985,7 @@ uacpi_status uacpi_set_interrupt_model(uacpi_interrupt_model model) { uacpi_status ret; uacpi_object *arg; - uacpi_args args; + uacpi_object_array args; UACPI_ENSURE_INIT_LEVEL_AT_LEAST(UACPI_INIT_LEVEL_NAMESPACE_LOADED); @@ -1085,12 +1081,12 @@ uacpi_status uacpi_get_pci_routing_table( elem_obj = entry_pkg->objects[2]; switch (elem_obj->type) { case UACPI_OBJECT_STRING: - entry->source = uacpi_namespace_node_resolve_from_aml_namepath( - parent, 
elem_obj->buffer->text
+            ret = uacpi_object_resolve_as_aml_namepath(
+                elem_obj, parent, &entry->source
             );
-            if (uacpi_unlikely(entry->source == UACPI_NULL)) {
-                uacpi_error("unable to lookup _PRT source: %s\n",
-                            elem_obj->buffer->text);
+            if (uacpi_unlikely_error(ret)) {
+                uacpi_error("unable to lookup _PRT source %s: %s\n",
+                            elem_obj->buffer->text, uacpi_status_to_string(ret));
                 goto out_bad_encoding;
             }
             break;
diff --git a/drivers/bus/acpi_new/uacpi_layer.c b/drivers/bus/acpi_new/uacpi_layer.c
index 29cd07bcc04c9..481762e63d6cf 100644
--- a/drivers/bus/acpi_new/uacpi_layer.c
+++ b/drivers/bus/acpi_new/uacpi_layer.c
@@ -86,6 +86,10 @@ uacpi_status uacpi_kernel_pci_read(
     return 0;
 }
 
+uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void)
+{
+    return 0;
+}
 uacpi_status uacpi_kernel_pci_write(
     uacpi_pci_address *address, uacpi_size offset,
     uacpi_u8 byte_width, uacpi_u64 value
@@ -261,7 +265,7 @@ void uacpi_kernel_free_event(uacpi_handle Handle)
 {
     __debugbreak();
 }
-uacpi_bool uacpi_kernel_acquire_mutex(uacpi_handle Hadnle, uacpi_u16 Mutex)
+uacpi_status uacpi_kernel_acquire_mutex(uacpi_handle Handle, uacpi_u16 Mutex)
 {
     __debugbreak();
     return 0;
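
The uacpi_kernel_get_nanoseconds_since_boot hook added above is stubbed to return 0,
which the elapsed_ms/ops-per-second paths in uacpi.c tolerate (they guard against
end_ts <= begin_ts). A minimal sketch of one possible future ReactOS implementation,
assuming the standard NT KeQueryInterruptTime API; this is not part of the patch:

    #include <ntddk.h>

    #include <uacpi/kernel_api.h>

    uacpi_u64 uacpi_kernel_get_nanoseconds_since_boot(void)
    {
        /*
         * KeQueryInterruptTime() counts 100ns units since boot; its
         * timer-tick granularity is coarse, but sufficient for the
         * millisecond-level load statistics uACPI derives from it.
         */
        return (uacpi_u64)KeQueryInterruptTime() * 100;
    }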
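For reference, a caller-side usage sketch of the public API surface this patch
migrates to: the unified uacpi_iteration_decision callbacks now receive a depth
argument, and the uacpi_eval_simple_* helpers wrap uacpi_eval for argument-less
evaluations. All names below are hypothetical illustrations, not code from this
patch; "PNP0103" is the standard HPET hardware ID, and \_SB.PCI0._BBN is an
assumed example path:

    #include <uacpi/uacpi.h>
    #include <uacpi/utilities.h>

    /* Hypothetical callback: counts every match, never aborts the walk */
    static uacpi_iteration_decision count_one(
        void *user, uacpi_namespace_node *node, uacpi_u32 depth
    )
    {
        (void)node;
        (void)depth;

        ++*(uacpi_size *)user;
        return UACPI_ITERATION_DECISION_CONTINUE;
    }

    static void example_queries(void)
    {
        uacpi_size hpet_count = 0;
        uacpi_u64 bus_number = 0;

        /* Count the present/functioning HPET devices in the namespace */
        uacpi_find_devices("PNP0103", count_one, &hpet_count);

        /* Argument-less integer evaluation via the new helper */
        if (uacpi_eval_simple_integer(
                UACPI_NULL, "\\_SB.PCI0._BBN", &bus_number) == UACPI_STATUS_OK) {
            /* bus_number now holds the root bridge's base bus number */
        }
    }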