From a41806ffbb143599eee3bf5fe941038b696dc8ff Mon Sep 17 00:00:00 2001 From: Wang Fenjin Date: Tue, 15 Nov 2022 07:33:32 +0800 Subject: [PATCH] update to 0.6.0 (#97) Change-Id: I71dd595130e8ed4103cfc06897392074ada41075 --- .github/workflows/rust.yaml | 2 +- Cargo.toml | 4 +- libduckdb-sys/Cargo.toml | 2 +- .../duckdb/bindgen_bundled_version.rs | 19009 +- libduckdb-sys/duckdb/duckdb.cpp | 217819 ++++++++------- libduckdb-sys/duckdb/duckdb.h | 60 +- libduckdb-sys/duckdb/duckdb.hpp | 18202 +- libduckdb-sys/upgrade.sh | 2 +- src/lib.rs | 9 +- src/types/chrono.rs | 32 +- 10 files changed, 136683 insertions(+), 118458 deletions(-) diff --git a/.github/workflows/rust.yaml b/.github/workflows/rust.yaml index bdbf4059..ab89a96b 100644 --- a/.github/workflows/rust.yaml +++ b/.github/workflows/rust.yaml @@ -38,7 +38,7 @@ jobs: name: Download duckdb with: repository: "duckdb/duckdb" - tag: "v0.5.1" + tag: "v0.6.0" fileName: ${{ matrix.duckdb }} out-file-path: . diff --git a/Cargo.toml b/Cargo.toml index 32cf3085..8ab4c38a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "duckdb" -version = "0.5.1" +version = "0.6.0" authors = ["wangfenjin "] edition = "2021" description = "Ergonomic wrapper for DuckDB" @@ -69,7 +69,7 @@ tempdir = "0.3.7" [dependencies.libduckdb-sys] path = "libduckdb-sys" -version = "0.5.1" +version = "0.6.0" [package.metadata.docs.rs] features = [] diff --git a/libduckdb-sys/Cargo.toml b/libduckdb-sys/Cargo.toml index 4fdd01b4..d7e83598 100644 --- a/libduckdb-sys/Cargo.toml +++ b/libduckdb-sys/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libduckdb-sys" -version = "0.5.1" +version = "0.6.0" authors = ["wangfenjin "] edition = "2021" build = "build.rs" diff --git a/libduckdb-sys/duckdb/bindgen_bundled_version.rs b/libduckdb-sys/duckdb/bindgen_bundled_version.rs index 0ba42c10..9c98afe9 100644 --- a/libduckdb-sys/duckdb/bindgen_bundled_version.rs +++ b/libduckdb-sys/duckdb/bindgen_bundled_version.rs @@ -1,4 +1,4 @@ -/* automatically generated by rust-bindgen 0.60.1 */ +/* automatically generated by rust-bindgen 0.61.0 */ #[repr(C)] #[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] @@ -84,9 +84,9 @@ pub const DUCKDB_API_0_3_1: u32 = 1; pub const DUCKDB_API_0_3_2: u32 = 2; pub const DUCKDB_API_LATEST: u32 = 2; pub const DUCKDB_API_VERSION: u32 = 2; +pub const __bool_true_false_are_defined: u32 = 1; pub const true_: u32 = 1; pub const false_: u32 = 0; -pub const __bool_true_false_are_defined: u32 = 1; pub const __WORDSIZE: u32 = 64; pub const __DARWIN_ONLY_64_BIT_INO_T: u32 = 0; pub const __DARWIN_ONLY_UNIX_CONFORMANCE: u32 = 1; @@ -624,6 +624,8 @@ pub union __mbstate_t { } #[test] fn bindgen_test_layout___mbstate_t() { + const UNINIT: ::std::mem::MaybeUninit<__mbstate_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__mbstate_t>(), 128usize, @@ -634,40 +636,26 @@ fn bindgen_test_layout___mbstate_t() { 8usize, concat!("Alignment of ", stringify!(__mbstate_t)) ); - fn test_field___mbstate8() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__mbstate_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__mbstate8) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__mbstate_t), - "::", - stringify!(__mbstate8) - ) - ); - } - test_field___mbstate8(); - fn test_field__mbstateL() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__mbstate_t>::uninit(); - let ptr = uninit.as_ptr(); - 
::std::ptr::addr_of!((*ptr)._mbstateL) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__mbstate_t), - "::", - stringify!(_mbstateL) - ) - ); - } - test_field__mbstateL(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__mbstate8) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__mbstate_t), + "::", + stringify!(__mbstate8) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr)._mbstateL) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__mbstate_t), + "::", + stringify!(_mbstateL) + ) + ); } pub type __darwin_mbstate_t = __mbstate_t; pub type __darwin_ptrdiff_t = ::std::os::raw::c_long; @@ -709,6 +697,8 @@ pub struct __darwin_pthread_handler_rec { } #[test] fn bindgen_test_layout___darwin_pthread_handler_rec() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_pthread_handler_rec> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_pthread_handler_rec>(), 24usize, @@ -719,57 +709,36 @@ fn bindgen_test_layout___darwin_pthread_handler_rec() { 8usize, concat!("Alignment of ", stringify!(__darwin_pthread_handler_rec)) ); - fn test_field___routine() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_pthread_handler_rec>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__routine) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_pthread_handler_rec), - "::", - stringify!(__routine) - ) - ); - } - test_field___routine(); - fn test_field___arg() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_pthread_handler_rec>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__arg) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_pthread_handler_rec), - "::", - stringify!(__arg) - ) - ); - } - test_field___arg(); - fn test_field___next() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_pthread_handler_rec>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__next) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_pthread_handler_rec), - "::", - stringify!(__next) - ) - ); - } - test_field___next(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__routine) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_pthread_handler_rec), + "::", + stringify!(__routine) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__arg) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_pthread_handler_rec), + "::", + stringify!(__arg) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__next) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_pthread_handler_rec), + "::", + stringify!(__next) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -779,6 +748,8 @@ pub struct _opaque_pthread_attr_t { } #[test] fn bindgen_test_layout__opaque_pthread_attr_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_attr_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_attr_t>(), 64usize, @@ -789,40 +760,26 @@ fn bindgen_test_layout__opaque_pthread_attr_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_attr_t)) ); - fn test_field___sig() { - 
assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_attr_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_attr_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); - fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_attr_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_attr_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_attr_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_attr_t), + "::", + stringify!(__opaque) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -832,6 +789,8 @@ pub struct _opaque_pthread_cond_t { } #[test] fn bindgen_test_layout__opaque_pthread_cond_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_cond_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_cond_t>(), 48usize, @@ -842,40 +801,26 @@ fn bindgen_test_layout__opaque_pthread_cond_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_cond_t)) ); - fn test_field___sig() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_cond_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_cond_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); - fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_cond_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_cond_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_cond_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_cond_t), + "::", + stringify!(__opaque) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -885,6 +830,8 @@ pub struct _opaque_pthread_condattr_t { } #[test] fn bindgen_test_layout__opaque_pthread_condattr_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_condattr_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_condattr_t>(), 16usize, @@ -895,40 +842,26 @@ fn bindgen_test_layout__opaque_pthread_condattr_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_condattr_t)) ); - fn test_field___sig() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_condattr_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - 
}, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_condattr_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); - fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_condattr_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_condattr_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_condattr_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_condattr_t), + "::", + stringify!(__opaque) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -938,6 +871,8 @@ pub struct _opaque_pthread_mutex_t { } #[test] fn bindgen_test_layout__opaque_pthread_mutex_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_mutex_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_mutex_t>(), 64usize, @@ -948,40 +883,26 @@ fn bindgen_test_layout__opaque_pthread_mutex_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_mutex_t)) ); - fn test_field___sig() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_mutex_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_mutex_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); - fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_mutex_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_mutex_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_mutex_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_mutex_t), + "::", + stringify!(__opaque) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -991,6 +912,8 @@ pub struct _opaque_pthread_mutexattr_t { } #[test] fn bindgen_test_layout__opaque_pthread_mutexattr_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_mutexattr_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_mutexattr_t>(), 16usize, @@ -1001,40 +924,26 @@ fn bindgen_test_layout__opaque_pthread_mutexattr_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_mutexattr_t)) ); - fn test_field___sig() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_mutexattr_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_mutexattr_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); 
- fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_mutexattr_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_mutexattr_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_mutexattr_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_mutexattr_t), + "::", + stringify!(__opaque) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -1044,6 +953,8 @@ pub struct _opaque_pthread_once_t { } #[test] fn bindgen_test_layout__opaque_pthread_once_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_once_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_once_t>(), 16usize, @@ -1054,40 +965,26 @@ fn bindgen_test_layout__opaque_pthread_once_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_once_t)) ); - fn test_field___sig() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_once_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_once_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); - fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_once_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_once_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_once_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_once_t), + "::", + stringify!(__opaque) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -1097,6 +994,8 @@ pub struct _opaque_pthread_rwlock_t { } #[test] fn bindgen_test_layout__opaque_pthread_rwlock_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_rwlock_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_rwlock_t>(), 200usize, @@ -1107,40 +1006,26 @@ fn bindgen_test_layout__opaque_pthread_rwlock_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_rwlock_t)) ); - fn test_field___sig() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_rwlock_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_rwlock_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); - fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_rwlock_t>::uninit(); - let ptr = uninit.as_ptr(); - 
::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_rwlock_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_rwlock_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_rwlock_t), + "::", + stringify!(__opaque) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -1150,6 +1035,8 @@ pub struct _opaque_pthread_rwlockattr_t { } #[test] fn bindgen_test_layout__opaque_pthread_rwlockattr_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_rwlockattr_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_rwlockattr_t>(), 24usize, @@ -1160,40 +1047,26 @@ fn bindgen_test_layout__opaque_pthread_rwlockattr_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_rwlockattr_t)) ); - fn test_field___sig() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_rwlockattr_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_rwlockattr_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); - fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_rwlockattr_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_rwlockattr_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_rwlockattr_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_rwlockattr_t), + "::", + stringify!(__opaque) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -1204,6 +1077,8 @@ pub struct _opaque_pthread_t { } #[test] fn bindgen_test_layout__opaque_pthread_t() { + const UNINIT: ::std::mem::MaybeUninit<_opaque_pthread_t> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<_opaque_pthread_t>(), 8192usize, @@ -1214,57 +1089,36 @@ fn bindgen_test_layout__opaque_pthread_t() { 8usize, concat!("Alignment of ", stringify!(_opaque_pthread_t)) ); - fn test_field___sig() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_t), - "::", - stringify!(__sig) - ) - ); - } - test_field___sig(); - fn test_field___cleanup_stack() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__cleanup_stack) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_t), - "::", - 
stringify!(__cleanup_stack) - ) - ); - } - test_field___cleanup_stack(); - fn test_field___opaque() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<_opaque_pthread_t>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_opaque_pthread_t), - "::", - stringify!(__opaque) - ) - ); - } - test_field___opaque(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sig) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_t), + "::", + stringify!(__sig) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__cleanup_stack) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_t), + "::", + stringify!(__cleanup_stack) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opaque) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(_opaque_pthread_t), + "::", + stringify!(__opaque) + ) + ); } pub type __darwin_pthread_attr_t = _opaque_pthread_attr_t; pub type __darwin_pthread_cond_t = _opaque_pthread_cond_t; @@ -1323,6 +1177,8 @@ pub struct __darwin_i386_thread_state { } #[test] fn bindgen_test_layout___darwin_i386_thread_state() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_i386_thread_state> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_i386_thread_state>(), 64usize, @@ -1333,278 +1189,166 @@ fn bindgen_test_layout___darwin_i386_thread_state() { 4usize, concat!("Alignment of ", stringify!(__darwin_i386_thread_state)) ); - fn test_field___eax() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__eax) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__eax) - ) - ); - } - test_field___eax(); - fn test_field___ebx() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ebx) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__ebx) - ) - ); - } - test_field___ebx(); - fn test_field___ecx() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ecx) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__ecx) - ) - ); - } - test_field___ecx(); - fn test_field___edx() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__edx) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__edx) - ) - ); - } - test_field___edx(); - fn test_field___edi() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__edi) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__edi) - ) - ); - } - 
test_field___edi(); - fn test_field___esi() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__esi) as usize - ptr as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__esi) - ) - ); - } - test_field___esi(); - fn test_field___ebp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ebp) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__ebp) - ) - ); - } - test_field___ebp(); - fn test_field___esp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__esp) as usize - ptr as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__esp) - ) - ); - } - test_field___esp(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___eflags() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__eflags) as usize - ptr as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__eflags) - ) - ); - } - test_field___eflags(); - fn test_field___eip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__eip) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__eip) - ) - ); - } - test_field___eip(); - fn test_field___cs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__cs) as usize - ptr as usize - }, - 44usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__cs) - ) - ); - } - test_field___cs(); - fn test_field___ds() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ds) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__ds) - ) - ); - } - test_field___ds(); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 52usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = 
uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); - fn test_field___gs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_thread_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__gs) as usize - ptr as usize - }, - 60usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_thread_state), - "::", - stringify!(__gs) - ) - ); - } - test_field___gs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__eax) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__eax) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ebx) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__ebx) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ecx) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__ecx) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__edx) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__edx) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__edi) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__edi) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__esi) as usize - ptr as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__esi) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ebp) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__ebp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__esp) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__esp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__eflags) as usize - ptr as usize }, + 36usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__eflags) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__eip) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__eip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__cs) as usize - ptr as usize }, + 44usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__cs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ds) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__ds) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 52usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__es) + ) + ); + 
assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__fs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__gs) as usize - ptr as usize }, + 60usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_thread_state), + "::", + stringify!(__gs) + ) + ); } #[repr(C)] #[repr(align(2))] @@ -2024,6 +1768,8 @@ pub struct __darwin_mmst_reg { } #[test] fn bindgen_test_layout___darwin_mmst_reg() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mmst_reg> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mmst_reg>(), 16usize, @@ -2034,40 +1780,26 @@ fn bindgen_test_layout___darwin_mmst_reg() { 1usize, concat!("Alignment of ", stringify!(__darwin_mmst_reg)) ); - fn test_field___mmst_reg() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mmst_reg>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__mmst_reg) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mmst_reg), - "::", - stringify!(__mmst_reg) - ) - ); - } - test_field___mmst_reg(); - fn test_field___mmst_rsrv() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mmst_reg>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__mmst_rsrv) as usize - ptr as usize - }, - 10usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mmst_reg), - "::", - stringify!(__mmst_rsrv) - ) - ); - } - test_field___mmst_rsrv(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__mmst_reg) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mmst_reg), + "::", + stringify!(__mmst_reg) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__mmst_rsrv) as usize - ptr as usize }, + 10usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mmst_reg), + "::", + stringify!(__mmst_rsrv) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -2076,6 +1808,8 @@ pub struct __darwin_xmm_reg { } #[test] fn bindgen_test_layout___darwin_xmm_reg() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_xmm_reg> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_xmm_reg>(), 16usize, @@ -2086,23 +1820,16 @@ fn bindgen_test_layout___darwin_xmm_reg() { 1usize, concat!("Alignment of ", stringify!(__darwin_xmm_reg)) ); - fn test_field___xmm_reg() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_xmm_reg>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__xmm_reg) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_xmm_reg), - "::", - stringify!(__xmm_reg) - ) - ); - } - test_field___xmm_reg(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__xmm_reg) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_xmm_reg), + "::", + stringify!(__xmm_reg) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -2111,6 +1838,8 @@ pub struct __darwin_ymm_reg { } #[test] fn bindgen_test_layout___darwin_ymm_reg() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_ymm_reg> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_ymm_reg>(), 32usize, @@ -2121,23 +1850,16 @@ fn bindgen_test_layout___darwin_ymm_reg() { 1usize, concat!("Alignment of ", 
stringify!(__darwin_ymm_reg)) ); - fn test_field___ymm_reg() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_ymm_reg>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ymm_reg) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_ymm_reg), - "::", - stringify!(__ymm_reg) - ) - ); - } - test_field___ymm_reg(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ymm_reg) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_ymm_reg), + "::", + stringify!(__ymm_reg) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -2146,6 +1868,8 @@ pub struct __darwin_zmm_reg { } #[test] fn bindgen_test_layout___darwin_zmm_reg() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_zmm_reg> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_zmm_reg>(), 64usize, @@ -2156,23 +1880,16 @@ fn bindgen_test_layout___darwin_zmm_reg() { 1usize, concat!("Alignment of ", stringify!(__darwin_zmm_reg)) ); - fn test_field___zmm_reg() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_zmm_reg>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__zmm_reg) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_zmm_reg), - "::", - stringify!(__zmm_reg) - ) - ); - } - test_field___zmm_reg(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__zmm_reg) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_zmm_reg), + "::", + stringify!(__zmm_reg) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -2181,6 +1898,8 @@ pub struct __darwin_opmask_reg { } #[test] fn bindgen_test_layout___darwin_opmask_reg() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_opmask_reg> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_opmask_reg>(), 8usize, @@ -2191,23 +1910,16 @@ fn bindgen_test_layout___darwin_opmask_reg() { 1usize, concat!("Alignment of ", stringify!(__darwin_opmask_reg)) ); - fn test_field___opmask_reg() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_opmask_reg>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__opmask_reg) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_opmask_reg), - "::", - stringify!(__opmask_reg) - ) - ); - } - test_field___opmask_reg(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__opmask_reg) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_opmask_reg), + "::", + stringify!(__opmask_reg) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -2247,6 +1959,8 @@ pub struct __darwin_i386_float_state { } #[test] fn bindgen_test_layout___darwin_i386_float_state() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_i386_float_state> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_i386_float_state>(), 524usize, @@ -2257,550 +1971,326 @@ fn bindgen_test_layout___darwin_i386_float_state() { 4usize, concat!("Alignment of ", stringify!(__darwin_i386_float_state)) ); - fn test_field___fpu_reserved() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize - }, - 0usize, - concat!( - 
"Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_reserved) - ) - ); - } - test_field___fpu_reserved(); - fn test_field___fpu_fcw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_fcw) - ) - ); - } - test_field___fpu_fcw(); - fn test_field___fpu_fsw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize - }, - 10usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_fsw) - ) - ); - } - test_field___fpu_fsw(); - fn test_field___fpu_ftw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_ftw) - ) - ); - } - test_field___fpu_ftw(); - fn test_field___fpu_rsrv1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize - }, - 13usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_rsrv1) - ) - ); - } - test_field___fpu_rsrv1(); - fn test_field___fpu_fop() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize - }, - 14usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_fop) - ) - ); - } - test_field___fpu_fop(); - fn test_field___fpu_ip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_ip) - ) - ); - } - test_field___fpu_ip(); - fn test_field___fpu_cs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_cs) - ) - ); - } - test_field___fpu_cs(); - fn test_field___fpu_rsrv2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize - }, - 22usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_rsrv2) - ) - ); - } - test_field___fpu_rsrv2(); - fn test_field___fpu_dp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - 
stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_dp) - ) - ); - } - test_field___fpu_dp(); - fn test_field___fpu_ds() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_ds) - ) - ); - } - test_field___fpu_ds(); - fn test_field___fpu_rsrv3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize - }, - 30usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_rsrv3) - ) - ); - } - test_field___fpu_rsrv3(); - fn test_field___fpu_mxcsr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_mxcsr) - ) - ); - } - test_field___fpu_mxcsr(); - fn test_field___fpu_mxcsrmask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_mxcsrmask) - ) - ); - } - test_field___fpu_mxcsrmask(); - fn test_field___fpu_stmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_stmm0) - ) - ); - } - test_field___fpu_stmm0(); - fn test_field___fpu_stmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_stmm1) - ) - ); - } - test_field___fpu_stmm1(); - fn test_field___fpu_stmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_stmm2) - ) - ); - } - test_field___fpu_stmm2(); - fn test_field___fpu_stmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_stmm3) - ) - ); - } - test_field___fpu_stmm3(); - fn test_field___fpu_stmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize - }, - 104usize, - 
concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_stmm4) - ) - ); - } - test_field___fpu_stmm4(); - fn test_field___fpu_stmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_stmm5) - ) - ); - } - test_field___fpu_stmm5(); - fn test_field___fpu_stmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_stmm6) - ) - ); - } - test_field___fpu_stmm6(); - fn test_field___fpu_stmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_stmm7) - ) - ); - } - test_field___fpu_stmm7(); - fn test_field___fpu_xmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_xmm0) - ) - ); - } - test_field___fpu_xmm0(); - fn test_field___fpu_xmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_xmm1) - ) - ); - } - test_field___fpu_xmm1(); - fn test_field___fpu_xmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_xmm2) - ) - ); - } - test_field___fpu_xmm2(); - fn test_field___fpu_xmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_xmm3) - ) - ); - } - test_field___fpu_xmm3(); - fn test_field___fpu_xmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize - }, - 232usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_xmm4) - ) - ); - } - test_field___fpu_xmm4(); - fn test_field___fpu_xmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize - }, 
- 248usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_xmm5) - ) - ); - } - test_field___fpu_xmm5(); - fn test_field___fpu_xmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize - }, - 264usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_xmm6) - ) - ); - } - test_field___fpu_xmm6(); - fn test_field___fpu_xmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize - }, - 280usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_xmm7) - ) - ); - } - test_field___fpu_xmm7(); - fn test_field___fpu_rsrv4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize - }, - 296usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_rsrv4) - ) - ); - } - test_field___fpu_rsrv4(); - fn test_field___fpu_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_float_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize - }, - 520usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_float_state), - "::", - stringify!(__fpu_reserved1) - ) - ); - } - test_field___fpu_reserved1(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_reserved) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_fcw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize }, + 10usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_fsw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_ftw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize }, + 13usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_rsrv1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize }, + 14usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_fop) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_ip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_cs) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize }, + 22usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_rsrv2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_dp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_ds) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize }, + 30usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_rsrv3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_mxcsr) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize }, + 36usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_mxcsrmask) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_stmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_stmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_stmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_stmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_stmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_stmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_stmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_stmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_xmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_xmm1) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_xmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_xmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize }, + 232usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_xmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize }, + 248usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_xmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize }, + 264usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_xmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize }, + 280usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_xmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize }, + 296usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_rsrv4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize }, + 520usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_float_state), + "::", + stringify!(__fpu_reserved1) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -2849,6 +2339,8 @@ pub struct __darwin_i386_avx_state { } #[test] fn bindgen_test_layout___darwin_i386_avx_state() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_i386_avx_state> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_i386_avx_state>(), 716usize, @@ -2859,703 +2351,416 @@ fn bindgen_test_layout___darwin_i386_avx_state() { 4usize, concat!("Alignment of ", stringify!(__darwin_i386_avx_state)) ); - fn test_field___fpu_reserved() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_reserved) - ) - ); - } - test_field___fpu_reserved(); - fn test_field___fpu_fcw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_fcw) - ) - ); - } - test_field___fpu_fcw(); - fn test_field___fpu_fsw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize - }, - 10usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_fsw) - ) - ); - } - test_field___fpu_fsw(); - fn test_field___fpu_ftw() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ftw) - ) - ); - } - test_field___fpu_ftw(); - fn test_field___fpu_rsrv1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize - }, - 13usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_rsrv1) - ) - ); - } - test_field___fpu_rsrv1(); - fn test_field___fpu_fop() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize - }, - 14usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_fop) - ) - ); - } - test_field___fpu_fop(); - fn test_field___fpu_ip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ip) - ) - ); - } - test_field___fpu_ip(); - fn test_field___fpu_cs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_cs) - ) - ); - } - test_field___fpu_cs(); - fn test_field___fpu_rsrv2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize - }, - 22usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_rsrv2) - ) - ); - } - test_field___fpu_rsrv2(); - fn test_field___fpu_dp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_dp) - ) - ); - } - test_field___fpu_dp(); - fn test_field___fpu_ds() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ds) - ) - ); - } - test_field___fpu_ds(); - fn test_field___fpu_rsrv3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize - }, - 30usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_rsrv3) - ) - ); - } - test_field___fpu_rsrv3(); - fn test_field___fpu_mxcsr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let 
ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_mxcsr) - ) - ); - } - test_field___fpu_mxcsr(); - fn test_field___fpu_mxcsrmask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_mxcsrmask) - ) - ); - } - test_field___fpu_mxcsrmask(); - fn test_field___fpu_stmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_stmm0) - ) - ); - } - test_field___fpu_stmm0(); - fn test_field___fpu_stmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_stmm1) - ) - ); - } - test_field___fpu_stmm1(); - fn test_field___fpu_stmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_stmm2) - ) - ); - } - test_field___fpu_stmm2(); - fn test_field___fpu_stmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_stmm3) - ) - ); - } - test_field___fpu_stmm3(); - fn test_field___fpu_stmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_stmm4) - ) - ); - } - test_field___fpu_stmm4(); - fn test_field___fpu_stmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_stmm5) - ) - ); - } - test_field___fpu_stmm5(); - fn test_field___fpu_stmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_stmm6) - ) - ); - } - test_field___fpu_stmm6(); - fn test_field___fpu_stmm7() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_stmm7) - ) - ); - } - test_field___fpu_stmm7(); - fn test_field___fpu_xmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_xmm0) - ) - ); - } - test_field___fpu_xmm0(); - fn test_field___fpu_xmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_xmm1) - ) - ); - } - test_field___fpu_xmm1(); - fn test_field___fpu_xmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_xmm2) - ) - ); - } - test_field___fpu_xmm2(); - fn test_field___fpu_xmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_xmm3) - ) - ); - } - test_field___fpu_xmm3(); - fn test_field___fpu_xmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize - }, - 232usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_xmm4) - ) - ); - } - test_field___fpu_xmm4(); - fn test_field___fpu_xmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize - }, - 248usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_xmm5) - ) - ); - } - test_field___fpu_xmm5(); - fn test_field___fpu_xmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize - }, - 264usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_xmm6) - ) - ); - } - test_field___fpu_xmm6(); - fn test_field___fpu_xmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize - }, - 280usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_xmm7) - ) - ); - } - test_field___fpu_xmm7(); - fn test_field___fpu_rsrv4() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize - }, - 296usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_rsrv4) - ) - ); - } - test_field___fpu_rsrv4(); - fn test_field___fpu_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize - }, - 520usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_reserved1) - ) - ); - } - test_field___fpu_reserved1(); - fn test_field___avx_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__avx_reserved1) as usize - ptr as usize - }, - 524usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__avx_reserved1) - ) - ); - } - test_field___avx_reserved1(); - fn test_field___fpu_ymmh0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh0) as usize - ptr as usize - }, - 588usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ymmh0) - ) - ); - } - test_field___fpu_ymmh0(); - fn test_field___fpu_ymmh1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh1) as usize - ptr as usize - }, - 604usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ymmh1) - ) - ); - } - test_field___fpu_ymmh1(); - fn test_field___fpu_ymmh2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh2) as usize - ptr as usize - }, - 620usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ymmh2) - ) - ); - } - test_field___fpu_ymmh2(); - fn test_field___fpu_ymmh3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh3) as usize - ptr as usize - }, - 636usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ymmh3) - ) - ); - } - test_field___fpu_ymmh3(); - fn test_field___fpu_ymmh4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh4) as usize - ptr as usize - }, - 652usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ymmh4) - ) - ); - } - test_field___fpu_ymmh4(); - fn test_field___fpu_ymmh5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh5) as usize - ptr as usize - }, - 668usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ymmh5) - ) - ); - } - test_field___fpu_ymmh5(); - fn test_field___fpu_ymmh6() { - 
assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh6) as usize - ptr as usize - }, - 684usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ymmh6) - ) - ); - } - test_field___fpu_ymmh6(); - fn test_field___fpu_ymmh7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh7) as usize - ptr as usize - }, - 700usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx_state), - "::", - stringify!(__fpu_ymmh7) - ) - ); - } - test_field___fpu_ymmh7(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_reserved) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_fcw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize }, + 10usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_fsw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ftw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize }, + 13usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_rsrv1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize }, + 14usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_fop) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_cs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize }, + 22usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_rsrv2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_dp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ds) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize }, + 30usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_rsrv3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + 
stringify!(__fpu_mxcsr) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize }, + 36usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_mxcsrmask) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_stmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_stmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_stmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_stmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_stmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_stmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_stmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_stmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_xmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_xmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_xmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_xmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize }, + 232usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_xmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize }, + 248usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_xmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize }, + 264usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_xmm6) + ) + 
); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize }, + 280usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_xmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize }, + 296usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_rsrv4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize }, + 520usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__avx_reserved1) as usize - ptr as usize }, + 524usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__avx_reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh0) as usize - ptr as usize }, + 588usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ymmh0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh1) as usize - ptr as usize }, + 604usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ymmh1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh2) as usize - ptr as usize }, + 620usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ymmh2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh3) as usize - ptr as usize }, + 636usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ymmh3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh4) as usize - ptr as usize }, + 652usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ymmh4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh5) as usize - ptr as usize }, + 668usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ymmh5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh6) as usize - ptr as usize }, + 684usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ymmh6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh7) as usize - ptr as usize }, + 700usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx_state), + "::", + stringify!(__fpu_ymmh7) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -3620,6 +2825,8 @@ pub struct __darwin_i386_avx512_state { } #[test] fn bindgen_test_layout___darwin_i386_avx512_state() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_i386_avx512_state> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_i386_avx512_state>(), 1036usize, @@ -3630,975 +2837,576 @@ fn bindgen_test_layout___darwin_i386_avx512_state() { 4usize, concat!("Alignment of ", stringify!(__darwin_i386_avx512_state)) ); - fn test_field___fpu_reserved() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - 
stringify!(__fpu_reserved) - ) - ); - } - test_field___fpu_reserved(); - fn test_field___fpu_fcw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_fcw) - ) - ); - } - test_field___fpu_fcw(); - fn test_field___fpu_fsw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize - }, - 10usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_fsw) - ) - ); - } - test_field___fpu_fsw(); - fn test_field___fpu_ftw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ftw) - ) - ); - } - test_field___fpu_ftw(); - fn test_field___fpu_rsrv1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize - }, - 13usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_rsrv1) - ) - ); - } - test_field___fpu_rsrv1(); - fn test_field___fpu_fop() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize - }, - 14usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_fop) - ) - ); - } - test_field___fpu_fop(); - fn test_field___fpu_ip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ip) - ) - ); - } - test_field___fpu_ip(); - fn test_field___fpu_cs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_cs) - ) - ); - } - test_field___fpu_cs(); - fn test_field___fpu_rsrv2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize - }, - 22usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_rsrv2) - ) - ); - } - test_field___fpu_rsrv2(); - fn test_field___fpu_dp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - 
stringify!(__fpu_dp) - ) - ); - } - test_field___fpu_dp(); - fn test_field___fpu_ds() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ds) - ) - ); - } - test_field___fpu_ds(); - fn test_field___fpu_rsrv3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize - }, - 30usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_rsrv3) - ) - ); - } - test_field___fpu_rsrv3(); - fn test_field___fpu_mxcsr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_mxcsr) - ) - ); - } - test_field___fpu_mxcsr(); - fn test_field___fpu_mxcsrmask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_mxcsrmask) - ) - ); - } - test_field___fpu_mxcsrmask(); - fn test_field___fpu_stmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_stmm0) - ) - ); - } - test_field___fpu_stmm0(); - fn test_field___fpu_stmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_stmm1) - ) - ); - } - test_field___fpu_stmm1(); - fn test_field___fpu_stmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_stmm2) - ) - ); - } - test_field___fpu_stmm2(); - fn test_field___fpu_stmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_stmm3) - ) - ); - } - test_field___fpu_stmm3(); - fn test_field___fpu_stmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - 
stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_stmm4) - ) - ); - } - test_field___fpu_stmm4(); - fn test_field___fpu_stmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_stmm5) - ) - ); - } - test_field___fpu_stmm5(); - fn test_field___fpu_stmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_stmm6) - ) - ); - } - test_field___fpu_stmm6(); - fn test_field___fpu_stmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_stmm7) - ) - ); - } - test_field___fpu_stmm7(); - fn test_field___fpu_xmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_xmm0) - ) - ); - } - test_field___fpu_xmm0(); - fn test_field___fpu_xmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_xmm1) - ) - ); - } - test_field___fpu_xmm1(); - fn test_field___fpu_xmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_xmm2) - ) - ); - } - test_field___fpu_xmm2(); - fn test_field___fpu_xmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_xmm3) - ) - ); - } - test_field___fpu_xmm3(); - fn test_field___fpu_xmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize - }, - 232usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_xmm4) - ) - ); - } - test_field___fpu_xmm4(); - fn test_field___fpu_xmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize - }, - 248usize, - 
concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_xmm5) - ) - ); - } - test_field___fpu_xmm5(); - fn test_field___fpu_xmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize - }, - 264usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_xmm6) - ) - ); - } - test_field___fpu_xmm6(); - fn test_field___fpu_xmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize - }, - 280usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_xmm7) - ) - ); - } - test_field___fpu_xmm7(); - fn test_field___fpu_rsrv4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize - }, - 296usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_rsrv4) - ) - ); - } - test_field___fpu_rsrv4(); - fn test_field___fpu_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize - }, - 520usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_reserved1) - ) - ); - } - test_field___fpu_reserved1(); - fn test_field___avx_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__avx_reserved1) as usize - ptr as usize - }, - 524usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__avx_reserved1) - ) - ); - } - test_field___avx_reserved1(); - fn test_field___fpu_ymmh0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh0) as usize - ptr as usize - }, - 588usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ymmh0) - ) - ); - } - test_field___fpu_ymmh0(); - fn test_field___fpu_ymmh1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh1) as usize - ptr as usize - }, - 604usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ymmh1) - ) - ); - } - test_field___fpu_ymmh1(); - fn test_field___fpu_ymmh2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh2) as usize - ptr as usize - }, - 620usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ymmh2) - ) - ); - } - test_field___fpu_ymmh2(); - fn test_field___fpu_ymmh3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - 
::std::ptr::addr_of!((*ptr).__fpu_ymmh3) as usize - ptr as usize - }, - 636usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ymmh3) - ) - ); - } - test_field___fpu_ymmh3(); - fn test_field___fpu_ymmh4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh4) as usize - ptr as usize - }, - 652usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ymmh4) - ) - ); - } - test_field___fpu_ymmh4(); - fn test_field___fpu_ymmh5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh5) as usize - ptr as usize - }, - 668usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ymmh5) - ) - ); - } - test_field___fpu_ymmh5(); - fn test_field___fpu_ymmh6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh6) as usize - ptr as usize - }, - 684usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ymmh6) - ) - ); - } - test_field___fpu_ymmh6(); - fn test_field___fpu_ymmh7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh7) as usize - ptr as usize - }, - 700usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_ymmh7) - ) - ); - } - test_field___fpu_ymmh7(); - fn test_field___fpu_k0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k0) as usize - ptr as usize - }, - 716usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_k0) - ) - ); - } - test_field___fpu_k0(); - fn test_field___fpu_k1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k1) as usize - ptr as usize - }, - 724usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_k1) - ) - ); - } - test_field___fpu_k1(); - fn test_field___fpu_k2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k2) as usize - ptr as usize - }, - 732usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_k2) - ) - ); - } - test_field___fpu_k2(); - fn test_field___fpu_k3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k3) as usize - ptr as usize - }, - 740usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_k3) - ) - ); - } - test_field___fpu_k3(); - fn test_field___fpu_k4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = 
uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k4) as usize - ptr as usize - }, - 748usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_k4) - ) - ); - } - test_field___fpu_k4(); - fn test_field___fpu_k5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k5) as usize - ptr as usize - }, - 756usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_k5) - ) - ); - } - test_field___fpu_k5(); - fn test_field___fpu_k6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k6) as usize - ptr as usize - }, - 764usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_k6) - ) - ); - } - test_field___fpu_k6(); - fn test_field___fpu_k7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k7) as usize - ptr as usize - }, - 772usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_k7) - ) - ); - } - test_field___fpu_k7(); - fn test_field___fpu_zmmh0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh0) as usize - ptr as usize - }, - 780usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_zmmh0) - ) - ); - } - test_field___fpu_zmmh0(); - fn test_field___fpu_zmmh1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh1) as usize - ptr as usize - }, - 812usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_zmmh1) - ) - ); - } - test_field___fpu_zmmh1(); - fn test_field___fpu_zmmh2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh2) as usize - ptr as usize - }, - 844usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_zmmh2) - ) - ); - } - test_field___fpu_zmmh2(); - fn test_field___fpu_zmmh3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh3) as usize - ptr as usize - }, - 876usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_zmmh3) - ) - ); - } - test_field___fpu_zmmh3(); - fn test_field___fpu_zmmh4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh4) as usize - ptr as usize - }, - 908usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_zmmh4) - ) - ); - } - test_field___fpu_zmmh4(); - fn test_field___fpu_zmmh5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - 
let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh5) as usize - ptr as usize - }, - 940usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_zmmh5) - ) - ); - } - test_field___fpu_zmmh5(); - fn test_field___fpu_zmmh6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh6) as usize - ptr as usize - }, - 972usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_zmmh6) - ) - ); - } - test_field___fpu_zmmh6(); - fn test_field___fpu_zmmh7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_avx512_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh7) as usize - ptr as usize - }, - 1004usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_avx512_state), - "::", - stringify!(__fpu_zmmh7) - ) - ); - } - test_field___fpu_zmmh7(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_reserved) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_fcw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize }, + 10usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_fsw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ftw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize }, + 13usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_rsrv1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize }, + 14usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_fop) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_cs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize }, + 22usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_rsrv2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_dp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ds) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as 
usize - ptr as usize }, + 30usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_rsrv3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_mxcsr) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize }, + 36usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_mxcsrmask) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_stmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_stmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_stmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_stmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_stmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_stmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_stmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_stmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_xmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_xmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_xmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_xmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize }, + 232usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_xmm4) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize }, + 248usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_xmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize }, + 264usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_xmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize }, + 280usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_xmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize }, + 296usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_rsrv4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize }, + 520usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__avx_reserved1) as usize - ptr as usize }, + 524usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__avx_reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh0) as usize - ptr as usize }, + 588usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ymmh0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh1) as usize - ptr as usize }, + 604usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ymmh1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh2) as usize - ptr as usize }, + 620usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ymmh2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh3) as usize - ptr as usize }, + 636usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ymmh3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh4) as usize - ptr as usize }, + 652usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ymmh4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh5) as usize - ptr as usize }, + 668usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ymmh5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh6) as usize - ptr as usize }, + 684usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ymmh6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh7) as usize - ptr as usize }, + 700usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_ymmh7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k0) as usize - ptr as usize }, + 716usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_k0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k1) as usize - ptr as usize }, + 724usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_k1) + ) + 
); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k2) as usize - ptr as usize }, + 732usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_k2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k3) as usize - ptr as usize }, + 740usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_k3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k4) as usize - ptr as usize }, + 748usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_k4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k5) as usize - ptr as usize }, + 756usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_k5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k6) as usize - ptr as usize }, + 764usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_k6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k7) as usize - ptr as usize }, + 772usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_k7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh0) as usize - ptr as usize }, + 780usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_zmmh0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh1) as usize - ptr as usize }, + 812usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_zmmh1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh2) as usize - ptr as usize }, + 844usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_zmmh2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh3) as usize - ptr as usize }, + 876usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_zmmh3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh4) as usize - ptr as usize }, + 908usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_zmmh4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh5) as usize - ptr as usize }, + 940usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_zmmh5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh6) as usize - ptr as usize }, + 972usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_zmmh6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh7) as usize - ptr as usize }, + 1004usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_avx512_state), + "::", + stringify!(__fpu_zmmh7) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -4610,6 +3418,8 @@ pub struct __darwin_i386_exception_state { } #[test] fn bindgen_test_layout___darwin_i386_exception_state() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_i386_exception_state> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_i386_exception_state>(), 12usize, @@ -4620,74 +3430,46 @@ fn 
bindgen_test_layout___darwin_i386_exception_state() { 4usize, concat!("Alignment of ", stringify!(__darwin_i386_exception_state)) ); - fn test_field___trapno() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_exception_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__trapno) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_exception_state), - "::", - stringify!(__trapno) - ) - ); - } - test_field___trapno(); - fn test_field___cpu() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_exception_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__cpu) as usize - ptr as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_exception_state), - "::", - stringify!(__cpu) - ) - ); - } - test_field___cpu(); - fn test_field___err() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_exception_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__err) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_exception_state), - "::", - stringify!(__err) - ) - ); - } - test_field___err(); - fn test_field___faultvaddr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_i386_exception_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__faultvaddr) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_i386_exception_state), - "::", - stringify!(__faultvaddr) - ) - ); - } - test_field___faultvaddr(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__trapno) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_exception_state), + "::", + stringify!(__trapno) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__cpu) as usize - ptr as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_exception_state), + "::", + stringify!(__cpu) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__err) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_exception_state), + "::", + stringify!(__err) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__faultvaddr) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_i386_exception_state), + "::", + stringify!(__faultvaddr) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -4703,6 +3485,8 @@ pub struct __darwin_x86_debug_state32 { } #[test] fn bindgen_test_layout___darwin_x86_debug_state32() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_debug_state32> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_debug_state32>(), 32usize, @@ -4713,142 +3497,86 @@ fn bindgen_test_layout___darwin_x86_debug_state32() { 4usize, concat!("Alignment of ", stringify!(__darwin_x86_debug_state32)) ); - fn test_field___dr0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr0) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state32), - "::", - stringify!(__dr0) - ) - ); - } - test_field___dr0(); - fn test_field___dr1() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::<__darwin_x86_debug_state32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr1) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state32), - "::", - stringify!(__dr1) - ) - ); - } - test_field___dr1(); - fn test_field___dr2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr2) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state32), - "::", - stringify!(__dr2) - ) - ); - } - test_field___dr2(); - fn test_field___dr3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr3) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state32), - "::", - stringify!(__dr3) - ) - ); - } - test_field___dr3(); - fn test_field___dr4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr4) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state32), - "::", - stringify!(__dr4) - ) - ); - } - test_field___dr4(); - fn test_field___dr5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr5) as usize - ptr as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state32), - "::", - stringify!(__dr5) - ) - ); - } - test_field___dr5(); - fn test_field___dr6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr6) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state32), - "::", - stringify!(__dr6) - ) - ); - } - test_field___dr6(); - fn test_field___dr7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr7) as usize - ptr as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state32), - "::", - stringify!(__dr7) - ) - ); - } - test_field___dr7(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr0) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state32), + "::", + stringify!(__dr0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr1) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state32), + "::", + stringify!(__dr1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr2) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state32), + "::", + stringify!(__dr2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr3) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state32), + "::", + stringify!(__dr3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr4) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + 
stringify!(__darwin_x86_debug_state32), + "::", + stringify!(__dr4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr5) as usize - ptr as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state32), + "::", + stringify!(__dr5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr6) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state32), + "::", + stringify!(__dr6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr7) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state32), + "::", + stringify!(__dr7) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -4861,6 +3589,8 @@ pub struct __x86_instruction_state { } #[test] fn bindgen_test_layout___x86_instruction_state() { + const UNINIT: ::std::mem::MaybeUninit<__x86_instruction_state> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__x86_instruction_state>(), 2456usize, @@ -4871,91 +3601,56 @@ fn bindgen_test_layout___x86_instruction_state() { 4usize, concat!("Alignment of ", stringify!(__x86_instruction_state)) ); - fn test_field___insn_stream_valid_bytes() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__x86_instruction_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__insn_stream_valid_bytes) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__x86_instruction_state), - "::", - stringify!(__insn_stream_valid_bytes) - ) - ); - } - test_field___insn_stream_valid_bytes(); - fn test_field___insn_offset() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__x86_instruction_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__insn_offset) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__x86_instruction_state), - "::", - stringify!(__insn_offset) - ) - ); - } - test_field___insn_offset(); - fn test_field___out_of_synch() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__x86_instruction_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__out_of_synch) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__x86_instruction_state), - "::", - stringify!(__out_of_synch) - ) - ); - } - test_field___out_of_synch(); - fn test_field___insn_bytes() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__x86_instruction_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__insn_bytes) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__x86_instruction_state), - "::", - stringify!(__insn_bytes) - ) - ); - } - test_field___insn_bytes(); - fn test_field___insn_cacheline() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__x86_instruction_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__insn_cacheline) as usize - ptr as usize - }, - 2392usize, - concat!( - "Offset of field: ", - stringify!(__x86_instruction_state), - "::", - stringify!(__insn_cacheline) - ) - ); - } - test_field___insn_cacheline(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__insn_stream_valid_bytes) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__x86_instruction_state), + "::", + stringify!(__insn_stream_valid_bytes) + ) + ); + assert_eq!( 
+ unsafe { ::std::ptr::addr_of!((*ptr).__insn_offset) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(__x86_instruction_state), + "::", + stringify!(__insn_offset) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__out_of_synch) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__x86_instruction_state), + "::", + stringify!(__out_of_synch) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__insn_bytes) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__x86_instruction_state), + "::", + stringify!(__insn_bytes) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__insn_cacheline) as usize - ptr as usize }, + 2392usize, + concat!( + "Offset of field: ", + stringify!(__x86_instruction_state), + "::", + stringify!(__insn_cacheline) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -4968,6 +3663,8 @@ pub struct __last_branch_record { } #[test] fn bindgen_test_layout___last_branch_record() { + const UNINIT: ::std::mem::MaybeUninit<__last_branch_record> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__last_branch_record>(), 24usize, @@ -4978,40 +3675,26 @@ fn bindgen_test_layout___last_branch_record() { 8usize, concat!("Alignment of ", stringify!(__last_branch_record)) ); - fn test_field___from_ip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__last_branch_record>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__from_ip) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__last_branch_record), - "::", - stringify!(__from_ip) - ) - ); - } - test_field___from_ip(); - fn test_field___to_ip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__last_branch_record>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__to_ip) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__last_branch_record), - "::", - stringify!(__to_ip) - ) - ); - } - test_field___to_ip(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__from_ip) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__last_branch_record), + "::", + stringify!(__from_ip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__to_ip) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__last_branch_record), + "::", + stringify!(__to_ip) + ) + ); } impl __last_branch_record { #[inline] @@ -5111,6 +3794,8 @@ pub struct __last_branch_state { } #[test] fn bindgen_test_layout___last_branch_state() { + const UNINIT: ::std::mem::MaybeUninit<__last_branch_state> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__last_branch_state>(), 776usize, @@ -5121,40 +3806,26 @@ fn bindgen_test_layout___last_branch_state() { 8usize, concat!("Alignment of ", stringify!(__last_branch_state)) ); - fn test_field___lbr_count() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__last_branch_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__lbr_count) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__last_branch_state), - "::", - stringify!(__lbr_count) - ) - ); - } - test_field___lbr_count(); - fn test_field___lbrs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__last_branch_state>::uninit(); - 
let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__lbrs) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__last_branch_state), - "::", - stringify!(__lbrs) - ) - ); - } - test_field___lbrs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__lbr_count) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__last_branch_state), + "::", + stringify!(__lbr_count) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__lbrs) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__last_branch_state), + "::", + stringify!(__lbrs) + ) + ); } impl __last_branch_state { #[inline] @@ -5219,6 +3890,8 @@ pub struct __x86_pagein_state { } #[test] fn bindgen_test_layout___x86_pagein_state() { + const UNINIT: ::std::mem::MaybeUninit<__x86_pagein_state> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__x86_pagein_state>(), 4usize, @@ -5229,23 +3902,16 @@ fn bindgen_test_layout___x86_pagein_state() { 4usize, concat!("Alignment of ", stringify!(__x86_pagein_state)) ); - fn test_field___pagein_error() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__x86_pagein_state>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__pagein_error) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__x86_pagein_state), - "::", - stringify!(__pagein_error) - ) - ); - } - test_field___pagein_error(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__pagein_error) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__x86_pagein_state), + "::", + stringify!(__pagein_error) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -5274,6 +3940,8 @@ pub struct __darwin_x86_thread_state64 { } #[test] fn bindgen_test_layout___darwin_x86_thread_state64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_thread_state64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_thread_state64>(), 168usize, @@ -5284,368 +3952,221 @@ fn bindgen_test_layout___darwin_x86_thread_state64() { 8usize, concat!("Alignment of ", stringify!(__darwin_x86_thread_state64)) ); - fn test_field___rax() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rax) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rax) - ) - ); - } - test_field___rax(); - fn test_field___rbx() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rbx) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rbx) - ) - ); - } - test_field___rbx(); - fn test_field___rcx() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rcx) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rcx) - ) - ); - } - test_field___rcx(); - fn test_field___rdx() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); 
- let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rdx) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rdx) - ) - ); - } - test_field___rdx(); - fn test_field___rdi() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rdi) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rdi) - ) - ); - } - test_field___rdi(); - fn test_field___rsi() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rsi) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rsi) - ) - ); - } - test_field___rsi(); - fn test_field___rbp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rbp) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rbp) - ) - ); - } - test_field___rbp(); - fn test_field___rsp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rsp) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rsp) - ) - ); - } - test_field___rsp(); - fn test_field___r8() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__r8) as usize - ptr as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__r8) - ) - ); - } - test_field___r8(); - fn test_field___r9() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__r9) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__r9) - ) - ); - } - test_field___r9(); - fn test_field___r10() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__r10) as usize - ptr as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__r10) - ) - ); - } - test_field___r10(); - fn test_field___r11() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__r11) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__r11) - ) - ); - } - test_field___r11(); - fn test_field___r12() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__r12) as usize - ptr as usize - }, - 96usize, - concat!( - "Offset of field: ", - 
stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__r12) - ) - ); - } - test_field___r12(); - fn test_field___r13() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__r13) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__r13) - ) - ); - } - test_field___r13(); - fn test_field___r14() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__r14) as usize - ptr as usize - }, - 112usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__r14) - ) - ); - } - test_field___r14(); - fn test_field___r15() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__r15) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__r15) - ) - ); - } - test_field___r15(); - fn test_field___rip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rip) as usize - ptr as usize - }, - 128usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rip) - ) - ); - } - test_field___rip(); - fn test_field___rflags() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__rflags) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__rflags) - ) - ); - } - test_field___rflags(); - fn test_field___cs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__cs) as usize - ptr as usize - }, - 144usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__cs) - ) - ); - } - test_field___cs(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); - fn test_field___gs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__gs) as usize - ptr as usize - }, - 160usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_state64), - "::", - stringify!(__gs) - ) - ); - } - test_field___gs(); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct __darwin_x86_thread_full_state64 { - pub __ss64: __darwin_x86_thread_state64, + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rax) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rax) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rbx) as usize - 
ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rbx) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rcx) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rcx) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rdx) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rdx) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rdi) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rdi) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rsi) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rsi) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rbp) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rbp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rsp) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rsp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__r8) as usize - ptr as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__r8) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__r9) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__r9) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__r10) as usize - ptr as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__r10) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__r11) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__r11) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__r12) as usize - ptr as usize }, + 96usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__r12) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__r13) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__r13) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__r14) as usize - ptr as usize }, + 112usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__r14) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__r15) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__r15) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rip) as usize - ptr as usize }, + 128usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__rflags) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__rflags) + ) + ); + assert_eq!( + 
unsafe { ::std::ptr::addr_of!((*ptr).__cs) as usize - ptr as usize }, + 144usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__cs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__fs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__gs) as usize - ptr as usize }, + 160usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_state64), + "::", + stringify!(__gs) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct __darwin_x86_thread_full_state64 { + pub __ss64: __darwin_x86_thread_state64, pub __ds: __uint64_t, pub __es: __uint64_t, pub __ss: __uint64_t, @@ -5653,6 +4174,8 @@ pub struct __darwin_x86_thread_full_state64 { } #[test] fn bindgen_test_layout___darwin_x86_thread_full_state64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_thread_full_state64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_thread_full_state64>(), 200usize, @@ -5663,91 +4186,56 @@ fn bindgen_test_layout___darwin_x86_thread_full_state64() { 8usize, concat!("Alignment of ", stringify!(__darwin_x86_thread_full_state64)) ); - fn test_field___ss64() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_full_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss64) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_full_state64), - "::", - stringify!(__ss64) - ) - ); - } - test_field___ss64(); - fn test_field___ds() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_full_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ds) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_full_state64), - "::", - stringify!(__ds) - ) - ); - } - test_field___ds(); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_full_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 176usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_full_state64), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_full_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_full_state64), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___gsbase() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_thread_full_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__gsbase) as usize - ptr as usize - }, - 192usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_thread_full_state64), - "::", - stringify!(__gsbase) - ) - ); - } - test_field___gsbase(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss64) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_full_state64), + "::", + stringify!(__ss64) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).__ds) as usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_full_state64), + "::", + stringify!(__ds) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 176usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_full_state64), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_full_state64), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__gsbase) as usize - ptr as usize }, + 192usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_thread_full_state64), + "::", + stringify!(__gsbase) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -5795,6 +4283,8 @@ pub struct __darwin_x86_float_state64 { } #[test] fn bindgen_test_layout___darwin_x86_float_state64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_float_state64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_float_state64>(), 524usize, @@ -5805,686 +4295,406 @@ fn bindgen_test_layout___darwin_x86_float_state64() { 4usize, concat!("Alignment of ", stringify!(__darwin_x86_float_state64)) ); - fn test_field___fpu_reserved() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_reserved) - ) - ); - } - test_field___fpu_reserved(); - fn test_field___fpu_fcw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_fcw) - ) - ); - } - test_field___fpu_fcw(); - fn test_field___fpu_fsw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize - }, - 10usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_fsw) - ) - ); - } - test_field___fpu_fsw(); - fn test_field___fpu_ftw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_ftw) - ) - ); - } - test_field___fpu_ftw(); - fn test_field___fpu_rsrv1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize - }, - 13usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_rsrv1) - ) - ); - } - test_field___fpu_rsrv1(); - fn test_field___fpu_fop() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - 
::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize - }, - 14usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_fop) - ) - ); - } - test_field___fpu_fop(); - fn test_field___fpu_ip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_ip) - ) - ); - } - test_field___fpu_ip(); - fn test_field___fpu_cs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_cs) - ) - ); - } - test_field___fpu_cs(); - fn test_field___fpu_rsrv2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize - }, - 22usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_rsrv2) - ) - ); - } - test_field___fpu_rsrv2(); - fn test_field___fpu_dp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_dp) - ) - ); - } - test_field___fpu_dp(); - fn test_field___fpu_ds() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_ds) - ) - ); - } - test_field___fpu_ds(); - fn test_field___fpu_rsrv3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize - }, - 30usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_rsrv3) - ) - ); - } - test_field___fpu_rsrv3(); - fn test_field___fpu_mxcsr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_mxcsr) - ) - ); - } - test_field___fpu_mxcsr(); - fn test_field___fpu_mxcsrmask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_mxcsrmask) - ) - ); - } - test_field___fpu_mxcsrmask(); - fn test_field___fpu_stmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = 
uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_stmm0) - ) - ); - } - test_field___fpu_stmm0(); - fn test_field___fpu_stmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_stmm1) - ) - ); - } - test_field___fpu_stmm1(); - fn test_field___fpu_stmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_stmm2) - ) - ); - } - test_field___fpu_stmm2(); - fn test_field___fpu_stmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_stmm3) - ) - ); - } - test_field___fpu_stmm3(); - fn test_field___fpu_stmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_stmm4) - ) - ); - } - test_field___fpu_stmm4(); - fn test_field___fpu_stmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_stmm5) - ) - ); - } - test_field___fpu_stmm5(); - fn test_field___fpu_stmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_stmm6) - ) - ); - } - test_field___fpu_stmm6(); - fn test_field___fpu_stmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_stmm7) - ) - ); - } - test_field___fpu_stmm7(); - fn test_field___fpu_xmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm0) - ) - ); - } - test_field___fpu_xmm0(); - fn test_field___fpu_xmm1() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm1) - ) - ); - } - test_field___fpu_xmm1(); - fn test_field___fpu_xmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm2) - ) - ); - } - test_field___fpu_xmm2(); - fn test_field___fpu_xmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm3) - ) - ); - } - test_field___fpu_xmm3(); - fn test_field___fpu_xmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize - }, - 232usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm4) - ) - ); - } - test_field___fpu_xmm4(); - fn test_field___fpu_xmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize - }, - 248usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm5) - ) - ); - } - test_field___fpu_xmm5(); - fn test_field___fpu_xmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize - }, - 264usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm6) - ) - ); - } - test_field___fpu_xmm6(); - fn test_field___fpu_xmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize - }, - 280usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm7) - ) - ); - } - test_field___fpu_xmm7(); - fn test_field___fpu_xmm8() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm8) as usize - ptr as usize - }, - 296usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm8) - ) - ); - } - test_field___fpu_xmm8(); - fn test_field___fpu_xmm9() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm9) as usize - ptr as usize - }, - 312usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm9) - ) - ); - } - test_field___fpu_xmm9(); - fn test_field___fpu_xmm10() { - assert_eq!( - 
unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm10) as usize - ptr as usize - }, - 328usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm10) - ) - ); - } - test_field___fpu_xmm10(); - fn test_field___fpu_xmm11() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm11) as usize - ptr as usize - }, - 344usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm11) - ) - ); - } - test_field___fpu_xmm11(); - fn test_field___fpu_xmm12() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm12) as usize - ptr as usize - }, - 360usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm12) - ) - ); - } - test_field___fpu_xmm12(); - fn test_field___fpu_xmm13() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm13) as usize - ptr as usize - }, - 376usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm13) - ) - ); - } - test_field___fpu_xmm13(); - fn test_field___fpu_xmm14() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm14) as usize - ptr as usize - }, - 392usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm14) - ) - ); - } - test_field___fpu_xmm14(); - fn test_field___fpu_xmm15() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm15) as usize - ptr as usize - }, - 408usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_xmm15) - ) - ); - } - test_field___fpu_xmm15(); - fn test_field___fpu_rsrv4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize - }, - 424usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_rsrv4) - ) - ); - } - test_field___fpu_rsrv4(); - fn test_field___fpu_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_float_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize - }, - 520usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_float_state64), - "::", - stringify!(__fpu_reserved1) - ) - ); - } - test_field___fpu_reserved1(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_reserved) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + 
stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_fcw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize }, + 10usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_fsw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_ftw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize }, + 13usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_rsrv1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize }, + 14usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_fop) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_ip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_cs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize }, + 22usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_rsrv2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_dp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_ds) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize }, + 30usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_rsrv3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_mxcsr) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize }, + 36usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_mxcsrmask) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_stmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_stmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_stmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + 
stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_stmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_stmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_stmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_stmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_stmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize }, + 232usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize }, + 248usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize }, + 264usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize }, + 280usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm8) as usize - ptr as usize }, + 296usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm8) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm9) as usize - ptr as usize }, + 312usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm9) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm10) as usize - ptr as usize }, + 328usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm10) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm11) as usize - ptr as usize }, + 344usize, + concat!( + "Offset of 
field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm11) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm12) as usize - ptr as usize }, + 360usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm12) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm13) as usize - ptr as usize }, + 376usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm13) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm14) as usize - ptr as usize }, + 392usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm14) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm15) as usize - ptr as usize }, + 408usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_xmm15) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize }, + 424usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_rsrv4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize }, + 520usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_float_state64), + "::", + stringify!(__fpu_reserved1) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -6549,6 +4759,8 @@ pub struct __darwin_x86_avx_state64 { } #[test] fn bindgen_test_layout___darwin_x86_avx_state64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_avx_state64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_avx_state64>(), 844usize, @@ -6559,975 +4771,576 @@ fn bindgen_test_layout___darwin_x86_avx_state64() { 4usize, concat!("Alignment of ", stringify!(__darwin_x86_avx_state64)) ); - fn test_field___fpu_reserved() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_reserved) - ) - ); - } - test_field___fpu_reserved(); - fn test_field___fpu_fcw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_fcw) - ) - ); - } - test_field___fpu_fcw(); - fn test_field___fpu_fsw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize - }, - 10usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_fsw) - ) - ); - } - test_field___fpu_fsw(); - fn test_field___fpu_ftw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ftw) - ) - ); - } - test_field___fpu_ftw(); - fn 
test_field___fpu_rsrv1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize - }, - 13usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_rsrv1) - ) - ); - } - test_field___fpu_rsrv1(); - fn test_field___fpu_fop() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize - }, - 14usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_fop) - ) - ); - } - test_field___fpu_fop(); - fn test_field___fpu_ip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ip) - ) - ); - } - test_field___fpu_ip(); - fn test_field___fpu_cs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_cs) - ) - ); - } - test_field___fpu_cs(); - fn test_field___fpu_rsrv2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize - }, - 22usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_rsrv2) - ) - ); - } - test_field___fpu_rsrv2(); - fn test_field___fpu_dp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_dp) - ) - ); - } - test_field___fpu_dp(); - fn test_field___fpu_ds() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ds) - ) - ); - } - test_field___fpu_ds(); - fn test_field___fpu_rsrv3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize - }, - 30usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_rsrv3) - ) - ); - } - test_field___fpu_rsrv3(); - fn test_field___fpu_mxcsr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_mxcsr) - ) - ); - } - test_field___fpu_mxcsr(); - fn test_field___fpu_mxcsrmask() { - assert_eq!( - 
unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_mxcsrmask) - ) - ); - } - test_field___fpu_mxcsrmask(); - fn test_field___fpu_stmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_stmm0) - ) - ); - } - test_field___fpu_stmm0(); - fn test_field___fpu_stmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_stmm1) - ) - ); - } - test_field___fpu_stmm1(); - fn test_field___fpu_stmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_stmm2) - ) - ); - } - test_field___fpu_stmm2(); - fn test_field___fpu_stmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_stmm3) - ) - ); - } - test_field___fpu_stmm3(); - fn test_field___fpu_stmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_stmm4) - ) - ); - } - test_field___fpu_stmm4(); - fn test_field___fpu_stmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_stmm5) - ) - ); - } - test_field___fpu_stmm5(); - fn test_field___fpu_stmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_stmm6) - ) - ); - } - test_field___fpu_stmm6(); - fn test_field___fpu_stmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_stmm7) - ) - ); - } - test_field___fpu_stmm7(); - fn 
test_field___fpu_xmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm0) - ) - ); - } - test_field___fpu_xmm0(); - fn test_field___fpu_xmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm1) - ) - ); - } - test_field___fpu_xmm1(); - fn test_field___fpu_xmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm2) - ) - ); - } - test_field___fpu_xmm2(); - fn test_field___fpu_xmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm3) - ) - ); - } - test_field___fpu_xmm3(); - fn test_field___fpu_xmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize - }, - 232usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm4) - ) - ); - } - test_field___fpu_xmm4(); - fn test_field___fpu_xmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize - }, - 248usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm5) - ) - ); - } - test_field___fpu_xmm5(); - fn test_field___fpu_xmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize - }, - 264usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm6) - ) - ); - } - test_field___fpu_xmm6(); - fn test_field___fpu_xmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize - }, - 280usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm7) - ) - ); - } - test_field___fpu_xmm7(); - fn test_field___fpu_xmm8() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm8) as usize - ptr as usize - }, - 296usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm8) - ) - ); - } - test_field___fpu_xmm8(); - fn 
test_field___fpu_xmm9() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm9) as usize - ptr as usize - }, - 312usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm9) - ) - ); - } - test_field___fpu_xmm9(); - fn test_field___fpu_xmm10() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm10) as usize - ptr as usize - }, - 328usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm10) - ) - ); - } - test_field___fpu_xmm10(); - fn test_field___fpu_xmm11() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm11) as usize - ptr as usize - }, - 344usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm11) - ) - ); - } - test_field___fpu_xmm11(); - fn test_field___fpu_xmm12() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm12) as usize - ptr as usize - }, - 360usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm12) - ) - ); - } - test_field___fpu_xmm12(); - fn test_field___fpu_xmm13() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm13) as usize - ptr as usize - }, - 376usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm13) - ) - ); - } - test_field___fpu_xmm13(); - fn test_field___fpu_xmm14() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm14) as usize - ptr as usize - }, - 392usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm14) - ) - ); - } - test_field___fpu_xmm14(); - fn test_field___fpu_xmm15() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm15) as usize - ptr as usize - }, - 408usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_xmm15) - ) - ); - } - test_field___fpu_xmm15(); - fn test_field___fpu_rsrv4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize - }, - 424usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_rsrv4) - ) - ); - } - test_field___fpu_rsrv4(); - fn test_field___fpu_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize - }, - 520usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_reserved1) - ) - ); - } - 
test_field___fpu_reserved1(); - fn test_field___avx_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__avx_reserved1) as usize - ptr as usize - }, - 524usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__avx_reserved1) - ) - ); - } - test_field___avx_reserved1(); - fn test_field___fpu_ymmh0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh0) as usize - ptr as usize - }, - 588usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh0) - ) - ); - } - test_field___fpu_ymmh0(); - fn test_field___fpu_ymmh1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh1) as usize - ptr as usize - }, - 604usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh1) - ) - ); - } - test_field___fpu_ymmh1(); - fn test_field___fpu_ymmh2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh2) as usize - ptr as usize - }, - 620usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh2) - ) - ); - } - test_field___fpu_ymmh2(); - fn test_field___fpu_ymmh3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh3) as usize - ptr as usize - }, - 636usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh3) - ) - ); - } - test_field___fpu_ymmh3(); - fn test_field___fpu_ymmh4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh4) as usize - ptr as usize - }, - 652usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh4) - ) - ); - } - test_field___fpu_ymmh4(); - fn test_field___fpu_ymmh5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh5) as usize - ptr as usize - }, - 668usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh5) - ) - ); - } - test_field___fpu_ymmh5(); - fn test_field___fpu_ymmh6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh6) as usize - ptr as usize - }, - 684usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh6) - ) - ); - } - test_field___fpu_ymmh6(); - fn test_field___fpu_ymmh7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh7) as usize - ptr as usize - }, - 700usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - 
stringify!(__fpu_ymmh7) - ) - ); - } - test_field___fpu_ymmh7(); - fn test_field___fpu_ymmh8() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh8) as usize - ptr as usize - }, - 716usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh8) - ) - ); - } - test_field___fpu_ymmh8(); - fn test_field___fpu_ymmh9() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh9) as usize - ptr as usize - }, - 732usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh9) - ) - ); - } - test_field___fpu_ymmh9(); - fn test_field___fpu_ymmh10() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh10) as usize - ptr as usize - }, - 748usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh10) - ) - ); - } - test_field___fpu_ymmh10(); - fn test_field___fpu_ymmh11() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh11) as usize - ptr as usize - }, - 764usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh11) - ) - ); - } - test_field___fpu_ymmh11(); - fn test_field___fpu_ymmh12() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh12) as usize - ptr as usize - }, - 780usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh12) - ) - ); - } - test_field___fpu_ymmh12(); - fn test_field___fpu_ymmh13() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh13) as usize - ptr as usize - }, - 796usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh13) - ) - ); - } - test_field___fpu_ymmh13(); - fn test_field___fpu_ymmh14() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh14) as usize - ptr as usize - }, - 812usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh14) - ) - ); - } - test_field___fpu_ymmh14(); - fn test_field___fpu_ymmh15() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh15) as usize - ptr as usize - }, - 828usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx_state64), - "::", - stringify!(__fpu_ymmh15) - ) - ); - } - test_field___fpu_ymmh15(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_reserved) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fcw) 
as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_fcw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize }, + 10usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_fsw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ftw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize }, + 13usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_rsrv1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize }, + 14usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_fop) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_cs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize }, + 22usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_rsrv2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_dp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ds) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize }, + 30usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_rsrv3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_mxcsr) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize }, + 36usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_mxcsrmask) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_stmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_stmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_stmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize }, + 88usize, + concat!( 
+ "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_stmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_stmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_stmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_stmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_stmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize }, + 232usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize }, + 248usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize }, + 264usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize }, + 280usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm8) as usize - ptr as usize }, + 296usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm8) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm9) as usize - ptr as usize }, + 312usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm9) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm10) as usize - ptr as usize }, + 328usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm10) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm11) as usize - ptr as usize }, + 344usize, + concat!( + "Offset of field: ", 
+ stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm11) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm12) as usize - ptr as usize }, + 360usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm12) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm13) as usize - ptr as usize }, + 376usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm13) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm14) as usize - ptr as usize }, + 392usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm14) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm15) as usize - ptr as usize }, + 408usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_xmm15) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize }, + 424usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_rsrv4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize }, + 520usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__avx_reserved1) as usize - ptr as usize }, + 524usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__avx_reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh0) as usize - ptr as usize }, + 588usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh1) as usize - ptr as usize }, + 604usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh2) as usize - ptr as usize }, + 620usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh3) as usize - ptr as usize }, + 636usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh4) as usize - ptr as usize }, + 652usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh5) as usize - ptr as usize }, + 668usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh6) as usize - ptr as usize }, + 684usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh7) as usize - ptr as usize }, + 700usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh8) as usize - ptr as usize }, + 716usize, + concat!( + "Offset 
of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh8) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh9) as usize - ptr as usize }, + 732usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh9) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh10) as usize - ptr as usize }, + 748usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh10) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh11) as usize - ptr as usize }, + 764usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh11) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh12) as usize - ptr as usize }, + 780usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh12) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh13) as usize - ptr as usize }, + 796usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh13) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh14) as usize - ptr as usize }, + 812usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh14) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh15) as usize - ptr as usize }, + 828usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx_state64), + "::", + stringify!(__fpu_ymmh15) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -7632,6 +5445,8 @@ pub struct __darwin_x86_avx512_state64 { } #[test] fn bindgen_test_layout___darwin_x86_avx512_state64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_avx512_state64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_avx512_state64>(), 2444usize, @@ -7642,1655 +5457,976 @@ fn bindgen_test_layout___darwin_x86_avx512_state64() { 4usize, concat!("Alignment of ", stringify!(__darwin_x86_avx512_state64)) ); - fn test_field___fpu_reserved() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_reserved) - ) - ); - } - test_field___fpu_reserved(); - fn test_field___fpu_fcw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_fcw) - ) - ); - } - test_field___fpu_fcw(); - fn test_field___fpu_fsw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize - }, - 10usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_fsw) - ) - ); - } - test_field___fpu_fsw(); - fn test_field___fpu_ftw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = 
uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ftw) - ) - ); - } - test_field___fpu_ftw(); - fn test_field___fpu_rsrv1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize - }, - 13usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_rsrv1) - ) - ); - } - test_field___fpu_rsrv1(); - fn test_field___fpu_fop() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize - }, - 14usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_fop) - ) - ); - } - test_field___fpu_fop(); - fn test_field___fpu_ip() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ip) - ) - ); - } - test_field___fpu_ip(); - fn test_field___fpu_cs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize - }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_cs) - ) - ); - } - test_field___fpu_cs(); - fn test_field___fpu_rsrv2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize - }, - 22usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_rsrv2) - ) - ); - } - test_field___fpu_rsrv2(); - fn test_field___fpu_dp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_dp) - ) - ); - } - test_field___fpu_dp(); - fn test_field___fpu_ds() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize - }, - 28usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ds) - ) - ); - } - test_field___fpu_ds(); - fn test_field___fpu_rsrv3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize - }, - 30usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_rsrv3) - ) - ); - } - test_field___fpu_rsrv3(); - fn test_field___fpu_mxcsr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr 
= uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_mxcsr) - ) - ); - } - test_field___fpu_mxcsr(); - fn test_field___fpu_mxcsrmask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize - }, - 36usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_mxcsrmask) - ) - ); - } - test_field___fpu_mxcsrmask(); - fn test_field___fpu_stmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_stmm0) - ) - ); - } - test_field___fpu_stmm0(); - fn test_field___fpu_stmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_stmm1) - ) - ); - } - test_field___fpu_stmm1(); - fn test_field___fpu_stmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_stmm2) - ) - ); - } - test_field___fpu_stmm2(); - fn test_field___fpu_stmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_stmm3) - ) - ); - } - test_field___fpu_stmm3(); - fn test_field___fpu_stmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_stmm4) - ) - ); - } - test_field___fpu_stmm4(); - fn test_field___fpu_stmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_stmm5) - ) - ); - } - test_field___fpu_stmm5(); - fn test_field___fpu_stmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_stmm6) - ) - ); - } - test_field___fpu_stmm6(); - fn test_field___fpu_stmm7() { - assert_eq!( - unsafe { - let 
uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_stmm7) - ) - ); - } - test_field___fpu_stmm7(); - fn test_field___fpu_xmm0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm0) - ) - ); - } - test_field___fpu_xmm0(); - fn test_field___fpu_xmm1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm1) - ) - ); - } - test_field___fpu_xmm1(); - fn test_field___fpu_xmm2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm2) - ) - ); - } - test_field___fpu_xmm2(); - fn test_field___fpu_xmm3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm3) - ) - ); - } - test_field___fpu_xmm3(); - fn test_field___fpu_xmm4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize - }, - 232usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm4) - ) - ); - } - test_field___fpu_xmm4(); - fn test_field___fpu_xmm5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize - }, - 248usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm5) - ) - ); - } - test_field___fpu_xmm5(); - fn test_field___fpu_xmm6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize - }, - 264usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm6) - ) - ); - } - test_field___fpu_xmm6(); - fn test_field___fpu_xmm7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize - }, - 280usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm7) - ) - ); - } - test_field___fpu_xmm7(); - fn 
test_field___fpu_xmm8() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm8) as usize - ptr as usize - }, - 296usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm8) - ) - ); - } - test_field___fpu_xmm8(); - fn test_field___fpu_xmm9() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm9) as usize - ptr as usize - }, - 312usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm9) - ) - ); - } - test_field___fpu_xmm9(); - fn test_field___fpu_xmm10() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm10) as usize - ptr as usize - }, - 328usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm10) - ) - ); - } - test_field___fpu_xmm10(); - fn test_field___fpu_xmm11() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm11) as usize - ptr as usize - }, - 344usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm11) - ) - ); - } - test_field___fpu_xmm11(); - fn test_field___fpu_xmm12() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm12) as usize - ptr as usize - }, - 360usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm12) - ) - ); - } - test_field___fpu_xmm12(); - fn test_field___fpu_xmm13() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm13) as usize - ptr as usize - }, - 376usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm13) - ) - ); - } - test_field___fpu_xmm13(); - fn test_field___fpu_xmm14() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm14) as usize - ptr as usize - }, - 392usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm14) - ) - ); - } - test_field___fpu_xmm14(); - fn test_field___fpu_xmm15() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_xmm15) as usize - ptr as usize - }, - 408usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_xmm15) - ) - ); - } - test_field___fpu_xmm15(); - fn test_field___fpu_rsrv4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize - }, - 424usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - 
stringify!(__fpu_rsrv4) - ) - ); - } - test_field___fpu_rsrv4(); - fn test_field___fpu_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize - }, - 520usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_reserved1) - ) - ); - } - test_field___fpu_reserved1(); - fn test_field___avx_reserved1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__avx_reserved1) as usize - ptr as usize - }, - 524usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__avx_reserved1) - ) - ); - } - test_field___avx_reserved1(); - fn test_field___fpu_ymmh0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh0) as usize - ptr as usize - }, - 588usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh0) - ) - ); - } - test_field___fpu_ymmh0(); - fn test_field___fpu_ymmh1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh1) as usize - ptr as usize - }, - 604usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh1) - ) - ); - } - test_field___fpu_ymmh1(); - fn test_field___fpu_ymmh2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh2) as usize - ptr as usize - }, - 620usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh2) - ) - ); - } - test_field___fpu_ymmh2(); - fn test_field___fpu_ymmh3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh3) as usize - ptr as usize - }, - 636usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh3) - ) - ); - } - test_field___fpu_ymmh3(); - fn test_field___fpu_ymmh4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh4) as usize - ptr as usize - }, - 652usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh4) - ) - ); - } - test_field___fpu_ymmh4(); - fn test_field___fpu_ymmh5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh5) as usize - ptr as usize - }, - 668usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh5) - ) - ); - } - test_field___fpu_ymmh5(); - fn test_field___fpu_ymmh6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh6) as usize - ptr as usize 
- }, - 684usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh6) - ) - ); - } - test_field___fpu_ymmh6(); - fn test_field___fpu_ymmh7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh7) as usize - ptr as usize - }, - 700usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh7) - ) - ); - } - test_field___fpu_ymmh7(); - fn test_field___fpu_ymmh8() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh8) as usize - ptr as usize - }, - 716usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh8) - ) - ); - } - test_field___fpu_ymmh8(); - fn test_field___fpu_ymmh9() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh9) as usize - ptr as usize - }, - 732usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh9) - ) - ); - } - test_field___fpu_ymmh9(); - fn test_field___fpu_ymmh10() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh10) as usize - ptr as usize - }, - 748usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh10) - ) - ); - } - test_field___fpu_ymmh10(); - fn test_field___fpu_ymmh11() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh11) as usize - ptr as usize - }, - 764usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh11) - ) - ); - } - test_field___fpu_ymmh11(); - fn test_field___fpu_ymmh12() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh12) as usize - ptr as usize - }, - 780usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh12) - ) - ); - } - test_field___fpu_ymmh12(); - fn test_field___fpu_ymmh13() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh13) as usize - ptr as usize - }, - 796usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh13) - ) - ); - } - test_field___fpu_ymmh13(); - fn test_field___fpu_ymmh14() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh14) as usize - ptr as usize - }, - 812usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh14) - ) - ); - } - test_field___fpu_ymmh14(); - fn test_field___fpu_ymmh15() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - 
let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_ymmh15) as usize - ptr as usize - }, - 828usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_ymmh15) - ) - ); - } - test_field___fpu_ymmh15(); - fn test_field___fpu_k0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k0) as usize - ptr as usize - }, - 844usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_k0) - ) - ); - } - test_field___fpu_k0(); - fn test_field___fpu_k1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k1) as usize - ptr as usize - }, - 852usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_k1) - ) - ); - } - test_field___fpu_k1(); - fn test_field___fpu_k2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k2) as usize - ptr as usize - }, - 860usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_k2) - ) - ); - } - test_field___fpu_k2(); - fn test_field___fpu_k3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k3) as usize - ptr as usize - }, - 868usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_k3) - ) - ); - } - test_field___fpu_k3(); - fn test_field___fpu_k4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k4) as usize - ptr as usize - }, - 876usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_k4) - ) - ); - } - test_field___fpu_k4(); - fn test_field___fpu_k5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k5) as usize - ptr as usize - }, - 884usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_k5) - ) - ); - } - test_field___fpu_k5(); - fn test_field___fpu_k6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k6) as usize - ptr as usize - }, - 892usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_k6) - ) - ); - } - test_field___fpu_k6(); - fn test_field___fpu_k7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_k7) as usize - ptr as usize - }, - 900usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_k7) - ) - ); - } - test_field___fpu_k7(); - fn test_field___fpu_zmmh0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = 
uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh0) as usize - ptr as usize - }, - 908usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh0) - ) - ); - } - test_field___fpu_zmmh0(); - fn test_field___fpu_zmmh1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh1) as usize - ptr as usize - }, - 940usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh1) - ) - ); - } - test_field___fpu_zmmh1(); - fn test_field___fpu_zmmh2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh2) as usize - ptr as usize - }, - 972usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh2) - ) - ); - } - test_field___fpu_zmmh2(); - fn test_field___fpu_zmmh3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh3) as usize - ptr as usize - }, - 1004usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh3) - ) - ); - } - test_field___fpu_zmmh3(); - fn test_field___fpu_zmmh4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh4) as usize - ptr as usize - }, - 1036usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh4) - ) - ); - } - test_field___fpu_zmmh4(); - fn test_field___fpu_zmmh5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh5) as usize - ptr as usize - }, - 1068usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh5) - ) - ); - } - test_field___fpu_zmmh5(); - fn test_field___fpu_zmmh6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh6) as usize - ptr as usize - }, - 1100usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh6) - ) - ); - } - test_field___fpu_zmmh6(); - fn test_field___fpu_zmmh7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh7) as usize - ptr as usize - }, - 1132usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh7) - ) - ); - } - test_field___fpu_zmmh7(); - fn test_field___fpu_zmmh8() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh8) as usize - ptr as usize - }, - 1164usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh8) - ) - ); - } - test_field___fpu_zmmh8(); - fn test_field___fpu_zmmh9() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh9) as usize - ptr as usize - }, - 1196usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh9) - ) - ); - } - test_field___fpu_zmmh9(); - fn test_field___fpu_zmmh10() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh10) as usize - ptr as usize - }, - 1228usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh10) - ) - ); - } - test_field___fpu_zmmh10(); - fn test_field___fpu_zmmh11() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh11) as usize - ptr as usize - }, - 1260usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh11) - ) - ); - } - test_field___fpu_zmmh11(); - fn test_field___fpu_zmmh12() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh12) as usize - ptr as usize - }, - 1292usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh12) - ) - ); - } - test_field___fpu_zmmh12(); - fn test_field___fpu_zmmh13() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh13) as usize - ptr as usize - }, - 1324usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh13) - ) - ); - } - test_field___fpu_zmmh13(); - fn test_field___fpu_zmmh14() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh14) as usize - ptr as usize - }, - 1356usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh14) - ) - ); - } - test_field___fpu_zmmh14(); - fn test_field___fpu_zmmh15() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmmh15) as usize - ptr as usize - }, - 1388usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmmh15) - ) - ); - } - test_field___fpu_zmmh15(); - fn test_field___fpu_zmm16() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm16) as usize - ptr as usize - }, - 1420usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm16) - ) - ); - } - test_field___fpu_zmm16(); - fn test_field___fpu_zmm17() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm17) as usize - ptr as usize - }, - 1484usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm17) - ) - 
); - } - test_field___fpu_zmm17(); - fn test_field___fpu_zmm18() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm18) as usize - ptr as usize - }, - 1548usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm18) - ) - ); - } - test_field___fpu_zmm18(); - fn test_field___fpu_zmm19() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm19) as usize - ptr as usize - }, - 1612usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm19) - ) - ); - } - test_field___fpu_zmm19(); - fn test_field___fpu_zmm20() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm20) as usize - ptr as usize - }, - 1676usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm20) - ) - ); - } - test_field___fpu_zmm20(); - fn test_field___fpu_zmm21() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm21) as usize - ptr as usize - }, - 1740usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm21) - ) - ); - } - test_field___fpu_zmm21(); - fn test_field___fpu_zmm22() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm22) as usize - ptr as usize - }, - 1804usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm22) - ) - ); - } - test_field___fpu_zmm22(); - fn test_field___fpu_zmm23() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm23) as usize - ptr as usize - }, - 1868usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm23) - ) - ); - } - test_field___fpu_zmm23(); - fn test_field___fpu_zmm24() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm24) as usize - ptr as usize - }, - 1932usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm24) - ) - ); - } - test_field___fpu_zmm24(); - fn test_field___fpu_zmm25() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm25) as usize - ptr as usize - }, - 1996usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm25) - ) - ); - } - test_field___fpu_zmm25(); - fn test_field___fpu_zmm26() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm26) as usize - ptr as usize - }, - 2060usize, - concat!( - "Offset of field: ", - 
stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm26) - ) - ); - } - test_field___fpu_zmm26(); - fn test_field___fpu_zmm27() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm27) as usize - ptr as usize - }, - 2124usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm27) - ) - ); - } - test_field___fpu_zmm27(); - fn test_field___fpu_zmm28() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm28) as usize - ptr as usize - }, - 2188usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm28) - ) - ); - } - test_field___fpu_zmm28(); - fn test_field___fpu_zmm29() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm29) as usize - ptr as usize - }, - 2252usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm29) - ) - ); - } - test_field___fpu_zmm29(); - fn test_field___fpu_zmm30() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm30) as usize - ptr as usize - }, - 2316usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm30) - ) - ); - } - test_field___fpu_zmm30(); - fn test_field___fpu_zmm31() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_avx512_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fpu_zmm31) as usize - ptr as usize - }, - 2380usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_avx512_state64), - "::", - stringify!(__fpu_zmm31) - ) - ); - } - test_field___fpu_zmm31(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_reserved) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fcw) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_fcw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fsw) as usize - ptr as usize }, + 10usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_fsw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ftw) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ftw) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv1) as usize - ptr as usize }, + 13usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_rsrv1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_fop) as usize - ptr as usize }, + 14usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_fop) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ip) as usize - ptr as usize 
}, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ip) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_cs) as usize - ptr as usize }, + 20usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_cs) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv2) as usize - ptr as usize }, + 22usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_rsrv2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_dp) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_dp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ds) as usize - ptr as usize }, + 28usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ds) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_rsrv3) as usize - ptr as usize }, + 30usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_rsrv3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsr) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_mxcsr) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_mxcsrmask) as usize - ptr as usize }, + 36usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_mxcsrmask) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm0) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_stmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm1) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_stmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm2) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_stmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm3) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_stmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm4) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_stmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm5) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_stmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm6) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_stmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_stmm7) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_stmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm0) as 
usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm1) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm2) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm3) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm4) as usize - ptr as usize }, + 232usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm5) as usize - ptr as usize }, + 248usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm6) as usize - ptr as usize }, + 264usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm7) as usize - ptr as usize }, + 280usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm8) as usize - ptr as usize }, + 296usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm8) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm9) as usize - ptr as usize }, + 312usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm9) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm10) as usize - ptr as usize }, + 328usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm10) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm11) as usize - ptr as usize }, + 344usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm11) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm12) as usize - ptr as usize }, + 360usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm12) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm13) as usize - ptr as usize }, + 376usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm13) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm14) as usize - ptr as usize }, + 392usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm14) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_xmm15) as usize - ptr as usize }, + 408usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_xmm15) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).__fpu_rsrv4) as usize - ptr as usize }, + 424usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_rsrv4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_reserved1) as usize - ptr as usize }, + 520usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__avx_reserved1) as usize - ptr as usize }, + 524usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__avx_reserved1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh0) as usize - ptr as usize }, + 588usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh1) as usize - ptr as usize }, + 604usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh2) as usize - ptr as usize }, + 620usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh3) as usize - ptr as usize }, + 636usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh4) as usize - ptr as usize }, + 652usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh5) as usize - ptr as usize }, + 668usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh6) as usize - ptr as usize }, + 684usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh7) as usize - ptr as usize }, + 700usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh8) as usize - ptr as usize }, + 716usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh8) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh9) as usize - ptr as usize }, + 732usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh9) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh10) as usize - ptr as usize }, + 748usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh10) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh11) as usize - ptr as usize }, + 764usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh11) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh12) as usize - ptr as usize }, + 780usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + 
"::", + stringify!(__fpu_ymmh12) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh13) as usize - ptr as usize }, + 796usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh13) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh14) as usize - ptr as usize }, + 812usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh14) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_ymmh15) as usize - ptr as usize }, + 828usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_ymmh15) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k0) as usize - ptr as usize }, + 844usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_k0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k1) as usize - ptr as usize }, + 852usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_k1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k2) as usize - ptr as usize }, + 860usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_k2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k3) as usize - ptr as usize }, + 868usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_k3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k4) as usize - ptr as usize }, + 876usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_k4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k5) as usize - ptr as usize }, + 884usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_k5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k6) as usize - ptr as usize }, + 892usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_k6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_k7) as usize - ptr as usize }, + 900usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_k7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh0) as usize - ptr as usize }, + 908usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh1) as usize - ptr as usize }, + 940usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh2) as usize - ptr as usize }, + 972usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh3) as usize - ptr as usize }, + 1004usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh4) as usize - ptr as usize }, + 1036usize, + concat!( + "Offset of field: ", + 
stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh5) as usize - ptr as usize }, + 1068usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh6) as usize - ptr as usize }, + 1100usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh7) as usize - ptr as usize }, + 1132usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh7) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh8) as usize - ptr as usize }, + 1164usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh8) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh9) as usize - ptr as usize }, + 1196usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh9) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh10) as usize - ptr as usize }, + 1228usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh10) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh11) as usize - ptr as usize }, + 1260usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh11) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh12) as usize - ptr as usize }, + 1292usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh12) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh13) as usize - ptr as usize }, + 1324usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh13) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh14) as usize - ptr as usize }, + 1356usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh14) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmmh15) as usize - ptr as usize }, + 1388usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmmh15) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm16) as usize - ptr as usize }, + 1420usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm16) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm17) as usize - ptr as usize }, + 1484usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm17) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm18) as usize - ptr as usize }, + 1548usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm18) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm19) as usize - ptr as usize }, + 1612usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm19) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm20) as 
usize - ptr as usize }, + 1676usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm20) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm21) as usize - ptr as usize }, + 1740usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm21) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm22) as usize - ptr as usize }, + 1804usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm22) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm23) as usize - ptr as usize }, + 1868usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm23) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm24) as usize - ptr as usize }, + 1932usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm24) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm25) as usize - ptr as usize }, + 1996usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm25) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm26) as usize - ptr as usize }, + 2060usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm26) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm27) as usize - ptr as usize }, + 2124usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm27) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm28) as usize - ptr as usize }, + 2188usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm28) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm29) as usize - ptr as usize }, + 2252usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm29) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm30) as usize - ptr as usize }, + 2316usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm30) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fpu_zmm31) as usize - ptr as usize }, + 2380usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_avx512_state64), + "::", + stringify!(__fpu_zmm31) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -9302,6 +6438,8 @@ pub struct __darwin_x86_exception_state64 { } #[test] fn bindgen_test_layout___darwin_x86_exception_state64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_exception_state64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_exception_state64>(), 16usize, @@ -9312,74 +6450,46 @@ fn bindgen_test_layout___darwin_x86_exception_state64() { 8usize, concat!("Alignment of ", stringify!(__darwin_x86_exception_state64)) ); - fn test_field___trapno() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_exception_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__trapno) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_exception_state64), - "::", - stringify!(__trapno) 
- ) - ); - } - test_field___trapno(); - fn test_field___cpu() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_exception_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__cpu) as usize - ptr as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_exception_state64), - "::", - stringify!(__cpu) - ) - ); - } - test_field___cpu(); - fn test_field___err() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_exception_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__err) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_exception_state64), - "::", - stringify!(__err) - ) - ); - } - test_field___err(); - fn test_field___faultvaddr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_exception_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__faultvaddr) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_exception_state64), - "::", - stringify!(__faultvaddr) - ) - ); - } - test_field___faultvaddr(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__trapno) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_exception_state64), + "::", + stringify!(__trapno) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__cpu) as usize - ptr as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_exception_state64), + "::", + stringify!(__cpu) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__err) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_exception_state64), + "::", + stringify!(__err) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__faultvaddr) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_exception_state64), + "::", + stringify!(__faultvaddr) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -9395,6 +6505,8 @@ pub struct __darwin_x86_debug_state64 { } #[test] fn bindgen_test_layout___darwin_x86_debug_state64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_debug_state64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_debug_state64>(), 64usize, @@ -9405,142 +6517,86 @@ fn bindgen_test_layout___darwin_x86_debug_state64() { 8usize, concat!("Alignment of ", stringify!(__darwin_x86_debug_state64)) ); - fn test_field___dr0() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr0) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state64), - "::", - stringify!(__dr0) - ) - ); - } - test_field___dr0(); - fn test_field___dr1() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr1) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state64), - "::", - stringify!(__dr1) - ) - ); - } - test_field___dr1(); - fn test_field___dr2() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state64>::uninit(); - let ptr = uninit.as_ptr(); - 
::std::ptr::addr_of!((*ptr).__dr2) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state64), - "::", - stringify!(__dr2) - ) - ); - } - test_field___dr2(); - fn test_field___dr3() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr3) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state64), - "::", - stringify!(__dr3) - ) - ); - } - test_field___dr3(); - fn test_field___dr4() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr4) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state64), - "::", - stringify!(__dr4) - ) - ); - } - test_field___dr4(); - fn test_field___dr5() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr5) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state64), - "::", - stringify!(__dr5) - ) - ); - } - test_field___dr5(); - fn test_field___dr6() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr6) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state64), - "::", - stringify!(__dr6) - ) - ); - } - test_field___dr6(); - fn test_field___dr7() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_debug_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__dr7) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_debug_state64), - "::", - stringify!(__dr7) - ) - ); - } - test_field___dr7(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr0) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state64), + "::", + stringify!(__dr0) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr1) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state64), + "::", + stringify!(__dr1) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr2) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state64), + "::", + stringify!(__dr2) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr3) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state64), + "::", + stringify!(__dr3) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr4) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state64), + "::", + stringify!(__dr4) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr5) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state64), + "::", + stringify!(__dr5) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr6) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state64), + "::", + 
stringify!(__dr6) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__dr7) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_debug_state64), + "::", + stringify!(__dr7) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -9549,6 +6605,8 @@ pub struct __darwin_x86_cpmu_state64 { } #[test] fn bindgen_test_layout___darwin_x86_cpmu_state64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_x86_cpmu_state64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_x86_cpmu_state64>(), 128usize, @@ -9559,23 +6617,16 @@ fn bindgen_test_layout___darwin_x86_cpmu_state64() { 8usize, concat!("Alignment of ", stringify!(__darwin_x86_cpmu_state64)) ); - fn test_field___ctrs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_x86_cpmu_state64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ctrs) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_x86_cpmu_state64), - "::", - stringify!(__ctrs) - ) - ); - } - test_field___ctrs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ctrs) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_x86_cpmu_state64), + "::", + stringify!(__ctrs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -9586,6 +6637,8 @@ pub struct __darwin_mcontext32 { } #[test] fn bindgen_test_layout___darwin_mcontext32() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext32> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext32>(), 600usize, @@ -9596,57 +6649,36 @@ fn bindgen_test_layout___darwin_mcontext32() { 4usize, concat!("Alignment of ", stringify!(__darwin_mcontext32)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext32), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext32), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 76usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext32), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext32), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext32), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 76usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext32), + "::", + stringify!(__fs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, 
Clone)] @@ -9657,6 +6689,8 @@ pub struct __darwin_mcontext_avx32 { } #[test] fn bindgen_test_layout___darwin_mcontext_avx32() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext_avx32> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext_avx32>(), 792usize, @@ -9667,57 +6701,36 @@ fn bindgen_test_layout___darwin_mcontext_avx32() { 4usize, concat!("Alignment of ", stringify!(__darwin_mcontext_avx32)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx32), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx32), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 76usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx32), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx32), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx32), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 76usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx32), + "::", + stringify!(__fs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -9728,6 +6741,8 @@ pub struct __darwin_mcontext_avx512_32 { } #[test] fn bindgen_test_layout___darwin_mcontext_avx512_32() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext_avx512_32> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext_avx512_32>(), 1112usize, @@ -9738,57 +6753,36 @@ fn bindgen_test_layout___darwin_mcontext_avx512_32() { 4usize, concat!("Alignment of ", stringify!(__darwin_mcontext_avx512_32)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx512_32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_32), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx512_32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_32), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::<__darwin_mcontext_avx512_32>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 76usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_32), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_32), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 12usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_32), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 76usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_32), + "::", + stringify!(__fs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -9799,6 +6793,8 @@ pub struct __darwin_mcontext64 { } #[test] fn bindgen_test_layout___darwin_mcontext64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext64>(), 712usize, @@ -9809,57 +6805,36 @@ fn bindgen_test_layout___darwin_mcontext64() { 8usize, concat!("Alignment of ", stringify!(__darwin_mcontext64)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext64), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext64), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext64), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext64), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext64), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext64), + "::", + stringify!(__fs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -9870,6 +6845,8 @@ pub struct __darwin_mcontext64_full { } #[test] fn bindgen_test_layout___darwin_mcontext64_full() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext64_full> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext64_full>(), 744usize, @@ -9880,57 +6857,36 @@ fn bindgen_test_layout___darwin_mcontext64_full() { 
8usize, concat!("Alignment of ", stringify!(__darwin_mcontext64_full)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext64_full), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext64_full), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext64_full), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext64_full), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext64_full), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext64_full), + "::", + stringify!(__fs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -9941,6 +6897,8 @@ pub struct __darwin_mcontext_avx64 { } #[test] fn bindgen_test_layout___darwin_mcontext_avx64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext_avx64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext_avx64>(), 1032usize, @@ -9951,57 +6909,36 @@ fn bindgen_test_layout___darwin_mcontext_avx64() { 8usize, concat!("Alignment of ", stringify!(__darwin_mcontext_avx64)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx64), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx64), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx64), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + 
stringify!(__darwin_mcontext_avx64), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx64), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx64), + "::", + stringify!(__fs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -10012,6 +6949,8 @@ pub struct __darwin_mcontext_avx64_full { } #[test] fn bindgen_test_layout___darwin_mcontext_avx64_full() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext_avx64_full> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext_avx64_full>(), 1064usize, @@ -10022,57 +6961,36 @@ fn bindgen_test_layout___darwin_mcontext_avx64_full() { 8usize, concat!("Alignment of ", stringify!(__darwin_mcontext_avx64_full)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx64_full), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx64_full), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx64_full), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx64_full), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx64_full), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx64_full), + "::", + stringify!(__fs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -10083,6 +7001,8 @@ pub struct __darwin_mcontext_avx512_64 { } #[test] fn bindgen_test_layout___darwin_mcontext_avx512_64() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext_avx512_64> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext_avx512_64>(), 2632usize, @@ -10093,57 +7013,36 @@ fn bindgen_test_layout___darwin_mcontext_avx512_64() { 8usize, concat!("Alignment of ", stringify!(__darwin_mcontext_avx512_64)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx512_64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize 
- }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_64), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx512_64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_64), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx512_64>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_64), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_64), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_64), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_64), + "::", + stringify!(__fs) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -10154,6 +7053,8 @@ pub struct __darwin_mcontext_avx512_64_full { } #[test] fn bindgen_test_layout___darwin_mcontext_avx512_64_full() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_mcontext_avx512_64_full> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_mcontext_avx512_64_full>(), 2664usize, @@ -10164,57 +7065,36 @@ fn bindgen_test_layout___darwin_mcontext_avx512_64_full() { 8usize, concat!("Alignment of ", stringify!(__darwin_mcontext_avx512_64_full)) ); - fn test_field___es() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx512_64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_64_full), - "::", - stringify!(__es) - ) - ); - } - test_field___es(); - fn test_field___ss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx512_64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_64_full), - "::", - stringify!(__ss) - ) - ); - } - test_field___ss(); - fn test_field___fs() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_mcontext_avx512_64_full>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(__darwin_mcontext_avx512_64_full), - "::", - stringify!(__fs) - ) - ); - } - test_field___fs(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__es) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_64_full), + "::", + stringify!(__es) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__ss) as usize - ptr as usize }, 
+ 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_64_full), + "::", + stringify!(__ss) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__fs) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(__darwin_mcontext_avx512_64_full), + "::", + stringify!(__fs) + ) + ); } pub type mcontext_t = *mut __darwin_mcontext64; pub type pthread_attr_t = __darwin_pthread_attr_t; @@ -10227,6 +7107,8 @@ pub struct __darwin_sigaltstack { } #[test] fn bindgen_test_layout___darwin_sigaltstack() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_sigaltstack> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_sigaltstack>(), 24usize, @@ -10237,57 +7119,36 @@ fn bindgen_test_layout___darwin_sigaltstack() { 8usize, concat!("Alignment of ", stringify!(__darwin_sigaltstack)) ); - fn test_field_ss_sp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_sigaltstack>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ss_sp) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_sigaltstack), - "::", - stringify!(ss_sp) - ) - ); - } - test_field_ss_sp(); - fn test_field_ss_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_sigaltstack>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ss_size) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_sigaltstack), - "::", - stringify!(ss_size) - ) - ); - } - test_field_ss_size(); - fn test_field_ss_flags() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_sigaltstack>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ss_flags) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__darwin_sigaltstack), - "::", - stringify!(ss_flags) - ) - ); - } - test_field_ss_flags(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ss_sp) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_sigaltstack), + "::", + stringify!(ss_sp) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ss_size) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_sigaltstack), + "::", + stringify!(ss_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ss_flags) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__darwin_sigaltstack), + "::", + stringify!(ss_flags) + ) + ); } pub type stack_t = __darwin_sigaltstack; #[repr(C)] @@ -10302,6 +7163,8 @@ pub struct __darwin_ucontext { } #[test] fn bindgen_test_layout___darwin_ucontext() { + const UNINIT: ::std::mem::MaybeUninit<__darwin_ucontext> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__darwin_ucontext>(), 56usize, @@ -10312,112 +7175,69 @@ fn bindgen_test_layout___darwin_ucontext() { 8usize, concat!("Alignment of ", stringify!(__darwin_ucontext)) ); - fn test_field_uc_onstack() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_ucontext>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).uc_onstack) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__darwin_ucontext), - "::", - stringify!(uc_onstack) - ) - ); - } - test_field_uc_onstack(); - fn test_field_uc_sigmask() { - assert_eq!( - unsafe 
{ - let uninit = ::std::mem::MaybeUninit::<__darwin_ucontext>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).uc_sigmask) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__darwin_ucontext), - "::", - stringify!(uc_sigmask) - ) - ); - } - test_field_uc_sigmask(); - fn test_field_uc_stack() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_ucontext>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).uc_stack) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__darwin_ucontext), - "::", - stringify!(uc_stack) - ) - ); - } - test_field_uc_stack(); - fn test_field_uc_link() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_ucontext>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).uc_link) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__darwin_ucontext), - "::", - stringify!(uc_link) - ) - ); - } - test_field_uc_link(); - fn test_field_uc_mcsize() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_ucontext>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).uc_mcsize) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__darwin_ucontext), - "::", - stringify!(uc_mcsize) - ) - ); - } - test_field_uc_mcsize(); - fn test_field_uc_mcontext() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__darwin_ucontext>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).uc_mcontext) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(__darwin_ucontext), - "::", - stringify!(uc_mcontext) - ) - ); - } - test_field_uc_mcontext(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).uc_onstack) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__darwin_ucontext), + "::", + stringify!(uc_onstack) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).uc_sigmask) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(__darwin_ucontext), + "::", + stringify!(uc_sigmask) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).uc_stack) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__darwin_ucontext), + "::", + stringify!(uc_stack) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).uc_link) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(__darwin_ucontext), + "::", + stringify!(uc_link) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).uc_mcsize) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(__darwin_ucontext), + "::", + stringify!(uc_mcsize) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).uc_mcontext) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(__darwin_ucontext), + "::", + stringify!(uc_mcontext) + ) + ); } pub type ucontext_t = __darwin_ucontext; pub type sigset_t = __darwin_sigset_t; -pub type size_t = __darwin_size_t; pub type uid_t = __darwin_uid_t; #[repr(C)] #[derive(Copy, Clone)] @@ -10427,6 +7247,8 @@ pub union sigval { } #[test] fn bindgen_test_layout_sigval() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 8usize, @@ -10437,30 +7259,16 @@ fn bindgen_test_layout_sigval() { 
8usize, concat!("Alignment of ", stringify!(sigval)) ); - fn test_field_sival_int() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sival_int) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(sigval), "::", stringify!(sival_int)) - ); - } - test_field_sival_int(); - fn test_field_sival_ptr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sival_ptr) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(sigval), "::", stringify!(sival_ptr)) - ); - } - test_field_sival_ptr(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sival_int) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(sigval), "::", stringify!(sival_int)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sival_ptr) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(sigval), "::", stringify!(sival_ptr)) + ); } #[repr(C)] #[derive(Copy, Clone)] @@ -10473,6 +7281,8 @@ pub struct sigevent { } #[test] fn bindgen_test_layout_sigevent() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 32usize, @@ -10483,81 +7293,46 @@ fn bindgen_test_layout_sigevent() { 8usize, concat!("Alignment of ", stringify!(sigevent)) ); - fn test_field_sigev_notify() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sigev_notify) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(sigevent), - "::", - stringify!(sigev_notify) - ) - ); - } - test_field_sigev_notify(); - fn test_field_sigev_signo() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sigev_signo) as usize - ptr as usize - }, - 4usize, - concat!("Offset of field: ", stringify!(sigevent), "::", stringify!(sigev_signo)) - ); - } - test_field_sigev_signo(); - fn test_field_sigev_value() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sigev_value) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(sigevent), "::", stringify!(sigev_value)) - ); - } - test_field_sigev_value(); - fn test_field_sigev_notify_function() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sigev_notify_function) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(sigevent), - "::", - stringify!(sigev_notify_function) - ) - ); - } - test_field_sigev_notify_function(); - fn test_field_sigev_notify_attributes() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sigev_notify_attributes) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(sigevent), - "::", - stringify!(sigev_notify_attributes) - ) - ); - } - test_field_sigev_notify_attributes(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sigev_notify) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(sigevent), + "::", + stringify!(sigev_notify) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).sigev_signo) as usize - ptr as usize }, + 4usize, + concat!("Offset of field: ", stringify!(sigevent), "::", stringify!(sigev_signo)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sigev_value) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(sigevent), "::", stringify!(sigev_value)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sigev_notify_function) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(sigevent), + "::", + stringify!(sigev_notify_function) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sigev_notify_attributes) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(sigevent), + "::", + stringify!(sigev_notify_attributes) + ) + ); } #[repr(C)] #[derive(Copy, Clone)] @@ -10575,6 +7350,8 @@ pub struct __siginfo { } #[test] fn bindgen_test_layout___siginfo() { + const UNINIT: ::std::mem::MaybeUninit<__siginfo> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__siginfo>(), 104usize, @@ -10585,126 +7362,56 @@ fn bindgen_test_layout___siginfo() { 8usize, concat!("Alignment of ", stringify!(__siginfo)) ); - fn test_field_si_signo() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_signo) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_signo)) - ); - } - test_field_si_signo(); - fn test_field_si_errno() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_errno) as usize - ptr as usize - }, - 4usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_errno)) - ); - } - test_field_si_errno(); - fn test_field_si_code() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_code) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_code)) - ); - } - test_field_si_code(); - fn test_field_si_pid() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_pid) as usize - ptr as usize - }, - 12usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_pid)) - ); - } - test_field_si_pid(); - fn test_field_si_uid() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_uid) as usize - ptr as usize - }, - 16usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_uid)) - ); - } - test_field_si_uid(); - fn test_field_si_status() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_status) as usize - ptr as usize - }, - 20usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_status)) - ); - } - test_field_si_status(); - fn test_field_si_addr() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_addr) as usize - ptr as usize - }, - 24usize, - concat!("Offset of field: ", stringify!(__siginfo), 
"::", stringify!(si_addr)) - ); - } - test_field_si_addr(); - fn test_field_si_value() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_value) as usize - ptr as usize - }, - 32usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_value)) - ); - } - test_field_si_value(); - fn test_field_si_band() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).si_band) as usize - ptr as usize - }, - 40usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_band)) - ); - } - test_field_si_band(); - fn test_field___pad() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__siginfo>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__pad) as usize - ptr as usize - }, - 48usize, - concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(__pad)) - ); - } - test_field___pad(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_signo) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_signo)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_errno) as usize - ptr as usize }, + 4usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_errno)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_code) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_code)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_pid) as usize - ptr as usize }, + 12usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_pid)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_uid) as usize - ptr as usize }, + 16usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_uid)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_status) as usize - ptr as usize }, + 20usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_status)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_addr) as usize - ptr as usize }, + 24usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_addr)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_value) as usize - ptr as usize }, + 32usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_value)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).si_band) as usize - ptr as usize }, + 40usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(si_band)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__pad) as usize - ptr as usize }, + 48usize, + concat!("Offset of field: ", stringify!(__siginfo), "::", stringify!(__pad)) + ); } pub type siginfo_t = __siginfo; #[repr(C)] @@ -10717,6 +7424,8 @@ pub union __sigaction_u { } #[test] fn bindgen_test_layout___sigaction_u() { + const UNINIT: ::std::mem::MaybeUninit<__sigaction_u> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__sigaction_u>(), 8usize, @@ -10727,40 +7436,26 @@ fn bindgen_test_layout___sigaction_u() { 8usize, concat!("Alignment of ", stringify!(__sigaction_u)) ); - fn test_field___sa_handler() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__sigaction_u>::uninit(); - let ptr = uninit.as_ptr(); - 
::std::ptr::addr_of!((*ptr).__sa_handler) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__sigaction_u), - "::", - stringify!(__sa_handler) - ) - ); - } - test_field___sa_handler(); - fn test_field___sa_sigaction() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__sigaction_u>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sa_sigaction) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__sigaction_u), - "::", - stringify!(__sa_sigaction) - ) - ); - } - test_field___sa_sigaction(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sa_handler) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__sigaction_u), + "::", + stringify!(__sa_handler) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sa_sigaction) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__sigaction_u), + "::", + stringify!(__sa_sigaction) + ) + ); } #[repr(C)] #[derive(Copy, Clone)] @@ -10780,6 +7475,8 @@ pub struct __sigaction { } #[test] fn bindgen_test_layout___sigaction() { + const UNINIT: ::std::mem::MaybeUninit<__sigaction> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__sigaction>(), 24usize, @@ -10790,59 +7487,31 @@ fn bindgen_test_layout___sigaction() { 8usize, concat!("Alignment of ", stringify!(__sigaction)) ); - fn test_field___sigaction_u() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__sigaction>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sigaction_u) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__sigaction), - "::", - stringify!(__sigaction_u) - ) - ); - } - test_field___sigaction_u(); - fn test_field_sa_tramp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__sigaction>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sa_tramp) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(__sigaction), "::", stringify!(sa_tramp)) - ); - } - test_field_sa_tramp(); - fn test_field_sa_mask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__sigaction>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sa_mask) as usize - ptr as usize - }, - 16usize, - concat!("Offset of field: ", stringify!(__sigaction), "::", stringify!(sa_mask)) - ); - } - test_field_sa_mask(); - fn test_field_sa_flags() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__sigaction>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sa_flags) as usize - ptr as usize - }, - 20usize, - concat!("Offset of field: ", stringify!(__sigaction), "::", stringify!(sa_flags)) - ); - } - test_field_sa_flags(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sigaction_u) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__sigaction), + "::", + stringify!(__sigaction_u) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sa_tramp) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(__sigaction), "::", stringify!(sa_tramp)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sa_mask) as usize - ptr as usize }, + 16usize, + concat!("Offset of field: ", stringify!(__sigaction), "::", stringify!(sa_mask)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sa_flags) as usize - ptr as usize 
}, + 20usize, + concat!("Offset of field: ", stringify!(__sigaction), "::", stringify!(sa_flags)) + ); } #[repr(C)] #[derive(Copy, Clone)] @@ -10853,6 +7522,8 @@ pub struct sigaction { } #[test] fn bindgen_test_layout_sigaction() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -10863,47 +7534,26 @@ fn bindgen_test_layout_sigaction() { 8usize, concat!("Alignment of ", stringify!(sigaction)) ); - fn test_field___sigaction_u() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__sigaction_u) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(sigaction), - "::", - stringify!(__sigaction_u) - ) - ); - } - test_field___sigaction_u(); - fn test_field_sa_mask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sa_mask) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(sigaction), "::", stringify!(sa_mask)) - ); - } - test_field_sa_mask(); - fn test_field_sa_flags() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sa_flags) as usize - ptr as usize - }, - 12usize, - concat!("Offset of field: ", stringify!(sigaction), "::", stringify!(sa_flags)) - ); - } - test_field_sa_flags(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__sigaction_u) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(sigaction), + "::", + stringify!(__sigaction_u) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sa_mask) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(sigaction), "::", stringify!(sa_mask)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sa_flags) as usize - ptr as usize }, + 12usize, + concat!("Offset of field: ", stringify!(sigaction), "::", stringify!(sa_flags)) + ); } pub type sig_t = ::std::option::Option; #[repr(C)] @@ -10915,6 +7565,8 @@ pub struct sigvec { } #[test] fn bindgen_test_layout_sigvec() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -10925,42 +7577,21 @@ fn bindgen_test_layout_sigvec() { 8usize, concat!("Alignment of ", stringify!(sigvec)) ); - fn test_field_sv_handler() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sv_handler) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(sigvec), "::", stringify!(sv_handler)) - ); - } - test_field_sv_handler(); - fn test_field_sv_mask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sv_mask) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(sigvec), "::", stringify!(sv_mask)) - ); - } - test_field_sv_mask(); - fn test_field_sv_flags() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sv_flags) as usize - ptr as usize - }, - 12usize, - concat!("Offset of field: ", stringify!(sigvec), "::", stringify!(sv_flags)) - ); - } - test_field_sv_flags(); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).sv_handler) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(sigvec), "::", stringify!(sv_handler)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sv_mask) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(sigvec), "::", stringify!(sv_mask)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sv_flags) as usize - ptr as usize }, + 12usize, + concat!("Offset of field: ", stringify!(sigvec), "::", stringify!(sv_flags)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -10970,6 +7601,8 @@ pub struct sigstack { } #[test] fn bindgen_test_layout_sigstack() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -10980,30 +7613,16 @@ fn bindgen_test_layout_sigstack() { 8usize, concat!("Alignment of ", stringify!(sigstack)) ); - fn test_field_ss_sp() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ss_sp) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(sigstack), "::", stringify!(ss_sp)) - ); - } - test_field_ss_sp(); - fn test_field_ss_onstack() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ss_onstack) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(sigstack), "::", stringify!(ss_onstack)) - ); - } - test_field_ss_onstack(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ss_sp) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(sigstack), "::", stringify!(ss_sp)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ss_onstack) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(sigstack), "::", stringify!(ss_onstack)) + ); } extern "C" { pub fn signal( @@ -11024,6 +7643,8 @@ pub struct timeval { } #[test] fn bindgen_test_layout_timeval() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -11034,30 +7655,16 @@ fn bindgen_test_layout_timeval() { 8usize, concat!("Alignment of ", stringify!(timeval)) ); - fn test_field_tv_sec() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).tv_sec) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(timeval), "::", stringify!(tv_sec)) - ); - } - test_field_tv_sec(); - fn test_field_tv_usec() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).tv_usec) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(timeval), "::", stringify!(tv_usec)) - ); - } - test_field_tv_usec(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).tv_sec) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(timeval), "::", stringify!(tv_sec)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).tv_usec) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(timeval), "::", stringify!(tv_usec)) + ); } pub type rlim_t = __uint64_t; #[repr(C)] @@ -11082,6 +7689,8 @@ pub struct rusage { } #[test] fn bindgen_test_layout_rusage() { + const UNINIT: ::std::mem::MaybeUninit = 
::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 144usize, @@ -11092,198 +7701,86 @@ fn bindgen_test_layout_rusage() { 8usize, concat!("Alignment of ", stringify!(rusage)) ); - fn test_field_ru_utime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_utime) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_utime)) - ); - } - test_field_ru_utime(); - fn test_field_ru_stime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_stime) as usize - ptr as usize - }, - 16usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_stime)) - ); - } - test_field_ru_stime(); - fn test_field_ru_maxrss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_maxrss) as usize - ptr as usize - }, - 32usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_maxrss)) - ); - } - test_field_ru_maxrss(); - fn test_field_ru_ixrss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_ixrss) as usize - ptr as usize - }, - 40usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_ixrss)) - ); - } - test_field_ru_ixrss(); - fn test_field_ru_idrss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_idrss) as usize - ptr as usize - }, - 48usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_idrss)) - ); - } - test_field_ru_idrss(); - fn test_field_ru_isrss() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_isrss) as usize - ptr as usize - }, - 56usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_isrss)) - ); - } - test_field_ru_isrss(); - fn test_field_ru_minflt() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_minflt) as usize - ptr as usize - }, - 64usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_minflt)) - ); - } - test_field_ru_minflt(); - fn test_field_ru_majflt() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_majflt) as usize - ptr as usize - }, - 72usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_majflt)) - ); - } - test_field_ru_majflt(); - fn test_field_ru_nswap() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_nswap) as usize - ptr as usize - }, - 80usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_nswap)) - ); - } - test_field_ru_nswap(); - fn test_field_ru_inblock() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_inblock) as usize - ptr as usize - }, - 88usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_inblock)) - ); - } - test_field_ru_inblock(); - fn test_field_ru_oublock() 
{ - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_oublock) as usize - ptr as usize - }, - 96usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_oublock)) - ); - } - test_field_ru_oublock(); - fn test_field_ru_msgsnd() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_msgsnd) as usize - ptr as usize - }, - 104usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_msgsnd)) - ); - } - test_field_ru_msgsnd(); - fn test_field_ru_msgrcv() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_msgrcv) as usize - ptr as usize - }, - 112usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_msgrcv)) - ); - } - test_field_ru_msgrcv(); - fn test_field_ru_nsignals() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_nsignals) as usize - ptr as usize - }, - 120usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_nsignals)) - ); - } - test_field_ru_nsignals(); - fn test_field_ru_nvcsw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_nvcsw) as usize - ptr as usize - }, - 128usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_nvcsw)) - ); - } - test_field_ru_nvcsw(); - fn test_field_ru_nivcsw() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ru_nivcsw) as usize - ptr as usize - }, - 136usize, - concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_nivcsw)) - ); - } - test_field_ru_nivcsw(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_utime) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_utime)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_stime) as usize - ptr as usize }, + 16usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_stime)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_maxrss) as usize - ptr as usize }, + 32usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_maxrss)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_ixrss) as usize - ptr as usize }, + 40usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_ixrss)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_idrss) as usize - ptr as usize }, + 48usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_idrss)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_isrss) as usize - ptr as usize }, + 56usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_isrss)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_minflt) as usize - ptr as usize }, + 64usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_minflt)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_majflt) as usize - ptr as usize }, + 72usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_majflt)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_nswap) as usize - ptr as usize }, + 
80usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_nswap)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_inblock) as usize - ptr as usize }, + 88usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_inblock)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_oublock) as usize - ptr as usize }, + 96usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_oublock)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_msgsnd) as usize - ptr as usize }, + 104usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_msgsnd)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_msgrcv) as usize - ptr as usize }, + 112usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_msgrcv)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_nsignals) as usize - ptr as usize }, + 120usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_nsignals)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_nvcsw) as usize - ptr as usize }, + 128usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_nvcsw)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ru_nivcsw) as usize - ptr as usize }, + 136usize, + concat!("Offset of field: ", stringify!(rusage), "::", stringify!(ru_nivcsw)) + ); } pub type rusage_info_t = *mut ::std::os::raw::c_void; #[repr(C)] @@ -11303,6 +7800,8 @@ pub struct rusage_info_v0 { } #[test] fn bindgen_test_layout_rusage_info_v0() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 96usize, @@ -11313,193 +7812,116 @@ fn bindgen_test_layout_rusage_info_v0() { 8usize, concat!("Alignment of ", stringify!(rusage_info_v0)) ); - fn test_field_ri_uuid() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_uuid) - ) - ); - } - test_field_ri_uuid(); - fn test_field_ri_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_user_time) - ) - ); - } - test_field_ri_user_time(); - fn test_field_ri_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_system_time) - ) - ); - } - test_field_ri_system_time(); - fn test_field_ri_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_pkg_idle_wkups) - ) - ); - } - test_field_ri_pkg_idle_wkups(); - fn test_field_ri_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize - }, 
- 40usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_interrupt_wkups) - ) - ); - } - test_field_ri_interrupt_wkups(); - fn test_field_ri_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_pageins) - ) - ); - } - test_field_ri_pageins(); - fn test_field_ri_wired_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_wired_size) - ) - ); - } - test_field_ri_wired_size(); - fn test_field_ri_resident_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_resident_size) - ) - ); - } - test_field_ri_resident_size(); - fn test_field_ri_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_phys_footprint) - ) - ); - } - test_field_ri_phys_footprint(); - fn test_field_ri_proc_start_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_proc_start_abstime) - ) - ); - } - test_field_ri_proc_start_abstime(); - fn test_field_ri_proc_exit_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v0), - "::", - stringify!(ri_proc_exit_abstime) - ) - ); - } - test_field_ri_proc_exit_abstime(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_uuid) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_wired_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_resident_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_proc_start_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v0), + "::", + stringify!(ri_proc_exit_abstime) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -11524,6 +7946,8 @@ pub struct rusage_info_v1 { } #[test] fn bindgen_test_layout_rusage_info_v1() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 144usize, @@ -11534,305 +7958,186 @@ fn bindgen_test_layout_rusage_info_v1() { 8usize, concat!("Alignment of ", stringify!(rusage_info_v1)) ); - fn test_field_ri_uuid() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_uuid) - ) - ); - } - test_field_ri_uuid(); - fn test_field_ri_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_user_time) - ) - ); - } - test_field_ri_user_time(); - fn test_field_ri_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_system_time) - ) - ); - } - test_field_ri_system_time(); - fn test_field_ri_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_pkg_idle_wkups) - ) - ); - } - test_field_ri_pkg_idle_wkups(); - fn test_field_ri_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_interrupt_wkups) - ) - ); - } - 
test_field_ri_interrupt_wkups(); - fn test_field_ri_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_pageins) - ) - ); - } - test_field_ri_pageins(); - fn test_field_ri_wired_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_wired_size) - ) - ); - } - test_field_ri_wired_size(); - fn test_field_ri_resident_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_resident_size) - ) - ); - } - test_field_ri_resident_size(); - fn test_field_ri_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_phys_footprint) - ) - ); - } - test_field_ri_phys_footprint(); - fn test_field_ri_proc_start_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_proc_start_abstime) - ) - ); - } - test_field_ri_proc_start_abstime(); - fn test_field_ri_proc_exit_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_proc_exit_abstime) - ) - ); - } - test_field_ri_proc_exit_abstime(); - fn test_field_ri_child_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize - }, - 96usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_child_user_time) - ) - ); - } - test_field_ri_child_user_time(); - fn test_field_ri_child_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_child_system_time) - ) - ); - } - test_field_ri_child_system_time(); - fn test_field_ri_child_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize - }, - 112usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_child_pkg_idle_wkups) - ) - ); - } - test_field_ri_child_pkg_idle_wkups(); - fn test_field_ri_child_interrupt_wkups() 
{ - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_child_interrupt_wkups) - ) - ); - } - test_field_ri_child_interrupt_wkups(); - fn test_field_ri_child_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize - }, - 128usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_child_pageins) - ) - ); - } - test_field_ri_child_pageins(); - fn test_field_ri_child_elapsed_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v1), - "::", - stringify!(ri_child_elapsed_abstime) - ) - ); - } - test_field_ri_child_elapsed_abstime(); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct rusage_info_v2 { - pub ri_uuid: [u8; 16usize], - pub ri_user_time: u64, - pub ri_system_time: u64, - pub ri_pkg_idle_wkups: u64, - pub ri_interrupt_wkups: u64, - pub ri_pageins: u64, + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_uuid) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_wired_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_resident_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + 
stringify!(ri_proc_start_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_proc_exit_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize }, + 96usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_child_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_child_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize }, + 112usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_child_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_child_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize }, + 128usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_child_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v1), + "::", + stringify!(ri_child_elapsed_abstime) + ) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct rusage_info_v2 { + pub ri_uuid: [u8; 16usize], + pub ri_user_time: u64, + pub ri_system_time: u64, + pub ri_pkg_idle_wkups: u64, + pub ri_interrupt_wkups: u64, + pub ri_pageins: u64, pub ri_wired_size: u64, pub ri_resident_size: u64, pub ri_phys_footprint: u64, @@ -11849,6 +8154,8 @@ pub struct rusage_info_v2 { } #[test] fn bindgen_test_layout_rusage_info_v2() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 160usize, @@ -11859,329 +8166,196 @@ fn bindgen_test_layout_rusage_info_v2() { 8usize, concat!("Alignment of ", stringify!(rusage_info_v2)) ); - fn test_field_ri_uuid() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_uuid) - ) - ); - } - test_field_ri_uuid(); - fn test_field_ri_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_user_time) - ) - ); - } - test_field_ri_user_time(); - fn test_field_ri_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_system_time) - ) - ); - } - test_field_ri_system_time(); - fn test_field_ri_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_pkg_idle_wkups) - ) - ); - } - test_field_ri_pkg_idle_wkups(); - fn test_field_ri_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_interrupt_wkups) - ) - ); - } - test_field_ri_interrupt_wkups(); - fn test_field_ri_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_pageins) - ) - ); - } - test_field_ri_pageins(); - fn test_field_ri_wired_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_wired_size) - ) - ); - } - test_field_ri_wired_size(); - fn test_field_ri_resident_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_resident_size) - ) - ); - } - test_field_ri_resident_size(); - fn test_field_ri_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_phys_footprint) - ) - ); - } - test_field_ri_phys_footprint(); - fn test_field_ri_proc_start_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_proc_start_abstime) - ) - ); - } - test_field_ri_proc_start_abstime(); - fn test_field_ri_proc_exit_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_proc_exit_abstime) - ) - ); - } - test_field_ri_proc_exit_abstime(); - fn test_field_ri_child_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize - }, - 96usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_child_user_time) - ) - ); - } - test_field_ri_child_user_time(); - fn test_field_ri_child_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - 
::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_child_system_time) - ) - ); - } - test_field_ri_child_system_time(); - fn test_field_ri_child_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize - }, - 112usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_child_pkg_idle_wkups) - ) - ); - } - test_field_ri_child_pkg_idle_wkups(); - fn test_field_ri_child_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_child_interrupt_wkups) - ) - ); - } - test_field_ri_child_interrupt_wkups(); - fn test_field_ri_child_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize - }, - 128usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_child_pageins) - ) - ); - } - test_field_ri_child_pageins(); - fn test_field_ri_child_elapsed_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_child_elapsed_abstime) - ) - ); - } - test_field_ri_child_elapsed_abstime(); - fn test_field_ri_diskio_bytesread() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_diskio_bytesread) as usize - ptr as usize - }, - 144usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_diskio_bytesread) - ) - ); - } - test_field_ri_diskio_bytesread(); - fn test_field_ri_diskio_byteswritten() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_diskio_byteswritten) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v2), - "::", - stringify!(ri_diskio_byteswritten) - ) - ); - } - test_field_ri_diskio_byteswritten(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_uuid) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_wired_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_resident_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_proc_start_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_proc_exit_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize }, + 96usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_child_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_child_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize }, + 112usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_child_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_child_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize }, + 128usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_child_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_child_elapsed_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_diskio_bytesread) as usize - ptr as usize }, + 144usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_diskio_bytesread) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_diskio_byteswritten) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v2), + "::", + stringify!(ri_diskio_byteswritten) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -12217,6 +8391,8 @@ pub struct rusage_info_v3 { } #[test] fn bindgen_test_layout_rusage_info_v3() { + const UNINIT: 
::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 232usize, @@ -12227,482 +8403,286 @@ fn bindgen_test_layout_rusage_info_v3() { 8usize, concat!("Alignment of ", stringify!(rusage_info_v3)) ); - fn test_field_ri_uuid() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_uuid) - ) - ); - } - test_field_ri_uuid(); - fn test_field_ri_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_user_time) - ) - ); - } - test_field_ri_user_time(); - fn test_field_ri_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_system_time) - ) - ); - } - test_field_ri_system_time(); - fn test_field_ri_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_pkg_idle_wkups) - ) - ); - } - test_field_ri_pkg_idle_wkups(); - fn test_field_ri_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_interrupt_wkups) - ) - ); - } - test_field_ri_interrupt_wkups(); - fn test_field_ri_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_pageins) - ) - ); - } - test_field_ri_pageins(); - fn test_field_ri_wired_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_wired_size) - ) - ); - } - test_field_ri_wired_size(); - fn test_field_ri_resident_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_resident_size) - ) - ); - } - test_field_ri_resident_size(); - fn test_field_ri_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - 
stringify!(ri_phys_footprint) - ) - ); - } - test_field_ri_phys_footprint(); - fn test_field_ri_proc_start_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_proc_start_abstime) - ) - ); - } - test_field_ri_proc_start_abstime(); - fn test_field_ri_proc_exit_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_proc_exit_abstime) - ) - ); - } - test_field_ri_proc_exit_abstime(); - fn test_field_ri_child_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize - }, - 96usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_child_user_time) - ) - ); - } - test_field_ri_child_user_time(); - fn test_field_ri_child_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_child_system_time) - ) - ); - } - test_field_ri_child_system_time(); - fn test_field_ri_child_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize - }, - 112usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_child_pkg_idle_wkups) - ) - ); - } - test_field_ri_child_pkg_idle_wkups(); - fn test_field_ri_child_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_child_interrupt_wkups) - ) - ); - } - test_field_ri_child_interrupt_wkups(); - fn test_field_ri_child_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize - }, - 128usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_child_pageins) - ) - ); - } - test_field_ri_child_pageins(); - fn test_field_ri_child_elapsed_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_child_elapsed_abstime) - ) - ); - } - test_field_ri_child_elapsed_abstime(); - fn test_field_ri_diskio_bytesread() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_diskio_bytesread) as usize - ptr as usize - }, - 144usize, - concat!( - "Offset of field: ", - 
stringify!(rusage_info_v3), - "::", - stringify!(ri_diskio_bytesread) - ) - ); - } - test_field_ri_diskio_bytesread(); - fn test_field_ri_diskio_byteswritten() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_diskio_byteswritten) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_diskio_byteswritten) - ) - ); - } - test_field_ri_diskio_byteswritten(); - fn test_field_ri_cpu_time_qos_default() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_default) as usize - ptr as usize - }, - 160usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_cpu_time_qos_default) - ) - ); - } - test_field_ri_cpu_time_qos_default(); - fn test_field_ri_cpu_time_qos_maintenance() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_maintenance) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_cpu_time_qos_maintenance) - ) - ); - } - test_field_ri_cpu_time_qos_maintenance(); - fn test_field_ri_cpu_time_qos_background() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_background) as usize - ptr as usize - }, - 176usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_cpu_time_qos_background) - ) - ); - } - test_field_ri_cpu_time_qos_background(); - fn test_field_ri_cpu_time_qos_utility() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_utility) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_cpu_time_qos_utility) - ) - ); - } - test_field_ri_cpu_time_qos_utility(); - fn test_field_ri_cpu_time_qos_legacy() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_legacy) as usize - ptr as usize - }, - 192usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_cpu_time_qos_legacy) - ) - ); - } - test_field_ri_cpu_time_qos_legacy(); - fn test_field_ri_cpu_time_qos_user_initiated() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_initiated) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_cpu_time_qos_user_initiated) - ) - ); - } - test_field_ri_cpu_time_qos_user_initiated(); - fn test_field_ri_cpu_time_qos_user_interactive() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_interactive) as usize - ptr as usize - }, - 208usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_cpu_time_qos_user_interactive) - ) - ); - } - test_field_ri_cpu_time_qos_user_interactive(); - fn test_field_ri_billed_system_time() { - assert_eq!( - unsafe { - let uninit 
= ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_billed_system_time) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_billed_system_time) - ) - ); - } - test_field_ri_billed_system_time(); - fn test_field_ri_serviced_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_serviced_system_time) as usize - ptr as usize - }, - 224usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v3), - "::", - stringify!(ri_serviced_system_time) - ) - ); - } - test_field_ri_serviced_system_time(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_uuid) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_wired_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_resident_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_proc_start_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_proc_exit_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize }, + 96usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_child_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + 
stringify!(ri_child_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize }, + 112usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_child_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_child_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize }, + 128usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_child_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_child_elapsed_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_diskio_bytesread) as usize - ptr as usize }, + 144usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_diskio_bytesread) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_diskio_byteswritten) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_diskio_byteswritten) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_default) as usize - ptr as usize }, + 160usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_cpu_time_qos_default) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_maintenance) as usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_cpu_time_qos_maintenance) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_background) as usize - ptr as usize }, + 176usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_cpu_time_qos_background) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_utility) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_cpu_time_qos_utility) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_legacy) as usize - ptr as usize }, + 192usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_cpu_time_qos_legacy) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_initiated) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_cpu_time_qos_user_initiated) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_interactive) as usize - ptr as usize }, + 208usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_cpu_time_qos_user_interactive) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_billed_system_time) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", + stringify!(ri_billed_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_serviced_system_time) as usize - ptr as usize }, + 224usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v3), + "::", 
+ stringify!(ri_serviced_system_time) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -12746,6 +8726,8 @@ pub struct rusage_info_v4 { } #[test] fn bindgen_test_layout_rusage_info_v4() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 296usize, @@ -12756,618 +8738,366 @@ fn bindgen_test_layout_rusage_info_v4() { 8usize, concat!("Alignment of ", stringify!(rusage_info_v4)) ); - fn test_field_ri_uuid() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_uuid) - ) - ); - } - test_field_ri_uuid(); - fn test_field_ri_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_user_time) - ) - ); - } - test_field_ri_user_time(); - fn test_field_ri_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_system_time) - ) - ); - } - test_field_ri_system_time(); - fn test_field_ri_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_pkg_idle_wkups) - ) - ); - } - test_field_ri_pkg_idle_wkups(); - fn test_field_ri_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_interrupt_wkups) - ) - ); - } - test_field_ri_interrupt_wkups(); - fn test_field_ri_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_pageins) - ) - ); - } - test_field_ri_pageins(); - fn test_field_ri_wired_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_wired_size) - ) - ); - } - test_field_ri_wired_size(); - fn test_field_ri_resident_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_resident_size) - ) - ); - } - test_field_ri_resident_size(); - fn test_field_ri_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let 
ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_phys_footprint) - ) - ); - } - test_field_ri_phys_footprint(); - fn test_field_ri_proc_start_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_proc_start_abstime) - ) - ); - } - test_field_ri_proc_start_abstime(); - fn test_field_ri_proc_exit_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_proc_exit_abstime) - ) - ); - } - test_field_ri_proc_exit_abstime(); - fn test_field_ri_child_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize - }, - 96usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_child_user_time) - ) - ); - } - test_field_ri_child_user_time(); - fn test_field_ri_child_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_child_system_time) - ) - ); - } - test_field_ri_child_system_time(); - fn test_field_ri_child_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize - }, - 112usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_child_pkg_idle_wkups) - ) - ); - } - test_field_ri_child_pkg_idle_wkups(); - fn test_field_ri_child_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_child_interrupt_wkups) - ) - ); - } - test_field_ri_child_interrupt_wkups(); - fn test_field_ri_child_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize - }, - 128usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_child_pageins) - ) - ); - } - test_field_ri_child_pageins(); - fn test_field_ri_child_elapsed_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_child_elapsed_abstime) - ) - ); - } - test_field_ri_child_elapsed_abstime(); - fn test_field_ri_diskio_bytesread() { - assert_eq!( - unsafe { - let uninit = 
::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_diskio_bytesread) as usize - ptr as usize - }, - 144usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_diskio_bytesread) - ) - ); - } - test_field_ri_diskio_bytesread(); - fn test_field_ri_diskio_byteswritten() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_diskio_byteswritten) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_diskio_byteswritten) - ) - ); - } - test_field_ri_diskio_byteswritten(); - fn test_field_ri_cpu_time_qos_default() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_default) as usize - ptr as usize - }, - 160usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_cpu_time_qos_default) - ) - ); - } - test_field_ri_cpu_time_qos_default(); - fn test_field_ri_cpu_time_qos_maintenance() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_maintenance) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_cpu_time_qos_maintenance) - ) - ); - } - test_field_ri_cpu_time_qos_maintenance(); - fn test_field_ri_cpu_time_qos_background() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_background) as usize - ptr as usize - }, - 176usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_cpu_time_qos_background) - ) - ); - } - test_field_ri_cpu_time_qos_background(); - fn test_field_ri_cpu_time_qos_utility() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_utility) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_cpu_time_qos_utility) - ) - ); - } - test_field_ri_cpu_time_qos_utility(); - fn test_field_ri_cpu_time_qos_legacy() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_legacy) as usize - ptr as usize - }, - 192usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_cpu_time_qos_legacy) - ) - ); - } - test_field_ri_cpu_time_qos_legacy(); - fn test_field_ri_cpu_time_qos_user_initiated() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_initiated) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_cpu_time_qos_user_initiated) - ) - ); - } - test_field_ri_cpu_time_qos_user_initiated(); - fn test_field_ri_cpu_time_qos_user_interactive() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_interactive) as usize - ptr as usize - }, - 208usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - 
"::", - stringify!(ri_cpu_time_qos_user_interactive) - ) - ); - } - test_field_ri_cpu_time_qos_user_interactive(); - fn test_field_ri_billed_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_billed_system_time) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_billed_system_time) - ) - ); - } - test_field_ri_billed_system_time(); - fn test_field_ri_serviced_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_serviced_system_time) as usize - ptr as usize - }, - 224usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_serviced_system_time) - ) - ); - } - test_field_ri_serviced_system_time(); - fn test_field_ri_logical_writes() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_logical_writes) as usize - ptr as usize - }, - 232usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_logical_writes) - ) - ); - } - test_field_ri_logical_writes(); - fn test_field_ri_lifetime_max_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_lifetime_max_phys_footprint) as usize - ptr as usize - }, - 240usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_lifetime_max_phys_footprint) - ) - ); - } - test_field_ri_lifetime_max_phys_footprint(); - fn test_field_ri_instructions() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_instructions) as usize - ptr as usize - }, - 248usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_instructions) - ) - ); - } - test_field_ri_instructions(); - fn test_field_ri_cycles() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cycles) as usize - ptr as usize - }, - 256usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_cycles) - ) - ); - } - test_field_ri_cycles(); - fn test_field_ri_billed_energy() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_billed_energy) as usize - ptr as usize - }, - 264usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_billed_energy) - ) - ); - } - test_field_ri_billed_energy(); - fn test_field_ri_serviced_energy() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_serviced_energy) as usize - ptr as usize - }, - 272usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_serviced_energy) - ) - ); - } - test_field_ri_serviced_energy(); - fn test_field_ri_interval_max_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_interval_max_phys_footprint) as usize - ptr as usize - }, - 280usize, - concat!( - "Offset of field: ", - 
stringify!(rusage_info_v4), - "::", - stringify!(ri_interval_max_phys_footprint) - ) - ); - } - test_field_ri_interval_max_phys_footprint(); - fn test_field_ri_runnable_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_runnable_time) as usize - ptr as usize - }, - 288usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v4), - "::", - stringify!(ri_runnable_time) - ) - ); - } - test_field_ri_runnable_time(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_uuid) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_wired_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_resident_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_proc_start_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_proc_exit_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize }, + 96usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_child_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_child_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize }, + 112usize, + concat!( + "Offset of field: ", + 
stringify!(rusage_info_v4), + "::", + stringify!(ri_child_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_child_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize }, + 128usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_child_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_child_elapsed_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_diskio_bytesread) as usize - ptr as usize }, + 144usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_diskio_bytesread) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_diskio_byteswritten) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_diskio_byteswritten) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_default) as usize - ptr as usize }, + 160usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_cpu_time_qos_default) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_maintenance) as usize - ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_cpu_time_qos_maintenance) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_background) as usize - ptr as usize }, + 176usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_cpu_time_qos_background) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_utility) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_cpu_time_qos_utility) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_legacy) as usize - ptr as usize }, + 192usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_cpu_time_qos_legacy) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_initiated) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_cpu_time_qos_user_initiated) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_interactive) as usize - ptr as usize }, + 208usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_cpu_time_qos_user_interactive) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_billed_system_time) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_billed_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_serviced_system_time) as usize - ptr as usize }, + 224usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_serviced_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_logical_writes) as usize - ptr as usize }, + 232usize, + concat!( + "Offset of field: ", + 
stringify!(rusage_info_v4), + "::", + stringify!(ri_logical_writes) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_lifetime_max_phys_footprint) as usize - ptr as usize }, + 240usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_lifetime_max_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_instructions) as usize - ptr as usize }, + 248usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_instructions) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cycles) as usize - ptr as usize }, + 256usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_cycles) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_billed_energy) as usize - ptr as usize }, + 264usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_billed_energy) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_serviced_energy) as usize - ptr as usize }, + 272usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_serviced_energy) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_interval_max_phys_footprint) as usize - ptr as usize }, + 280usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_interval_max_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_runnable_time) as usize - ptr as usize }, + 288usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v4), + "::", + stringify!(ri_runnable_time) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -13412,6 +9142,8 @@ pub struct rusage_info_v5 { } #[test] fn bindgen_test_layout_rusage_info_v5() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 304usize, @@ -13422,635 +9154,376 @@ fn bindgen_test_layout_rusage_info_v5() { 8usize, concat!("Alignment of ", stringify!(rusage_info_v5)) ); - fn test_field_ri_uuid() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_uuid) - ) - ); - } - test_field_ri_uuid(); - fn test_field_ri_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_user_time) - ) - ); - } - test_field_ri_user_time(); - fn test_field_ri_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_system_time) - ) - ); - } - test_field_ri_system_time(); - fn test_field_ri_pkg_idle_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_pkg_idle_wkups) - ) - ); - } - 
test_field_ri_pkg_idle_wkups(); - fn test_field_ri_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_interrupt_wkups) - ) - ); - } - test_field_ri_interrupt_wkups(); - fn test_field_ri_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize - }, - 48usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_pageins) - ) - ); - } - test_field_ri_pageins(); - fn test_field_ri_wired_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize - }, - 56usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_wired_size) - ) - ); - } - test_field_ri_wired_size(); - fn test_field_ri_resident_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize - }, - 64usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_resident_size) - ) - ); - } - test_field_ri_resident_size(); - fn test_field_ri_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize - }, - 72usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_phys_footprint) - ) - ); - } - test_field_ri_phys_footprint(); - fn test_field_ri_proc_start_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize - }, - 80usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_proc_start_abstime) - ) - ); - } - test_field_ri_proc_start_abstime(); - fn test_field_ri_proc_exit_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize - }, - 88usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_proc_exit_abstime) - ) - ); - } - test_field_ri_proc_exit_abstime(); - fn test_field_ri_child_user_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize - }, - 96usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_child_user_time) - ) - ); - } - test_field_ri_child_user_time(); - fn test_field_ri_child_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize - }, - 104usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_child_system_time) - ) - ); - } - test_field_ri_child_system_time(); - fn test_field_ri_child_pkg_idle_wkups() { - assert_eq!( - 
unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize - }, - 112usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_child_pkg_idle_wkups) - ) - ); - } - test_field_ri_child_pkg_idle_wkups(); - fn test_field_ri_child_interrupt_wkups() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize - }, - 120usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_child_interrupt_wkups) - ) - ); - } - test_field_ri_child_interrupt_wkups(); - fn test_field_ri_child_pageins() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize - }, - 128usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_child_pageins) - ) - ); - } - test_field_ri_child_pageins(); - fn test_field_ri_child_elapsed_abstime() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize - }, - 136usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_child_elapsed_abstime) - ) - ); - } - test_field_ri_child_elapsed_abstime(); - fn test_field_ri_diskio_bytesread() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_diskio_bytesread) as usize - ptr as usize - }, - 144usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_diskio_bytesread) - ) - ); - } - test_field_ri_diskio_bytesread(); - fn test_field_ri_diskio_byteswritten() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_diskio_byteswritten) as usize - ptr as usize - }, - 152usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_diskio_byteswritten) - ) - ); - } - test_field_ri_diskio_byteswritten(); - fn test_field_ri_cpu_time_qos_default() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_default) as usize - ptr as usize - }, - 160usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_cpu_time_qos_default) - ) - ); - } - test_field_ri_cpu_time_qos_default(); - fn test_field_ri_cpu_time_qos_maintenance() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_maintenance) as usize - ptr as usize - }, - 168usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_cpu_time_qos_maintenance) - ) - ); - } - test_field_ri_cpu_time_qos_maintenance(); - fn test_field_ri_cpu_time_qos_background() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_background) as usize - ptr as usize - }, - 176usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_cpu_time_qos_background) - 
) - ); - } - test_field_ri_cpu_time_qos_background(); - fn test_field_ri_cpu_time_qos_utility() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_utility) as usize - ptr as usize - }, - 184usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_cpu_time_qos_utility) - ) - ); - } - test_field_ri_cpu_time_qos_utility(); - fn test_field_ri_cpu_time_qos_legacy() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_legacy) as usize - ptr as usize - }, - 192usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_cpu_time_qos_legacy) - ) - ); - } - test_field_ri_cpu_time_qos_legacy(); - fn test_field_ri_cpu_time_qos_user_initiated() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_initiated) as usize - ptr as usize - }, - 200usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_cpu_time_qos_user_initiated) - ) - ); - } - test_field_ri_cpu_time_qos_user_initiated(); - fn test_field_ri_cpu_time_qos_user_interactive() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_interactive) as usize - ptr as usize - }, - 208usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_cpu_time_qos_user_interactive) - ) - ); - } - test_field_ri_cpu_time_qos_user_interactive(); - fn test_field_ri_billed_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_billed_system_time) as usize - ptr as usize - }, - 216usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_billed_system_time) - ) - ); - } - test_field_ri_billed_system_time(); - fn test_field_ri_serviced_system_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_serviced_system_time) as usize - ptr as usize - }, - 224usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_serviced_system_time) - ) - ); - } - test_field_ri_serviced_system_time(); - fn test_field_ri_logical_writes() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_logical_writes) as usize - ptr as usize - }, - 232usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_logical_writes) - ) - ); - } - test_field_ri_logical_writes(); - fn test_field_ri_lifetime_max_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_lifetime_max_phys_footprint) as usize - ptr as usize - }, - 240usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_lifetime_max_phys_footprint) - ) - ); - } - test_field_ri_lifetime_max_phys_footprint(); - fn test_field_ri_instructions() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - 
::std::ptr::addr_of!((*ptr).ri_instructions) as usize - ptr as usize - }, - 248usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_instructions) - ) - ); - } - test_field_ri_instructions(); - fn test_field_ri_cycles() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_cycles) as usize - ptr as usize - }, - 256usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_cycles) - ) - ); - } - test_field_ri_cycles(); - fn test_field_ri_billed_energy() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_billed_energy) as usize - ptr as usize - }, - 264usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_billed_energy) - ) - ); - } - test_field_ri_billed_energy(); - fn test_field_ri_serviced_energy() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_serviced_energy) as usize - ptr as usize - }, - 272usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_serviced_energy) - ) - ); - } - test_field_ri_serviced_energy(); - fn test_field_ri_interval_max_phys_footprint() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_interval_max_phys_footprint) as usize - ptr as usize - }, - 280usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_interval_max_phys_footprint) - ) - ); - } - test_field_ri_interval_max_phys_footprint(); - fn test_field_ri_runnable_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_runnable_time) as usize - ptr as usize - }, - 288usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_runnable_time) - ) - ); - } - test_field_ri_runnable_time(); - fn test_field_ri_flags() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).ri_flags) as usize - ptr as usize - }, - 296usize, - concat!( - "Offset of field: ", - stringify!(rusage_info_v5), - "::", - stringify!(ri_flags) - ) - ); - } - test_field_ri_flags(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_uuid) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_uuid) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_user_time) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_system_time) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pkg_idle_wkups) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_interrupt_wkups) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + 
"::", + stringify!(ri_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_pageins) as usize - ptr as usize }, + 48usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_wired_size) as usize - ptr as usize }, + 56usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_wired_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_resident_size) as usize - ptr as usize }, + 64usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_resident_size) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_phys_footprint) as usize - ptr as usize }, + 72usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_start_abstime) as usize - ptr as usize }, + 80usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_proc_start_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_proc_exit_abstime) as usize - ptr as usize }, + 88usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_proc_exit_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_user_time) as usize - ptr as usize }, + 96usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_child_user_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_system_time) as usize - ptr as usize }, + 104usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_child_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pkg_idle_wkups) as usize - ptr as usize }, + 112usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_child_pkg_idle_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_interrupt_wkups) as usize - ptr as usize }, + 120usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_child_interrupt_wkups) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_pageins) as usize - ptr as usize }, + 128usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_child_pageins) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_child_elapsed_abstime) as usize - ptr as usize }, + 136usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_child_elapsed_abstime) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_diskio_bytesread) as usize - ptr as usize }, + 144usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_diskio_bytesread) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_diskio_byteswritten) as usize - ptr as usize }, + 152usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_diskio_byteswritten) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_default) as usize - ptr as usize }, + 160usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_cpu_time_qos_default) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_maintenance) as usize 
- ptr as usize }, + 168usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_cpu_time_qos_maintenance) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_background) as usize - ptr as usize }, + 176usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_cpu_time_qos_background) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_utility) as usize - ptr as usize }, + 184usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_cpu_time_qos_utility) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_legacy) as usize - ptr as usize }, + 192usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_cpu_time_qos_legacy) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_initiated) as usize - ptr as usize }, + 200usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_cpu_time_qos_user_initiated) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cpu_time_qos_user_interactive) as usize - ptr as usize }, + 208usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_cpu_time_qos_user_interactive) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_billed_system_time) as usize - ptr as usize }, + 216usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_billed_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_serviced_system_time) as usize - ptr as usize }, + 224usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_serviced_system_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_logical_writes) as usize - ptr as usize }, + 232usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_logical_writes) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_lifetime_max_phys_footprint) as usize - ptr as usize }, + 240usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_lifetime_max_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_instructions) as usize - ptr as usize }, + 248usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_instructions) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_cycles) as usize - ptr as usize }, + 256usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_cycles) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_billed_energy) as usize - ptr as usize }, + 264usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_billed_energy) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_serviced_energy) as usize - ptr as usize }, + 272usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_serviced_energy) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_interval_max_phys_footprint) as usize - ptr as usize }, + 280usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_interval_max_phys_footprint) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_runnable_time) as usize - ptr as usize }, + 288usize, + 
concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_runnable_time) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).ri_flags) as usize - ptr as usize }, + 296usize, + concat!( + "Offset of field: ", + stringify!(rusage_info_v5), + "::", + stringify!(ri_flags) + ) + ); } pub type rusage_info_current = rusage_info_v5; #[repr(C)] @@ -14061,6 +9534,8 @@ pub struct rlimit { } #[test] fn bindgen_test_layout_rlimit() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -14071,30 +9546,16 @@ fn bindgen_test_layout_rlimit() { 8usize, concat!("Alignment of ", stringify!(rlimit)) ); - fn test_field_rlim_cur() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).rlim_cur) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(rlimit), "::", stringify!(rlim_cur)) - ); - } - test_field_rlim_cur(); - fn test_field_rlim_max() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).rlim_max) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(rlimit), "::", stringify!(rlim_max)) - ); - } - test_field_rlim_max(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).rlim_cur) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(rlimit), "::", stringify!(rlim_cur)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).rlim_max) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(rlimit), "::", stringify!(rlim_max)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -14104,6 +9565,8 @@ pub struct proc_rlimit_control_wakeupmon { } #[test] fn bindgen_test_layout_proc_rlimit_control_wakeupmon() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 8usize, @@ -14114,40 +9577,26 @@ fn bindgen_test_layout_proc_rlimit_control_wakeupmon() { 4usize, concat!("Alignment of ", stringify!(proc_rlimit_control_wakeupmon)) ); - fn test_field_wm_flags() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).wm_flags) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(proc_rlimit_control_wakeupmon), - "::", - stringify!(wm_flags) - ) - ); - } - test_field_wm_flags(); - fn test_field_wm_rate() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).wm_rate) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(proc_rlimit_control_wakeupmon), - "::", - stringify!(wm_rate) - ) - ); - } - test_field_wm_rate(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).wm_flags) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(proc_rlimit_control_wakeupmon), + "::", + stringify!(wm_flags) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).wm_rate) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(proc_rlimit_control_wakeupmon), + "::", + stringify!(wm_rate) + ) + ); } extern "C" { pub fn getpriority(arg1: ::std::os::raw::c_int, arg2: id_t) -> ::std::os::raw::c_int; @@ -14351,6 +9800,8 @@ impl wait__bindgen_ty_2 { } 
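The regenerated layout tests in this file all make the same mechanical change: rust-bindgen 0.61.0 computes every field offset from a single `const UNINIT: MaybeUninit<T>` declared at the top of the test, where 0.60.1 emitted one nested `test_field_*` helper function per field. A minimal sketch of the new shape, using a hypothetical `Example` struct on a typical 64-bit target rather than any of the generated types:

    #[repr(C)]
    pub struct Example {
        pub a: u32,
        pub b: u64,
    }

    #[test]
    fn bindgen_test_layout_example() {
        // One uninitialized value is shared by every offset assertion.
        const UNINIT: ::std::mem::MaybeUninit<Example> = ::std::mem::MaybeUninit::uninit();
        let ptr = UNINIT.as_ptr();
        assert_eq!(::std::mem::size_of::<Example>(), 16usize);
        assert_eq!(::std::mem::align_of::<Example>(), 8usize);
        // addr_of! takes a field's address without creating a reference to
        // uninitialized memory, so the pointer arithmetic below stays sound.
        assert_eq!(
            unsafe { ::std::ptr::addr_of!((*ptr).a) as usize - ptr as usize },
            0usize
        );
        assert_eq!(
            unsafe { ::std::ptr::addr_of!((*ptr).b) as usize - ptr as usize },
            8usize
        );
    }

Same assertions, no per-field helper functions, which accounts for most of the line-count drop in this file.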
#[test] fn bindgen_test_layout_wait() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 4usize, @@ -14361,42 +9812,21 @@ fn bindgen_test_layout_wait() { 4usize, concat!("Alignment of ", stringify!(wait)) ); - fn test_field_w_status() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).w_status) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(wait), "::", stringify!(w_status)) - ); - } - test_field_w_status(); - fn test_field_w_T() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).w_T) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(wait), "::", stringify!(w_T)) - ); - } - test_field_w_T(); - fn test_field_w_S() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).w_S) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(wait), "::", stringify!(w_S)) - ); - } - test_field_w_S(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).w_status) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(wait), "::", stringify!(w_status)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).w_T) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(wait), "::", stringify!(w_T)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).w_S) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(wait), "::", stringify!(w_S)) + ); } extern "C" { pub fn wait(arg1: *mut ::std::os::raw::c_int) -> pid_t; @@ -14437,6 +9867,8 @@ pub struct div_t { } #[test] fn bindgen_test_layout_div_t() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 8usize, @@ -14447,30 +9879,16 @@ fn bindgen_test_layout_div_t() { 4usize, concat!("Alignment of ", stringify!(div_t)) ); - fn test_field_quot() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).quot) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(div_t), "::", stringify!(quot)) - ); - } - test_field_quot(); - fn test_field_rem() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).rem) as usize - ptr as usize - }, - 4usize, - concat!("Offset of field: ", stringify!(div_t), "::", stringify!(rem)) - ); - } - test_field_rem(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).quot) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(div_t), "::", stringify!(quot)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).rem) as usize - ptr as usize }, + 4usize, + concat!("Offset of field: ", stringify!(div_t), "::", stringify!(rem)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -14480,6 +9898,8 @@ pub struct ldiv_t { } #[test] fn bindgen_test_layout_ldiv_t() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -14490,30 +9910,16 @@ fn bindgen_test_layout_ldiv_t() { 8usize, concat!("Alignment of ", stringify!(ldiv_t)) ); - fn 
test_field_quot() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).quot) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(ldiv_t), "::", stringify!(quot)) - ); - } - test_field_quot(); - fn test_field_rem() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).rem) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(ldiv_t), "::", stringify!(rem)) - ); - } - test_field_rem(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).quot) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(ldiv_t), "::", stringify!(quot)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).rem) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(ldiv_t), "::", stringify!(rem)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -14523,6 +9929,8 @@ pub struct lldiv_t { } #[test] fn bindgen_test_layout_lldiv_t() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -14533,30 +9941,16 @@ fn bindgen_test_layout_lldiv_t() { 8usize, concat!("Alignment of ", stringify!(lldiv_t)) ); - fn test_field_quot() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).quot) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(lldiv_t), "::", stringify!(quot)) - ); - } - test_field_quot(); - fn test_field_rem() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).rem) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(lldiv_t), "::", stringify!(rem)) - ); - } - test_field_rem(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).quot) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(lldiv_t), "::", stringify!(quot)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).rem) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(lldiv_t), "::", stringify!(rem)) + ); } extern "C" { pub static mut __mb_cur_max: ::std::os::raw::c_int; @@ -14574,7 +9968,7 @@ extern "C" { pub fn realloc(__ptr: *mut ::std::os::raw::c_void, __size: ::std::os::raw::c_ulong) -> *mut ::std::os::raw::c_void; } extern "C" { - pub fn valloc(arg1: size_t) -> *mut ::std::os::raw::c_void; + pub fn valloc(arg1: usize) -> *mut ::std::os::raw::c_void; } extern "C" { pub fn aligned_alloc( @@ -14585,12 +9979,12 @@ extern "C" { extern "C" { pub fn posix_memalign( __memptr: *mut *mut ::std::os::raw::c_void, - __alignment: size_t, - __size: size_t, + __alignment: usize, + __size: usize, ) -> ::std::os::raw::c_int; } extern "C" { - pub fn abort(); + pub fn abort() -> !; } extern "C" { pub fn abs(arg1: ::std::os::raw::c_int) -> ::std::os::raw::c_int; @@ -14614,8 +10008,8 @@ extern "C" { pub fn bsearch( __key: *const ::std::os::raw::c_void, __base: *const ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: ::std::option::Option< unsafe extern "C" fn( arg1: *const ::std::os::raw::c_void, @@ -14628,7 +10022,7 @@ extern "C" { pub fn div(arg1: ::std::os::raw::c_int, arg2: ::std::os::raw::c_int) -> div_t; } extern "C" { - pub fn exit(arg1: 
::std::os::raw::c_int); + pub fn exit(arg1: ::std::os::raw::c_int) -> !; } extern "C" { pub fn getenv(arg1: *const ::std::os::raw::c_char) -> *mut ::std::os::raw::c_char; @@ -14646,19 +10040,19 @@ extern "C" { pub fn lldiv(arg1: ::std::os::raw::c_longlong, arg2: ::std::os::raw::c_longlong) -> lldiv_t; } extern "C" { - pub fn mblen(__s: *const ::std::os::raw::c_char, __n: size_t) -> ::std::os::raw::c_int; + pub fn mblen(__s: *const ::std::os::raw::c_char, __n: usize) -> ::std::os::raw::c_int; } extern "C" { - pub fn mbstowcs(arg1: *mut wchar_t, arg2: *const ::std::os::raw::c_char, arg3: size_t) -> size_t; + pub fn mbstowcs(arg1: *mut wchar_t, arg2: *const ::std::os::raw::c_char, arg3: usize) -> usize; } extern "C" { - pub fn mbtowc(arg1: *mut wchar_t, arg2: *const ::std::os::raw::c_char, arg3: size_t) -> ::std::os::raw::c_int; + pub fn mbtowc(arg1: *mut wchar_t, arg2: *const ::std::os::raw::c_char, arg3: usize) -> ::std::os::raw::c_int; } extern "C" { pub fn qsort( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: ::std::option::Option< unsafe extern "C" fn( arg1: *const ::std::os::raw::c_void, @@ -14714,13 +10108,13 @@ extern "C" { pub fn system(arg1: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int; } extern "C" { - pub fn wcstombs(arg1: *mut ::std::os::raw::c_char, arg2: *const wchar_t, arg3: size_t) -> size_t; + pub fn wcstombs(arg1: *mut ::std::os::raw::c_char, arg2: *const wchar_t, arg3: usize) -> usize; } extern "C" { pub fn wctomb(arg1: *mut ::std::os::raw::c_char, arg2: wchar_t) -> ::std::os::raw::c_int; } extern "C" { - pub fn _Exit(arg1: ::std::os::raw::c_int); + pub fn _Exit(arg1: ::std::os::raw::c_int) -> !; } extern "C" { pub fn a64l(arg1: *const ::std::os::raw::c_char) -> ::std::os::raw::c_long; @@ -14768,7 +10162,7 @@ extern "C" { pub fn initstate( arg1: ::std::os::raw::c_uint, arg2: *mut ::std::os::raw::c_char, - arg3: size_t, + arg3: usize, ) -> *mut ::std::os::raw::c_char; } extern "C" { @@ -14805,7 +10199,7 @@ extern "C" { pub fn ptsname_r( fildes: ::std::os::raw::c_int, buffer: *mut ::std::os::raw::c_char, - buflen: size_t, + buflen: usize, ) -> ::std::os::raw::c_int; } extern "C" { @@ -14860,7 +10254,7 @@ extern "C" { pub fn arc4random_addrandom(arg1: *mut ::std::os::raw::c_uchar, arg2: ::std::os::raw::c_int); } extern "C" { - pub fn arc4random_buf(__buf: *mut ::std::os::raw::c_void, __nbytes: size_t); + pub fn arc4random_buf(__buf: *mut ::std::os::raw::c_void, __nbytes: usize); } extern "C" { pub fn arc4random_stir(); @@ -14875,8 +10269,8 @@ extern "C" { pub fn bsearch_b( __key: *const ::std::os::raw::c_void, __base: *const ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: *mut ::std::os::raw::c_void, ) -> *mut ::std::os::raw::c_void; } @@ -14967,8 +10361,8 @@ extern "C" { extern "C" { pub fn heapsort( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: ::std::option::Option< unsafe extern "C" fn( arg1: *const ::std::os::raw::c_void, @@ -14980,16 +10374,16 @@ extern "C" { extern "C" { pub fn heapsort_b( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: *mut ::std::os::raw::c_void, ) -> ::std::os::raw::c_int; } extern "C" { pub fn mergesort( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: ::std::option::Option< unsafe extern "C" fn( arg1: *const 
::std::os::raw::c_void, @@ -15001,16 +10395,16 @@ extern "C" { extern "C" { pub fn mergesort_b( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: *mut ::std::os::raw::c_void, ) -> ::std::os::raw::c_int; } extern "C" { pub fn psort( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: ::std::option::Option< unsafe extern "C" fn( arg1: *const ::std::os::raw::c_void, @@ -15022,16 +10416,16 @@ extern "C" { extern "C" { pub fn psort_b( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: *mut ::std::os::raw::c_void, ); } extern "C" { pub fn psort_r( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, arg1: *mut ::std::os::raw::c_void, __compar: ::std::option::Option< unsafe extern "C" fn( @@ -15045,16 +10439,16 @@ extern "C" { extern "C" { pub fn qsort_b( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, __compar: *mut ::std::os::raw::c_void, ); } extern "C" { pub fn qsort_r( __base: *mut ::std::os::raw::c_void, - __nel: size_t, - __width: size_t, + __nel: usize, + __width: usize, arg1: *mut ::std::os::raw::c_void, __compar: ::std::option::Option< unsafe extern "C" fn( @@ -15091,7 +10485,7 @@ extern "C" { pub fn srandomdev(); } extern "C" { - pub fn reallocf(__ptr: *mut ::std::os::raw::c_void, __size: size_t) -> *mut ::std::os::raw::c_void; + pub fn reallocf(__ptr: *mut ::std::os::raw::c_void, __size: usize) -> *mut ::std::os::raw::c_void; } extern "C" { pub fn strtonum( @@ -15148,6 +10542,7 @@ pub const DUCKDB_TYPE_DUCKDB_TYPE_STRUCT: DUCKDB_TYPE = 25; pub const DUCKDB_TYPE_DUCKDB_TYPE_MAP: DUCKDB_TYPE = 26; pub const DUCKDB_TYPE_DUCKDB_TYPE_UUID: DUCKDB_TYPE = 27; pub const DUCKDB_TYPE_DUCKDB_TYPE_JSON: DUCKDB_TYPE = 28; +pub const DUCKDB_TYPE_DUCKDB_TYPE_UNION: DUCKDB_TYPE = 29; pub type DUCKDB_TYPE = ::std::os::raw::c_uint; pub use self::DUCKDB_TYPE as duckdb_type; #[doc = "! 
Days are stored as days since 1970-01-01"] @@ -15159,6 +10554,8 @@ pub struct duckdb_date { } #[test] fn bindgen_test_layout_duckdb_date() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 4usize, @@ -15169,18 +10566,11 @@ fn bindgen_test_layout_duckdb_date() { 4usize, concat!("Alignment of ", stringify!(duckdb_date)) ); - fn test_field_days() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).days) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(duckdb_date), "::", stringify!(days)) - ); - } - test_field_days(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).days) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(duckdb_date), "::", stringify!(days)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -15191,6 +10581,8 @@ pub struct duckdb_date_struct { } #[test] fn bindgen_test_layout_duckdb_date_struct() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 8usize, @@ -15201,57 +10593,36 @@ fn bindgen_test_layout_duckdb_date_struct() { 4usize, concat!("Alignment of ", stringify!(duckdb_date_struct)) ); - fn test_field_year() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).year) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(duckdb_date_struct), - "::", - stringify!(year) - ) - ); - } - test_field_year(); - fn test_field_month() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).month) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(duckdb_date_struct), - "::", - stringify!(month) - ) - ); - } - test_field_month(); - fn test_field_day() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).day) as usize - ptr as usize - }, - 5usize, - concat!( - "Offset of field: ", - stringify!(duckdb_date_struct), - "::", - stringify!(day) - ) - ); - } - test_field_day(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).year) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(duckdb_date_struct), + "::", + stringify!(year) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).month) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(duckdb_date_struct), + "::", + stringify!(month) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).day) as usize - ptr as usize }, + 5usize, + concat!( + "Offset of field: ", + stringify!(duckdb_date_struct), + "::", + stringify!(day) + ) + ); } #[doc = "! Time is stored as microseconds since 00:00:00"] #[doc = "! 
Use the duckdb_from_time/duckdb_to_time function to extract individual information"] @@ -15262,6 +10633,8 @@ pub struct duckdb_time { } #[test] fn bindgen_test_layout_duckdb_time() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 8usize, @@ -15272,18 +10645,11 @@ fn bindgen_test_layout_duckdb_time() { 8usize, concat!("Alignment of ", stringify!(duckdb_time)) ); - fn test_field_micros() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).micros) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(duckdb_time), "::", stringify!(micros)) - ); - } - test_field_micros(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).micros) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(duckdb_time), "::", stringify!(micros)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -15295,6 +10661,8 @@ pub struct duckdb_time_struct { } #[test] fn bindgen_test_layout_duckdb_time_struct() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 8usize, @@ -15305,74 +10673,46 @@ fn bindgen_test_layout_duckdb_time_struct() { 4usize, concat!("Alignment of ", stringify!(duckdb_time_struct)) ); - fn test_field_hour() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).hour) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(duckdb_time_struct), - "::", - stringify!(hour) - ) - ); - } - test_field_hour(); - fn test_field_min() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).min) as usize - ptr as usize - }, - 1usize, - concat!( - "Offset of field: ", - stringify!(duckdb_time_struct), - "::", - stringify!(min) - ) - ); - } - test_field_min(); - fn test_field_sec() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).sec) as usize - ptr as usize - }, - 2usize, - concat!( - "Offset of field: ", - stringify!(duckdb_time_struct), - "::", - stringify!(sec) - ) - ); - } - test_field_sec(); - fn test_field_micros() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).micros) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(duckdb_time_struct), - "::", - stringify!(micros) - ) - ); - } - test_field_micros(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).hour) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(duckdb_time_struct), + "::", + stringify!(hour) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).min) as usize - ptr as usize }, + 1usize, + concat!( + "Offset of field: ", + stringify!(duckdb_time_struct), + "::", + stringify!(min) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).sec) as usize - ptr as usize }, + 2usize, + concat!( + "Offset of field: ", + stringify!(duckdb_time_struct), + "::", + stringify!(sec) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).micros) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(duckdb_time_struct), + "::", + stringify!(micros) + ) + ); 
} #[doc = "! Timestamps are stored as microseconds since 1970-01-01"] #[doc = "! Use the duckdb_from_timestamp/duckdb_to_timestamp function to extract individual information"] @@ -15383,6 +10723,8 @@ pub struct duckdb_timestamp { } #[test] fn bindgen_test_layout_duckdb_timestamp() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 8usize, @@ -15393,23 +10735,16 @@ fn bindgen_test_layout_duckdb_timestamp() { 8usize, concat!("Alignment of ", stringify!(duckdb_timestamp)) ); - fn test_field_micros() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).micros) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(duckdb_timestamp), - "::", - stringify!(micros) - ) - ); - } - test_field_micros(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).micros) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(duckdb_timestamp), + "::", + stringify!(micros) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -15419,6 +10754,8 @@ pub struct duckdb_timestamp_struct { } #[test] fn bindgen_test_layout_duckdb_timestamp_struct() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -15429,40 +10766,26 @@ fn bindgen_test_layout_duckdb_timestamp_struct() { 4usize, concat!("Alignment of ", stringify!(duckdb_timestamp_struct)) ); - fn test_field_date() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).date) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(duckdb_timestamp_struct), - "::", - stringify!(date) - ) - ); - } - test_field_date(); - fn test_field_time() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).time) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(duckdb_timestamp_struct), - "::", - stringify!(time) - ) - ); - } - test_field_time(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).date) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(duckdb_timestamp_struct), + "::", + stringify!(date) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).time) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(duckdb_timestamp_struct), + "::", + stringify!(time) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -15473,6 +10796,8 @@ pub struct duckdb_interval { } #[test] fn bindgen_test_layout_duckdb_interval() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -15483,52 +10808,31 @@ fn bindgen_test_layout_duckdb_interval() { 8usize, concat!("Alignment of ", stringify!(duckdb_interval)) ); - fn test_field_months() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).months) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(duckdb_interval), - "::", - stringify!(months) - ) - ); - } - test_field_months(); - fn test_field_days() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let 
ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).days) as usize - ptr as usize - }, - 4usize, - concat!("Offset of field: ", stringify!(duckdb_interval), "::", stringify!(days)) - ); - } - test_field_days(); - fn test_field_micros() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).micros) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(duckdb_interval), - "::", - stringify!(micros) - ) - ); - } - test_field_micros(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).months) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(duckdb_interval), + "::", + stringify!(months) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).days) as usize - ptr as usize }, + 4usize, + concat!("Offset of field: ", stringify!(duckdb_interval), "::", stringify!(days)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).micros) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(duckdb_interval), + "::", + stringify!(micros) + ) + ); } #[doc = "! Hugeints are composed in a (lower, upper) component"] #[doc = "! The value of the hugeint is upper * 2^64 + lower"] @@ -15541,6 +10845,8 @@ pub struct duckdb_hugeint { } #[test] fn bindgen_test_layout_duckdb_hugeint() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -15551,30 +10857,16 @@ fn bindgen_test_layout_duckdb_hugeint() { 8usize, concat!("Alignment of ", stringify!(duckdb_hugeint)) ); - fn test_field_lower() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).lower) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(duckdb_hugeint), "::", stringify!(lower)) - ); - } - test_field_lower(); - fn test_field_upper() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).upper) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(duckdb_hugeint), "::", stringify!(upper)) - ); - } - test_field_upper(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).lower) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(duckdb_hugeint), "::", stringify!(lower)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).upper) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(duckdb_hugeint), "::", stringify!(upper)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -15585,6 +10877,8 @@ pub struct duckdb_decimal { } #[test] fn bindgen_test_layout_duckdb_decimal() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 24usize, @@ -15595,42 +10889,52 @@ fn bindgen_test_layout_duckdb_decimal() { 8usize, concat!("Alignment of ", stringify!(duckdb_decimal)) ); - fn test_field_width() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).width) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(duckdb_decimal), "::", stringify!(width)) - ); - } - test_field_width(); - fn test_field_scale() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = 
uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).scale) as usize - ptr as usize - }, - 1usize, - concat!("Offset of field: ", stringify!(duckdb_decimal), "::", stringify!(scale)) - ); - } - test_field_scale(); - fn test_field_value() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).value) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(duckdb_decimal), "::", stringify!(value)) - ); - } - test_field_value(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).width) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(duckdb_decimal), "::", stringify!(width)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).scale) as usize - ptr as usize }, + 1usize, + concat!("Offset of field: ", stringify!(duckdb_decimal), "::", stringify!(scale)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).value) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(duckdb_decimal), "::", stringify!(value)) + ); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct duckdb_string { + pub data: *mut ::std::os::raw::c_char, + pub size: idx_t, +} +#[test] +fn bindgen_test_layout_duckdb_string() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); + assert_eq!( + ::std::mem::size_of::(), + 16usize, + concat!("Size of: ", stringify!(duckdb_string)) + ); + assert_eq!( + ::std::mem::align_of::(), + 8usize, + concat!("Alignment of ", stringify!(duckdb_string)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(duckdb_string), "::", stringify!(data)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(duckdb_string), "::", stringify!(size)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -15640,6 +10944,8 @@ pub struct duckdb_blob { } #[test] fn bindgen_test_layout_duckdb_blob() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 16usize, @@ -15650,30 +10956,16 @@ fn bindgen_test_layout_duckdb_blob() { 8usize, concat!("Alignment of ", stringify!(duckdb_blob)) ); - fn test_field_data() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize - }, - 0usize, - concat!("Offset of field: ", stringify!(duckdb_blob), "::", stringify!(data)) - ); - } - test_field_data(); - fn test_field_size() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize - }, - 8usize, - concat!("Offset of field: ", stringify!(duckdb_blob), "::", stringify!(size)) - ); - } - test_field_size(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, + 0usize, + concat!("Offset of field: ", stringify!(duckdb_blob), "::", stringify!(data)) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize }, + 8usize, + concat!("Offset of field: ", stringify!(duckdb_blob), "::", stringify!(size)) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -15686,6 +10978,8 @@ pub struct duckdb_column { } #[test] fn bindgen_test_layout_duckdb_column() { + const UNINIT: 
::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::(), 40usize, @@ -15696,91 +10990,56 @@ fn bindgen_test_layout_duckdb_column() { 8usize, concat!("Alignment of ", stringify!(duckdb_column)) ); - fn test_field___deprecated_data() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_data) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(duckdb_column), - "::", - stringify!(__deprecated_data) - ) - ); - } - test_field___deprecated_data(); - fn test_field___deprecated_nullmask() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_nullmask) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(duckdb_column), - "::", - stringify!(__deprecated_nullmask) - ) - ); - } - test_field___deprecated_nullmask(); - fn test_field___deprecated_type() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_type) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(duckdb_column), - "::", - stringify!(__deprecated_type) - ) - ); - } - test_field___deprecated_type(); - fn test_field___deprecated_name() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_name) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(duckdb_column), - "::", - stringify!(__deprecated_name) - ) - ); - } - test_field___deprecated_name(); - fn test_field_internal_data() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).internal_data) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(duckdb_column), - "::", - stringify!(internal_data) - ) - ); - } - test_field_internal_data(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__deprecated_data) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(duckdb_column), + "::", + stringify!(__deprecated_data) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__deprecated_nullmask) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(duckdb_column), + "::", + stringify!(__deprecated_nullmask) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__deprecated_type) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(duckdb_column), + "::", + stringify!(__deprecated_type) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__deprecated_name) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(duckdb_column), + "::", + stringify!(__deprecated_name) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).internal_data) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(duckdb_column), + "::", + stringify!(internal_data) + ) + ); } #[repr(C)] #[derive(Debug, Copy, Clone)] @@ -15794,6 +11053,8 @@ pub struct duckdb_result { } #[test] fn bindgen_test_layout_duckdb_result() { + const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( 
::std::mem::size_of::(), 48usize, @@ -15804,108 +11065,66 @@ fn bindgen_test_layout_duckdb_result() { 8usize, concat!("Alignment of ", stringify!(duckdb_result)) ); - fn test_field___deprecated_column_count() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_column_count) as usize - ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(duckdb_result), - "::", - stringify!(__deprecated_column_count) - ) - ); - } - test_field___deprecated_column_count(); - fn test_field___deprecated_row_count() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_row_count) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(duckdb_result), - "::", - stringify!(__deprecated_row_count) - ) - ); - } - test_field___deprecated_row_count(); - fn test_field___deprecated_rows_changed() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_rows_changed) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(duckdb_result), - "::", - stringify!(__deprecated_rows_changed) - ) - ); - } - test_field___deprecated_rows_changed(); - fn test_field___deprecated_columns() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_columns) as usize - ptr as usize - }, - 24usize, - concat!( - "Offset of field: ", - stringify!(duckdb_result), - "::", - stringify!(__deprecated_columns) - ) - ); - } - test_field___deprecated_columns(); - fn test_field___deprecated_error_message() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).__deprecated_error_message) as usize - ptr as usize - }, - 32usize, - concat!( - "Offset of field: ", - stringify!(duckdb_result), - "::", - stringify!(__deprecated_error_message) - ) - ); - } - test_field___deprecated_error_message(); - fn test_field_internal_data() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).internal_data) as usize - ptr as usize - }, - 40usize, - concat!( - "Offset of field: ", - stringify!(duckdb_result), - "::", - stringify!(internal_data) - ) - ); - } - test_field_internal_data(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__deprecated_column_count) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(duckdb_result), + "::", + stringify!(__deprecated_column_count) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__deprecated_row_count) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(duckdb_result), + "::", + stringify!(__deprecated_row_count) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__deprecated_rows_changed) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(duckdb_result), + "::", + stringify!(__deprecated_rows_changed) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).__deprecated_columns) as usize - ptr as usize }, + 24usize, + concat!( + "Offset of field: ", + stringify!(duckdb_result), + "::", + stringify!(__deprecated_columns) + ) + ); + assert_eq!( + unsafe { 
::std::ptr::addr_of!((*ptr).__deprecated_error_message) as usize - ptr as usize }, + 32usize, + concat!( + "Offset of field: ", + stringify!(duckdb_result), + "::", + stringify!(__deprecated_error_message) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).internal_data) as usize - ptr as usize }, + 40usize, + concat!( + "Offset of field: ", + stringify!(duckdb_result), + "::", + stringify!(internal_data) + ) + ); } pub type duckdb_database = *mut ::std::os::raw::c_void; pub type duckdb_connection = *mut ::std::os::raw::c_void; @@ -15976,6 +11195,12 @@ extern "C" { #[doc = " connection: The connection to close."] pub fn duckdb_disconnect(connection: *mut duckdb_connection); } +extern "C" { + #[doc = "Returns the version of the linked DuckDB, with a version postfix for dev versions"] + #[doc = ""] + #[doc = "Usually used for developing C extensions that must return this for a compatibility check."] + pub fn duckdb_library_version() -> *const ::std::os::raw::c_char; +} extern "C" { #[doc = "Initializes an empty configuration object that can be used to provide start-up options for the DuckDB instance"] #[doc = "through `duckdb_open_ext`."] @@ -15992,7 +11217,7 @@ extern "C" { #[doc = "This should not be called in a loop as it internally loops over all the options."] #[doc = ""] #[doc = " returns: The amount of config options available."] - pub fn duckdb_config_count() -> size_t; + pub fn duckdb_config_count() -> usize; } extern "C" { #[doc = "Obtains a human-readable name and description of a specific configuration option. This can be used to e.g."] @@ -16005,7 +11230,7 @@ extern "C" { #[doc = " out_description: A description of the configuration flag."] #[doc = " returns: `DuckDBSuccess` on success or `DuckDBError` on failure."] pub fn duckdb_get_config_flag( - index: size_t, + index: usize, out_name: *mut *const ::std::os::raw::c_char, out_description: *mut *const ::std::os::raw::c_char, ) -> duckdb_state; @@ -16256,11 +11481,20 @@ extern "C" { pub fn duckdb_value_interval(result: *mut duckdb_result, col: idx_t, row: idx_t) -> duckdb_interval; } extern "C" { - #[doc = " returns: The char* value at the specified location, or nullptr if the value cannot be converted."] - #[doc = "The result must be freed with `duckdb_free`."] + #[doc = " DEPRECATED: use duckdb_value_string instead. This function does not work correctly if the string contains null bytes."] + #[doc = " returns: The text value at the specified location as a null-terminated string, or nullptr if the value cannot be"] + #[doc = "converted. The result must be freed with `duckdb_free`."] pub fn duckdb_value_varchar(result: *mut duckdb_result, col: idx_t, row: idx_t) -> *mut ::std::os::raw::c_char; } extern "C" { + #[doc = "s"] + #[doc = " returns: The string value at the specified location."] + #[doc = "The result must be freed with `duckdb_free`."] + pub fn duckdb_value_string(result: *mut duckdb_result, col: idx_t, row: idx_t) -> duckdb_string; +} +extern "C" { + #[doc = " DEPRECATED: use duckdb_value_string_internal instead. This function does not work correctly if the string contains"] + #[doc = "null bytes."] #[doc = " returns: The char* value at the specified location. ONLY works on VARCHAR columns and does not auto-cast."] #[doc = "If the column is NOT a VARCHAR column this function will return NULL."] #[doc = ""] @@ -16271,6 +11505,15 @@ extern "C" { row: idx_t, ) -> *mut ::std::os::raw::c_char; } +extern "C" { + #[doc = " DEPRECATED: use duckdb_value_string_internal instead. 
This function does not work correctly if the string contains"] + #[doc = "null bytes."] + #[doc = " returns: The char* value at the specified location. ONLY works on VARCHAR columns and does not auto-cast."] + #[doc = "If the column is NOT a VARCHAR column this function will return NULL."] + #[doc = ""] + #[doc = "The result must NOT be freed."] + pub fn duckdb_value_string_internal(result: *mut duckdb_result, col: idx_t, row: idx_t) -> duckdb_string; +} extern "C" { #[doc = " returns: The duckdb_blob value at the specified location. Returns a blob with blob.data set to nullptr if the"] #[doc = "value cannot be converted. The resulting \"blob.data\" must be freed with `duckdb_free.`"] @@ -16286,7 +11529,7 @@ extern "C" { #[doc = ""] #[doc = " size: The number of bytes to allocate."] #[doc = " returns: A pointer to the allocated memory region."] - pub fn duckdb_malloc(size: size_t) -> *mut ::std::os::raw::c_void; + pub fn duckdb_malloc(size: usize) -> *mut ::std::os::raw::c_void; } extern "C" { #[doc = "Free a value returned from `duckdb_malloc`, `duckdb_value_varchar` or `duckdb_value_blob`."] @@ -16359,6 +11602,15 @@ extern "C" { #[doc = " returns: The converted `duckdb_hugeint` element."] pub fn duckdb_double_to_hugeint(val: f64) -> duckdb_hugeint; } +extern "C" { + #[doc = "Converts a double value to a duckdb_decimal object."] + #[doc = ""] + #[doc = "If the conversion fails because the double value is too big, or the width/scale are invalid the result will be 0."] + #[doc = ""] + #[doc = " val: The double value."] + #[doc = " returns: The converted `duckdb_decimal` element."] + pub fn duckdb_double_to_decimal(val: f64, width: u8, scale: u8) -> duckdb_decimal; +} extern "C" { #[doc = "Converts a duckdb_decimal object (as obtained from a `DUCKDB_TYPE_DECIMAL` column) into a double."] #[doc = ""] @@ -16457,6 +11709,14 @@ extern "C" { val: duckdb_hugeint, ) -> duckdb_state; } +extern "C" { + #[doc = "Binds a duckdb_decimal value to the prepared statement at the specified index."] + pub fn duckdb_bind_decimal( + prepared_statement: duckdb_prepared_statement, + param_idx: idx_t, + val: duckdb_decimal, + ) -> duckdb_state; +} extern "C" { #[doc = "Binds an uint8_t value to the prepared statement at the specified index."] pub fn duckdb_bind_uint8(prepared_statement: duckdb_prepared_statement, param_idx: idx_t, val: u8) -> duckdb_state; @@ -17330,6 +12590,13 @@ extern "C" { #[doc = " parameter: The parameter to add."] pub fn duckdb_replacement_scan_add_parameter(info: duckdb_replacement_scan_info, parameter: duckdb_value); } +extern "C" { + #[doc = "Report that an error has occurred while executing the replacement scan."] + #[doc = ""] + #[doc = " info: The info object"] + #[doc = " error: The error message"] + pub fn duckdb_replacement_scan_set_error(info: duckdb_replacement_scan_info, error: *const ::std::os::raw::c_char); +} extern "C" { #[doc = "Creates an appender object."] #[doc = ""] @@ -17644,6 +12911,8 @@ pub struct __va_list_tag { } #[test] fn bindgen_test_layout___va_list_tag() { + const UNINIT: ::std::mem::MaybeUninit<__va_list_tag> = ::std::mem::MaybeUninit::uninit(); + let ptr = UNINIT.as_ptr(); assert_eq!( ::std::mem::size_of::<__va_list_tag>(), 24usize, @@ -17654,72 +12923,44 @@ fn bindgen_test_layout___va_list_tag() { 8usize, concat!("Alignment of ", stringify!(__va_list_tag)) ); - fn test_field_gp_offset() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__va_list_tag>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).gp_offset) as usize 
- ptr as usize - }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__va_list_tag), - "::", - stringify!(gp_offset) - ) - ); - } - test_field_gp_offset(); - fn test_field_fp_offset() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__va_list_tag>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).fp_offset) as usize - ptr as usize - }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__va_list_tag), - "::", - stringify!(fp_offset) - ) - ); - } - test_field_fp_offset(); - fn test_field_overflow_arg_area() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__va_list_tag>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).overflow_arg_area) as usize - ptr as usize - }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__va_list_tag), - "::", - stringify!(overflow_arg_area) - ) - ); - } - test_field_overflow_arg_area(); - fn test_field_reg_save_area() { - assert_eq!( - unsafe { - let uninit = ::std::mem::MaybeUninit::<__va_list_tag>::uninit(); - let ptr = uninit.as_ptr(); - ::std::ptr::addr_of!((*ptr).reg_save_area) as usize - ptr as usize - }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__va_list_tag), - "::", - stringify!(reg_save_area) - ) - ); - } - test_field_reg_save_area(); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).gp_offset) as usize - ptr as usize }, + 0usize, + concat!( + "Offset of field: ", + stringify!(__va_list_tag), + "::", + stringify!(gp_offset) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).fp_offset) as usize - ptr as usize }, + 4usize, + concat!( + "Offset of field: ", + stringify!(__va_list_tag), + "::", + stringify!(fp_offset) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).overflow_arg_area) as usize - ptr as usize }, + 8usize, + concat!( + "Offset of field: ", + stringify!(__va_list_tag), + "::", + stringify!(overflow_arg_area) + ) + ); + assert_eq!( + unsafe { ::std::ptr::addr_of!((*ptr).reg_save_area) as usize - ptr as usize }, + 16usize, + concat!( + "Offset of field: ", + stringify!(__va_list_tag), + "::", + stringify!(reg_save_area) + ) + ); } diff --git a/libduckdb-sys/duckdb/duckdb.cpp b/libduckdb-sys/duckdb/duckdb.cpp index 643b0171..26f282fd 100644 --- a/libduckdb-sys/duckdb/duckdb.cpp +++ b/libduckdb-sys/duckdb/duckdb.cpp @@ -341,6 +341,7 @@ class BaseTableRef : public TableRef { + namespace duckdb { struct CreateIndexInfo : public CreateInfo { @@ -359,7 +360,12 @@ struct CreateIndexInfo : public CreateInfo { vector> expressions; vector> parsed_expressions; - vector column_ids; + //! Types used for the CREATE INDEX scan + vector scan_types; + //! The names of the columns, used for the CREATE INDEX scan + vector names; + //! 
Column IDs needed for index creation + vector column_ids; protected: void SerializeInternal(Serializer &serializer) const override; @@ -403,6 +409,7 @@ namespace duckdb { class Block : public FileBuffer { public: Block(Allocator &allocator, block_id_t id); + Block(Allocator &allocator, block_id_t id, uint32_t internal_size); Block(FileBuffer &source, block_id_t id); block_id_t id; @@ -411,8 +418,8 @@ class Block : public FileBuffer { struct BlockPointer { BlockPointer(block_id_t block_id_p, uint32_t offset_p) : block_id(block_id_p), offset(offset_p) {}; BlockPointer() {}; - block_id_t block_id; - uint32_t offset; + block_id_t block_id {0}; + uint32_t offset {0}; }; } // namespace duckdb @@ -431,7 +438,11 @@ struct BlockPointer { + + namespace duckdb { +class BlockHandle; +class BufferManager; class ClientContext; class DatabaseInstance; @@ -439,15 +450,22 @@ class DatabaseInstance; //! BlockManager creates and accesses blocks. The concrete types implements how blocks are stored. class BlockManager { public: + explicit BlockManager(BufferManager &buffer_manager) : buffer_manager(buffer_manager) { + } virtual ~BlockManager() = default; - virtual void StartCheckpoint() = 0; + //! The buffer manager + BufferManager &buffer_manager; + +public: //! Creates a new block inside the block manager - virtual unique_ptr CreateBlock(block_id_t block_id) = 0; + virtual unique_ptr CreateBlock(block_id_t block_id, FileBuffer *source_buffer) = 0; //! Return the next free block id virtual block_id_t GetFreeBlockId() = 0; //! Returns whether or not a specified block is the root block virtual bool IsRootBlock(block_id_t root) = 0; + //! Mark a block as "free"; free blocks are immediately added to the free list and can be immediately overwritten + virtual void MarkBlockAsFree(block_id_t block_id) = 0; //! Mark a block as "modified"; modified blocks are added to the free list after a checkpoint (i.e. their data is //! assumed to be rewritten) virtual void MarkBlockAsModified(block_id_t block_id) = 0; @@ -472,8 +490,21 @@ class BlockManager { //! Returns the number of free blocks virtual idx_t FreeBlocks() = 0; + //! Register a block with the given block id in the base file + shared_ptr RegisterBlock(block_id_t block_id); + //! Convert an existing in-memory buffer into a persistent disk-backed block + shared_ptr ConvertToPersistent(block_id_t block_id, shared_ptr old_block); + + void UnregisterBlock(block_id_t block_id, bool can_destroy); + static BlockManager &GetBlockManager(ClientContext &context); static BlockManager &GetBlockManager(DatabaseInstance &db); + +private: + //! The lock for the set of blocks + mutex blocks_lock; + //! A mapping of block id -> BlockHandle + unordered_map> blocks; }; } // namespace duckdb @@ -482,25 +513,34 @@ class BlockManager { namespace duckdb { class DatabaseInstance; -//! This struct is responsible for writing metadata to disk +//! This struct is responsible for writing data to disk in a stream of blocks. 
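// Illustrative sketch, not part of the upstream diff: with the 0.6.0 refactor the
// block registry lives on BlockManager itself, so a caller holding an in-memory
// BlockHandle could persist it using only the declarations above. `context` and
// `old_handle` are assumed to be supplied by the caller.
static shared_ptr<BlockHandle> PersistBuffer(ClientContext &context, shared_ptr<BlockHandle> old_handle) {
	auto &block_manager = BlockManager::GetBlockManager(context);
	// reserve a free on-disk block id, then re-register the in-memory buffer under that id
	block_id_t block_id = block_manager.GetFreeBlockId();
	return block_manager.ConvertToPersistent(block_id, move(old_handle));
}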
class MetaBlockWriter : public Serializer { public: - MetaBlockWriter(DatabaseInstance &db, block_id_t initial_block_id = INVALID_BLOCK); + MetaBlockWriter(BlockManager &block_manager, block_id_t initial_block_id = INVALID_BLOCK); ~MetaBlockWriter() override; - DatabaseInstance &db; + BlockManager &block_manager; + +protected: unique_ptr block; set written_blocks; idx_t offset; public: BlockPointer GetBlockPointer(); - void Flush(); + virtual void Flush(); void WriteData(const_data_ptr_t buffer, idx_t write_size) override; + void MarkWrittenBlocks() { + for (auto &block_id : written_blocks) { + block_manager.MarkBlockAsModified(block_id); + } + } + protected: virtual block_id_t GetNextBlockId(); + void AdvanceBlock(); }; } // namespace duckdb @@ -775,29 +815,14 @@ class PragmaFunctionCatalogEntry : public StandardEntry { namespace duckdb { struct CreateScalarFunctionInfo : public CreateFunctionInfo { - explicit CreateScalarFunctionInfo(ScalarFunction function) - : CreateFunctionInfo(CatalogType::SCALAR_FUNCTION_ENTRY), functions(function.name) { - name = function.name; - functions.AddFunction(move(function)); - } - explicit CreateScalarFunctionInfo(ScalarFunctionSet set) - : CreateFunctionInfo(CatalogType::SCALAR_FUNCTION_ENTRY), functions(move(set)) { - name = functions.name; - for (auto &func : functions.functions) { - func.name = functions.name; - } - } + DUCKDB_API explicit CreateScalarFunctionInfo(ScalarFunction function); + DUCKDB_API explicit CreateScalarFunctionInfo(ScalarFunctionSet set); ScalarFunctionSet functions; public: - unique_ptr Copy() const override { - ScalarFunctionSet set(name); - set.functions = functions.functions; - auto result = make_unique(move(set)); - CopyProperties(*result); - return move(result); - } + DUCKDB_API unique_ptr Copy() const override; + DUCKDB_API unique_ptr GetAlterInfo() const override; }; } // namespace duckdb @@ -808,12 +833,13 @@ namespace duckdb { //! A table function in the catalog class ScalarFunctionCatalogEntry : public StandardEntry { public: - ScalarFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateScalarFunctionInfo *info) - : StandardEntry(CatalogType::SCALAR_FUNCTION_ENTRY, schema, catalog, info->name), functions(info->functions) { - } + ScalarFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateScalarFunctionInfo *info); //! The scalar functions ScalarFunctionSet functions; + +public: + unique_ptr AlterEntry(ClientContext &context, AlterInfo *info) override; }; } // namespace duckdb @@ -1033,14 +1059,18 @@ class DependencyManager { namespace duckdb { struct CreateTypeInfo : public CreateInfo { - CreateTypeInfo() : CreateInfo(CatalogType::TYPE_ENTRY) { } + CreateTypeInfo(string name_p, LogicalType type_p) + : CreateInfo(CatalogType::TYPE_ENTRY), name(move(name_p)), type(move(type_p)) { + } //! Name of the Type string name; //! Logical Type - LogicalType type; // Shouldn't this be named `logical_type`? (shadows a parent member `type`) + LogicalType type; + //! Used by create enum from query + unique_ptr query; public: unique_ptr Copy() const override { @@ -1048,6 +1078,9 @@ struct CreateTypeInfo : public CreateInfo { CopyProperties(*result); result->name = name; result->type = type; + if (query) { + result->query = query->Copy(); + } return move(result); } @@ -1120,7 +1153,7 @@ struct ClientData { unique_ptr query_profiler_history; //! The set of temporary objects that belong to this client - unique_ptr temporary_objects; + shared_ptr temporary_objects; //! 
The set of bound prepared statements that belong to this client unordered_map> prepared_statements; @@ -1219,7 +1252,7 @@ class FunctionExpression : public ParsedExpression { return function_name + "(" + entry.children[0]->ToString() + ")"; } } else if (entry.children.size() == 2) { - return StringUtil::Format("(%s) %s (%s)", entry.children[0]->ToString(), function_name, + return StringUtil::Format("(%s %s %s)", entry.children[0]->ToString(), function_name, entry.children[1]->ToString()); } } @@ -1281,6 +1314,7 @@ class FunctionExpression : public ParsedExpression { + namespace duckdb { struct CreatePragmaFunctionInfo : public CreateFunctionInfo { @@ -1488,6 +1522,7 @@ struct DropInfo : public ParseInfo { + namespace duckdb { struct CreateTableInfo : public CreateInfo { @@ -1497,7 +1532,7 @@ struct CreateTableInfo : public CreateInfo { //! Table name to insert to string table; //! List of columns of the table - vector columns; + ColumnList columns; //! List of constraints on the table vector> constraints; //! CREATE TABLE from QUERY @@ -1540,6 +1575,58 @@ struct CreateTableInfo : public CreateInfo { +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/storage_lock.hpp +// +// +//===----------------------------------------------------------------------===// + + + + + + + +namespace duckdb { +class StorageLock; + +enum class StorageLockType { SHARED = 0, EXCLUSIVE = 1 }; + +class StorageLockKey { +public: + StorageLockKey(StorageLock &lock, StorageLockType type); + ~StorageLockKey(); + +private: + StorageLock &lock; + StorageLockType type; +}; + +class StorageLock { + friend class StorageLockKey; + +public: + StorageLock(); + + //! Get an exclusive lock + unique_ptr GetExclusiveLock(); + //! Get a shared lock + unique_ptr GetSharedLock(); + +private: + mutex exclusive_lock; + atomic read_count; + +private: + //! Release an exclusive lock + void ReleaseExclusiveLock(); + //! Release a shared lock + void ReleaseSharedLock(); +}; + +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB @@ -1558,13 +1645,13 @@ namespace duckdb { class SegmentBase { public: - SegmentBase(idx_t start, idx_t count) : start(start), count(count) { + SegmentBase(idx_t start, idx_t count) : start(start), count(count), next(nullptr) { } virtual ~SegmentBase() { - // destroy the chain of segments iteratively (rather than recursively) - while (next && next->next) { - next = move(next->next); - } + } + + SegmentBase *Next() { + return next.load(); } //! The start row id of this chunk @@ -1572,7 +1659,46 @@ class SegmentBase { //! The amount of entries in this storage chunk atomic count; //! The next segment after this one - unique_ptr next; + atomic next; +}; + +} // namespace duckdb + +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/table/segment_lock.hpp +// +// +//===----------------------------------------------------------------------===// + + + + + + +namespace duckdb { + +struct SegmentLock { +public: + SegmentLock() { + } + SegmentLock(mutex &lock) : lock(lock) { + } + // disable copy constructors + SegmentLock(const SegmentLock &other) = delete; + SegmentLock &operator=(const SegmentLock &) = delete; + //! 
enable move constructors + SegmentLock(SegmentLock &&other) noexcept { + std::swap(lock, other.lock); + } + SegmentLock &operator=(SegmentLock &&other) noexcept { + std::swap(lock, other.lock); + return *this; + } + +private: + unique_lock lock; }; } // namespace duckdb @@ -1584,35 +1710,62 @@ namespace duckdb { struct SegmentNode { idx_t row_start; - SegmentBase *node; + unique_ptr node; }; //! The SegmentTree maintains a list of all segments of a specific column in a table, and allows searching for a segment //! by row number class SegmentTree { public: - //! The initial segment of the tree - unique_ptr root_node; - //! The nodes in the tree, can be binary searched - vector nodes; - //! Lock to access or modify the nodes - mutex node_lock; + //! Locks the segment tree. All methods to the segment tree either lock the segment tree, or take an already + //! obtained lock. + SegmentLock Lock(); + + bool IsEmpty(SegmentLock &); -public: //! Gets a pointer to the first segment. Useful for scans. SegmentBase *GetRootSegment(); + SegmentBase *GetRootSegment(SegmentLock &); + //! Obtains ownership of the data of the segment tree + vector MoveSegments(SegmentLock &); + //! Gets a pointer to the nth segment. Negative numbers start from the back. + SegmentBase *GetSegmentByIndex(int64_t index); + SegmentBase *GetSegmentByIndex(SegmentLock &, int64_t index); + //! Gets a pointer to the last segment. Useful for appends. SegmentBase *GetLastSegment(); + SegmentBase *GetLastSegment(SegmentLock &); //! Gets a pointer to a specific column segment for the given row SegmentBase *GetSegment(idx_t row_number); + SegmentBase *GetSegment(SegmentLock &, idx_t row_number); + //! Append a column segment to the tree void AppendSegment(unique_ptr segment); + void AppendSegment(SegmentLock &, unique_ptr segment); + //! Debug method, check whether the segment is in the segment tree + bool HasSegment(SegmentBase *segment); + bool HasSegment(SegmentLock &, SegmentBase *segment); //! Replace this tree with another tree, taking over its nodes in-place void Replace(SegmentTree &other); + void Replace(SegmentLock &, SegmentTree &other); + + //! Erase all segments after a specific segment + void EraseSegments(SegmentLock &, idx_t segment_start); - //! Get the segment index of the column segment for the given row (does not lock the segment tree!) + //! Get the segment index of the column segment for the given row idx_t GetSegmentIndex(idx_t row_number); + idx_t GetSegmentIndex(SegmentLock &, idx_t row_number); + bool TryGetSegmentIndex(SegmentLock &, idx_t row_number, idx_t &); + + void Verify(SegmentLock &); + void Verify(); + +private: + //! The nodes in the tree, can be binary searched + vector nodes; + //! Lock to access or modify the nodes + mutex node_lock; }; } // namespace duckdb @@ -1661,6 +1814,7 @@ namespace duckdb { class RowGroup; struct SelectionVector; class Transaction; +struct TransactionData; enum class ChunkInfoType : uint8_t { CONSTANT_INFO, VECTOR_INFO, EMPTY_INFO }; @@ -1679,11 +1833,11 @@ class ChunkInfo { public: //! Gets up to max_count entries from the chunk info. If the ret is 0>ret>max_count, the selection vector is filled //! 
with the tuples - virtual idx_t GetSelVector(Transaction &transaction, SelectionVector &sel_vector, idx_t max_count) = 0; + virtual idx_t GetSelVector(TransactionData transaction, SelectionVector &sel_vector, idx_t max_count) = 0; virtual idx_t GetCommittedSelVector(transaction_t min_start_id, transaction_t min_transaction_id, SelectionVector &sel_vector, idx_t max_count) = 0; //! Returns whether or not a single row in the ChunkInfo should be used or not for the given transaction - virtual bool Fetch(Transaction &transaction, row_t row) = 0; + virtual bool Fetch(TransactionData transaction, row_t row) = 0; virtual void CommitAppend(transaction_t commit_id, idx_t start, idx_t end) = 0; virtual void Serialize(Serializer &serialize) = 0; @@ -1698,10 +1852,10 @@ class ChunkConstantInfo : public ChunkInfo { atomic delete_id; public: - idx_t GetSelVector(Transaction &transaction, SelectionVector &sel_vector, idx_t max_count) override; + idx_t GetSelVector(TransactionData transaction, SelectionVector &sel_vector, idx_t max_count) override; idx_t GetCommittedSelVector(transaction_t min_start_id, transaction_t min_transaction_id, SelectionVector &sel_vector, idx_t max_count) override; - bool Fetch(Transaction &transaction, row_t row) override; + bool Fetch(TransactionData transaction, row_t row) override; void CommitAppend(transaction_t commit_id, idx_t start, idx_t end) override; void Serialize(Serializer &serialize) override; @@ -1729,19 +1883,20 @@ class ChunkVectorInfo : public ChunkInfo { public: idx_t GetSelVector(transaction_t start_time, transaction_t transaction_id, SelectionVector &sel_vector, idx_t max_count); - idx_t GetSelVector(Transaction &transaction, SelectionVector &sel_vector, idx_t max_count) override; + idx_t GetSelVector(TransactionData transaction, SelectionVector &sel_vector, idx_t max_count) override; idx_t GetCommittedSelVector(transaction_t min_start_id, transaction_t min_transaction_id, SelectionVector &sel_vector, idx_t max_count) override; - bool Fetch(Transaction &transaction, row_t row) override; + bool Fetch(TransactionData transaction, row_t row) override; void CommitAppend(transaction_t commit_id, idx_t start, idx_t end) override; void Append(idx_t start, idx_t end, transaction_t commit_id); + //! Performs a delete in the ChunkVectorInfo - returns how many tuples were actually deleted //! The number of rows that were actually deleted might be lower than the input count //! In case we delete rows that were already deleted //! Note that "rows" is written to to reflect the row ids that were actually deleted //! i.e. 
after calling this function, rows will hold [0..actual_delete_count] row ids of the actually deleted tuples - idx_t Delete(Transaction &transaction, row_t rows[], idx_t count); + idx_t Delete(transaction_t transaction_id, row_t rows[], idx_t count); void CommitDelete(transaction_t commit_id, row_t rows[], idx_t count); void Serialize(Serializer &serialize) override; @@ -1769,13 +1924,216 @@ class ChunkVectorInfo : public ChunkInfo { +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/function/compression_function.hpp +// +// +//===----------------------------------------------------------------------===// + + + + + + +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/map.hpp +// +// +//===----------------------------------------------------------------------===// + + + +#include + +namespace duckdb { +using std::map; +using std::multimap; +} // namespace duckdb + + + + +namespace duckdb { +class DatabaseInstance; +class ColumnData; +class ColumnDataCheckpointer; +class ColumnSegment; +class SegmentStatistics; + +struct ColumnFetchState; +struct ColumnScanState; +struct SegmentScanState; + +struct AnalyzeState { + virtual ~AnalyzeState() { + } +}; + +struct CompressionState { + virtual ~CompressionState() { + } +}; + +struct CompressedSegmentState { + virtual ~CompressedSegmentState() { + } +}; + +struct CompressionAppendState { + CompressionAppendState(BufferHandle handle_p) : handle(move(handle_p)) { + } + virtual ~CompressionAppendState() { + } + + BufferHandle handle; +}; + +//===--------------------------------------------------------------------===// +// Analyze +//===--------------------------------------------------------------------===// +//! The analyze functions are used to determine whether or not to use this compression method +//! The system first determines the potential compression methods to use based on the physical type of the column +//! After that the following steps are taken: +//! 1. The init_analyze is called to initialize the analyze state of every candidate compression method +//! 2. The analyze method is called with all of the input data in the order in which it must be stored. +//! analyze can return "false". In that case, the compression method is taken out of consideration early. +//! 3. The final_analyze method is called, which should return a score for the compression method + +//! 
The system then decides which compression function to use based on the analyzed score (returned from final_analyze) +typedef unique_ptr (*compression_init_analyze_t)(ColumnData &col_data, PhysicalType type); +typedef bool (*compression_analyze_t)(AnalyzeState &state, Vector &input, idx_t count); +typedef idx_t (*compression_final_analyze_t)(AnalyzeState &state); + +//===--------------------------------------------------------------------===// +// Compress +//===--------------------------------------------------------------------===// +typedef unique_ptr (*compression_init_compression_t)(ColumnDataCheckpointer &checkpointer, + unique_ptr state); +typedef void (*compression_compress_data_t)(CompressionState &state, Vector &scan_vector, idx_t count); +typedef void (*compression_compress_finalize_t)(CompressionState &state); + +//===--------------------------------------------------------------------===// +// Uncompress / Scan +//===--------------------------------------------------------------------===// +typedef unique_ptr (*compression_init_segment_scan_t)(ColumnSegment &segment); + +//! Function prototype used for reading an entire vector (STANDARD_VECTOR_SIZE) +typedef void (*compression_scan_vector_t)(ColumnSegment &segment, ColumnScanState &state, idx_t scan_count, + Vector &result); +//! Function prototype used for reading an arbitrary ('scan_count') number of values +typedef void (*compression_scan_partial_t)(ColumnSegment &segment, ColumnScanState &state, idx_t scan_count, + Vector &result, idx_t result_offset); +//! Function prototype used for reading a single value +typedef void (*compression_fetch_row_t)(ColumnSegment &segment, ColumnFetchState &state, row_t row_id, Vector &result, + idx_t result_idx); +//! Function prototype used for skipping 'skip_count' values, non-trivial if random-access is not supported for the +//! compressed data. 
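// Illustrative sketch, not part of the upstream diff: how the three analyze
// callbacks declared above combine into a score for one candidate compression
// method. `input` is assumed to hold the column's vectors in storage order; a
// return value of DConstants::INVALID_INDEX marks the candidate as not viable.
static idx_t ScoreCompressionCandidate(compression_init_analyze_t init_analyze, compression_analyze_t analyze,
                                       compression_final_analyze_t final_analyze, ColumnData &col_data,
                                       PhysicalType type, vector<Vector> &input, idx_t count_per_vector) {
	auto state = init_analyze(col_data, type);
	for (auto &vec : input) {
		if (!analyze(*state, vec, count_per_vector)) {
			// the candidate bailed out early: compression with this method is not possible
			return DConstants::INVALID_INDEX;
		}
	}
	// lower scores (estimated bytes) win when the system compares candidates
	return final_analyze(*state);
}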
+typedef void (*compression_skip_t)(ColumnSegment &segment, ColumnScanState &state, idx_t skip_count); + +//===--------------------------------------------------------------------===// +// Append (optional) +//===--------------------------------------------------------------------===// +typedef unique_ptr (*compression_init_segment_t)(ColumnSegment &segment, block_id_t block_id); +typedef unique_ptr (*compression_init_append_t)(ColumnSegment &segment); +typedef idx_t (*compression_append_t)(CompressionAppendState &append_state, ColumnSegment &segment, + SegmentStatistics &stats, UnifiedVectorFormat &data, idx_t offset, idx_t count); +typedef idx_t (*compression_finalize_append_t)(ColumnSegment &segment, SegmentStatistics &stats); +typedef void (*compression_revert_append_t)(ColumnSegment &segment, idx_t start_row); + +class CompressionFunction { +public: + CompressionFunction(CompressionType type, PhysicalType data_type, compression_init_analyze_t init_analyze, + compression_analyze_t analyze, compression_final_analyze_t final_analyze, + compression_init_compression_t init_compression, compression_compress_data_t compress, + compression_compress_finalize_t compress_finalize, compression_init_segment_scan_t init_scan, + compression_scan_vector_t scan_vector, compression_scan_partial_t scan_partial, + compression_fetch_row_t fetch_row, compression_skip_t skip, + compression_init_segment_t init_segment = nullptr, + compression_init_append_t init_append = nullptr, compression_append_t append = nullptr, + compression_finalize_append_t finalize_append = nullptr, + compression_revert_append_t revert_append = nullptr) + : type(type), data_type(data_type), init_analyze(init_analyze), analyze(analyze), final_analyze(final_analyze), + init_compression(init_compression), compress(compress), compress_finalize(compress_finalize), + init_scan(init_scan), scan_vector(scan_vector), scan_partial(scan_partial), fetch_row(fetch_row), skip(skip), + init_segment(init_segment), init_append(init_append), append(append), finalize_append(finalize_append), + revert_append(revert_append) { + } + + //! Compression type + CompressionType type; + //! The data type this function can compress + PhysicalType data_type; + + //! Analyze step: determine which compression function is the most effective + //! init_analyze is called once to set up the analyze state + compression_init_analyze_t init_analyze; + //! analyze is called several times (once per vector in the row group) + //! analyze should return true, unless compression is no longer possible with this compression method + //! in that case false should be returned + compression_analyze_t analyze; + //! final_analyze should return the score of the compression function + //! ideally this is the exact number of bytes required to store the data + //! this is not required/enforced: it can be an estimate as well + //! also this function can return DConstants::INVALID_INDEX to skip this compression method + compression_final_analyze_t final_analyze; + + //! Compression step: actually compress the data + //! init_compression is called once to set up the comperssion state + compression_init_compression_t init_compression; + //! compress is called several times (once per vector in the row group) + compression_compress_data_t compress; + //! compress_finalize is called after + compression_compress_finalize_t compress_finalize; + + //! init_scan is called to set up the scan state + compression_init_segment_scan_t init_scan; + //! 
scan_vector scans an entire vector using the scan state + compression_scan_vector_t scan_vector; + //! scan_partial scans a subset of a vector + //! this can request > vector_size as well + //! this is used if a vector crosses segment boundaries, or for child columns of lists + compression_scan_partial_t scan_partial; + //! fetch an individual row from the compressed vector + //! used for index lookups + compression_fetch_row_t fetch_row; + //! Skip forward in the compressed segment + compression_skip_t skip; + + // Append functions + //! This only really needs to be defined for uncompressed segments + + //! Initialize a compressed segment (optional) + compression_init_segment_t init_segment; + //! Initialize the append state (optional) + compression_init_append_t init_append; + //! Append to the compressed segment (optional) + compression_append_t append; + //! Finalize an append to the segment + compression_finalize_append_t finalize_append; + //! Revert append (optional) + compression_revert_append_t revert_append; +}; + +//! The set of compression functions +struct CompressionFunctionSet { + mutex lock; + map> functions; +}; + +} // namespace duckdb + + namespace duckdb { class ColumnSegment; class DataTable; +class LocalTableStorage; class RowGroup; class UpdateSegment; -class ValiditySegment; struct TableAppendState; @@ -1786,6 +2144,8 @@ struct ColumnAppendState { vector child_appends; //! The write lock that is held by the append unique_ptr lock; + //! The compression append state + unique_ptr append_state; }; struct RowGroupAppendState { @@ -1807,23 +2167,34 @@ struct IndexLock { }; struct TableAppendState { - TableAppendState() : row_group_append_state(*this) { - } + TableAppendState(); + ~TableAppendState(); RowGroupAppendState row_group_append_state; unique_lock append_lock; row_t row_start; row_t current_row; - idx_t remaining_append_count; + //! The total number of rows appended by the append operation + idx_t total_append_count; + //! The first row-group that has been appended to + RowGroup *start_row_group; + //! The transaction data + TransactionData transaction; + //! The remaining append count, only if the append count is known beforehand + idx_t remaining; }; -} // namespace duckdb +struct LocalAppendState { + TableAppendState append_state; + LocalTableStorage *storage; +}; +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/storage/statistics/segment_statistics.hpp +// duckdb/storage/table/scan_state.hpp // // //===----------------------------------------------------------------------===// @@ -1833,25 +2204,6 @@ struct TableAppendState { - -namespace duckdb { - -class SegmentStatistics { -public: - SegmentStatistics(LogicalType type); - SegmentStatistics(LogicalType type, unique_ptr statistics); - - LogicalType type; - - //! 
Type-specific statistics of the segment - unique_ptr statistics; - -public: - void Reset(); -}; - -} // namespace duckdb - //===----------------------------------------------------------------------===// // DuckDB // @@ -1880,198 +2232,217 @@ enum class TableScanType : uint8_t { } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/adaptive_filter.hpp +// +// +//===----------------------------------------------------------------------===// + -namespace duckdb { -class ColumnData; -class DatabaseInstance; -class DataTable; -struct DataTableInfo; -class ExpressionExecutor; -class TableDataWriter; -class UpdateSegment; -class Vector; -struct RowGroupPointer; -struct VersionNode; +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_aggregate_expression.hpp +// +// +//===----------------------------------------------------------------------===// + -class RowGroup : public SegmentBase { -public: - friend class ColumnData; - friend class VersionDeleteState; -public: - static constexpr const idx_t ROW_GROUP_VECTOR_COUNT = 120; - static constexpr const idx_t ROW_GROUP_SIZE = STANDARD_VECTOR_SIZE * ROW_GROUP_VECTOR_COUNT; + +#include + +namespace duckdb { +class BoundAggregateExpression : public Expression { public: - RowGroup(DatabaseInstance &db, DataTableInfo &table_info, idx_t start, idx_t count); - RowGroup(DatabaseInstance &db, DataTableInfo &table_info, const vector &types, - RowGroupPointer &pointer); - ~RowGroup(); + BoundAggregateExpression(AggregateFunction function, vector> children, + unique_ptr filter, unique_ptr bind_info, + AggregateType aggr_type); -private: - //! The database instance - DatabaseInstance &db; - //! The table info of this row_group - DataTableInfo &table_info; - //! The version info of the row_group (inserted and deleted tuple info) - shared_ptr version_info; - //! The column data of the row_group - vector> columns; - //! The segment statistics for each of the columns - vector> stats; + //! The bound function expression + AggregateFunction function; + //! List of arguments to the function + vector> children; + //! The bound function data (if any) + unique_ptr bind_info; + AggregateType aggr_type; + + //! 
Filter for this aggregate + unique_ptr filter; public: - DatabaseInstance &GetDatabase() { - return db; + bool IsDistinct() const { + return aggr_type == AggregateType::DISTINCT; } - DataTableInfo &GetTableInfo() { - return table_info; + + bool IsAggregate() const override { + return true; } - idx_t GetColumnIndex(ColumnData *data) { - for (idx_t i = 0; i < columns.size(); i++) { - if (columns[i].get() == data) { - return i; - } - } - return 0; + bool IsFoldable() const override { + return false; } + bool PropagatesNullValues() const override; - unique_ptr AlterType(ClientContext &context, const LogicalType &target_type, idx_t changed_idx, - ExpressionExecutor &executor, TableScanState &scan_state, DataChunk &scan_chunk); - unique_ptr AddColumn(ClientContext &context, ColumnDefinition &new_column, ExpressionExecutor &executor, - Expression *default_value, Vector &intermediate); - unique_ptr RemoveColumn(idx_t removed_column); + string ToString() const override; - void CommitDrop(); - void CommitDropColumn(idx_t index); + hash_t Hash() const override; + bool Equals(const BaseExpression *other) const override; + unique_ptr Copy() override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; +} // namespace duckdb - void InitializeEmpty(const vector &types); +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_between_expression.hpp +// +// +//===----------------------------------------------------------------------===// - //! Initialize a scan over this row_group - bool InitializeScan(RowGroupScanState &state); - bool InitializeScanWithOffset(RowGroupScanState &state, idx_t vector_offset); - //! Checks the given set of table filters against the row-group statistics. Returns false if the entire row group - //! can be skipped. - bool CheckZonemap(TableFilterSet &filters, const vector &column_ids); - //! Checks the given set of table filters against the per-segment statistics. Returns false if any segments were - //! skipped. - bool CheckZonemapSegments(RowGroupScanState &state); - void Scan(Transaction &transaction, RowGroupScanState &state, DataChunk &result); - void ScanCommitted(RowGroupScanState &state, DataChunk &result, TableScanType type); - idx_t GetSelVector(Transaction &transaction, idx_t vector_idx, SelectionVector &sel_vector, idx_t max_count); - idx_t GetCommittedSelVector(transaction_t start_time, transaction_t transaction_id, idx_t vector_idx, - SelectionVector &sel_vector, idx_t max_count); - //! For a specific row, returns true if it should be used for the transaction and false otherwise. - bool Fetch(Transaction &transaction, idx_t row); - //! Fetch a specific row from the row_group and insert it into the result at the specified index - void FetchRow(Transaction &transaction, ColumnFetchState &state, const vector &column_ids, row_t row_id, - DataChunk &result, idx_t result_idx); - //! Append count rows to the version info - void AppendVersionInfo(Transaction &transaction, idx_t start, idx_t count, transaction_t commit_id); - //! Commit a previous append made by RowGroup::AppendVersionInfo - void CommitAppend(transaction_t commit_id, idx_t start, idx_t count); - //! Revert a previous append made by RowGroup::AppendVersionInfo - void RevertAppend(idx_t start); - //! 
Delete the given set of rows in the version manager - idx_t Delete(Transaction &transaction, DataTable *table, row_t *row_ids, idx_t count); +namespace duckdb { - RowGroupPointer Checkpoint(TableDataWriter &writer, vector> &global_stats); - static void Serialize(RowGroupPointer &pointer, Serializer &serializer); - static RowGroupPointer Deserialize(Deserializer &source, const vector &columns); +class BoundBetweenExpression : public Expression { +public: + BoundBetweenExpression(unique_ptr input, unique_ptr lower, unique_ptr upper, + bool lower_inclusive, bool upper_inclusive); - void InitializeAppend(Transaction &transaction, RowGroupAppendState &append_state, idx_t remaining_append_count); - void Append(RowGroupAppendState &append_state, DataChunk &chunk, idx_t append_count); + unique_ptr input; + unique_ptr lower; + unique_ptr upper; + bool lower_inclusive; + bool upper_inclusive; - void Update(Transaction &transaction, DataChunk &updates, row_t *ids, idx_t offset, idx_t count, - const vector &column_ids); - //! Update a single column; corresponds to DataTable::UpdateColumn - //! This method should only be called from the WAL - void UpdateColumn(Transaction &transaction, DataChunk &updates, Vector &row_ids, - const vector &column_path); +public: + string ToString() const override; - void MergeStatistics(idx_t column_idx, const BaseStatistics &other); - void MergeIntoStatistics(idx_t column_idx, BaseStatistics &other); - unique_ptr GetStatistics(idx_t column_idx); + bool Equals(const BaseExpression *other) const override; - void GetStorageInfo(idx_t row_group_index, vector> &result); + unique_ptr Copy() override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); - void Verify(); +public: + ExpressionType LowerComparisonType() { + return lower_inclusive ? ExpressionType::COMPARE_GREATERTHANOREQUALTO : ExpressionType::COMPARE_GREATERTHAN; + } + ExpressionType UpperComparisonType() { + return upper_inclusive ? 
ExpressionType::COMPARE_LESSTHANOREQUALTO : ExpressionType::COMPARE_LESSTHAN; + } +}; +} // namespace duckdb - void NextVector(RowGroupScanState &state); +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_case_expression.hpp +// +// +//===----------------------------------------------------------------------===// -private: - ChunkInfo *GetChunkInfo(idx_t vector_idx); - template - void TemplatedScan(Transaction *transaction, RowGroupScanState &state, DataChunk &result); - static void CheckpointDeletes(VersionNode *versions, Serializer &serializer); - static shared_ptr DeserializeDeletes(Deserializer &source); -private: - mutex row_group_lock; - mutex stats_lock; -}; -struct VersionNode { - unique_ptr info[RowGroup::ROW_GROUP_VECTOR_COUNT]; +namespace duckdb { + +struct BoundCaseCheck { + unique_ptr when_expr; + unique_ptr then_expr; + + void Serialize(Serializer &serializer) const; + static BoundCaseCheck Deserialize(Deserializer &source, PlanDeserializationState &state); }; -} // namespace duckdb +class BoundCaseExpression : public Expression { +public: + BoundCaseExpression(LogicalType type); + BoundCaseExpression(unique_ptr when_expr, unique_ptr then_expr, + unique_ptr else_expr); + vector case_checks; + unique_ptr else_expr; +public: + string ToString() const override; -namespace duckdb { + bool Equals(const BaseExpression *other) const override; -struct DataPointer { - uint64_t row_start; - uint64_t tuple_count; - BlockPointer block_pointer; - CompressionType compression_type; - //! Type-specific statistics of the segment - unique_ptr statistics; -}; + unique_ptr Copy() override; -struct RowGroupPointer { - uint64_t row_start; - uint64_t tuple_count; - //! The data pointers of the column segments stored in the row group - vector data_pointers; - //! The per-column statistics of the row group - vector> statistics; - //! The versions information of the row group (if any) - shared_ptr versions; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); }; - } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_cast_expression.hpp +// +// +//===----------------------------------------------------------------------===// + + + + + namespace duckdb { -class BaseStatistics; -class PersistentTableData { +class BoundCastExpression : public Expression { public: - explicit PersistentTableData(idx_t column_count); - ~PersistentTableData(); + BoundCastExpression(unique_ptr child, LogicalType target_type, BoundCastInfo bound_cast, + bool try_cast = false); - vector row_groups; - vector> column_stats; -}; + //! The child type + unique_ptr child; + //! Whether to use try_cast or not. try_cast converts cast failures into NULLs instead of throwing an error. + bool try_cast; + //! The bound cast info + BoundCastInfo bound_cast; -} // namespace duckdb +public: + LogicalType source_type() { + D_ASSERT(child->return_type.IsValid()); + return child->return_type; + } + + //! Cast an expression to the specified SQL type, using only the built-in SQL casts + static unique_ptr AddDefaultCastToType(unique_ptr expr, const LogicalType &target_type, + bool try_cast = false); + //! 
Cast an expression to the specified SQL type if required + static unique_ptr AddCastToType(ClientContext &context, unique_ptr expr, + const LogicalType &target_type, bool try_cast = false); + //! Returns true if a cast is invertible (i.e. CAST(s -> t -> s) = s for all values of s). This is not true for e.g. + //! boolean casts, because that can be e.g. -1 -> TRUE -> 1. This is necessary to prevent some optimizer bugs. + static bool CastIsInvertible(const LogicalType &source_type, const LogicalType &target_type); + string ToString() const override; + + bool Equals(const BaseExpression *other) const override; + unique_ptr Copy() override; + + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/storage/table_index.hpp +// duckdb/planner/expression/bound_comparison_expression.hpp // // //===----------------------------------------------------------------------===// @@ -2079,10 +2450,34 @@ class PersistentTableData { + +namespace duckdb { + +class BoundComparisonExpression : public Expression { +public: + BoundComparisonExpression(ExpressionType type, unique_ptr left, unique_ptr right); + + unique_ptr left; + unique_ptr right; + +public: + string ToString() const override; + + bool Equals(const BaseExpression *other) const override; + + unique_ptr Copy() override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); + +public: + static LogicalType BindComparison(LogicalType left_type, LogicalType right_type); +}; +} // namespace duckdb + //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/storage/index.hpp +// duckdb/planner/expression/bound_conjunction_expression.hpp // // //===----------------------------------------------------------------------===// @@ -2091,14 +2486,33 @@ class PersistentTableData { +namespace duckdb { + +class BoundConjunctionExpression : public Expression { +public: + explicit BoundConjunctionExpression(ExpressionType type); + BoundConjunctionExpression(ExpressionType type, unique_ptr left, unique_ptr right); + + vector> children; + +public: + string ToString() const override; + + bool Equals(const BaseExpression *other) const override; + bool PropagatesNullValues() const override; + unique_ptr Copy() override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/execution/expression_executor.hpp +// duckdb/planner/expression/bound_constant_expression.hpp // // //===----------------------------------------------------------------------===// @@ -2108,326 +2522,241 @@ class PersistentTableData { +namespace duckdb { +class BoundConstantExpression : public Expression { +public: + explicit BoundConstantExpression(Value value); -namespace duckdb { -class Allocator; -class ExecutionContext; + Value value; -//! 
ExpressionExecutor is responsible for executing a set of expressions and storing the result in a data chunk -class ExpressionExecutor { public: - DUCKDB_API ExpressionExecutor(Allocator &allocator); - DUCKDB_API explicit ExpressionExecutor(Allocator &allocator, const Expression *expression); - DUCKDB_API explicit ExpressionExecutor(Allocator &allocator, const Expression &expression); - DUCKDB_API explicit ExpressionExecutor(Allocator &allocator, const vector> &expressions); + string ToString() const override; - Allocator &allocator; - //! The expressions of the executor - vector expressions; - //! The data chunk of the current physical operator, used to resolve - //! column references and determines the output cardinality - DataChunk *chunk = nullptr; + bool Equals(const BaseExpression *other) const override; + hash_t Hash() const override; + + unique_ptr Copy() override; + + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; +} // namespace duckdb + +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_default_expression.hpp +// +// +//===----------------------------------------------------------------------===// + + + + + +namespace duckdb { +class BoundDefaultExpression : public Expression { public: - //! Add an expression to the set of to-be-executed expressions of the executor - DUCKDB_API void AddExpression(const Expression &expr); + explicit BoundDefaultExpression(LogicalType type = LogicalType()) + : Expression(ExpressionType::VALUE_DEFAULT, ExpressionClass::BOUND_DEFAULT, type) { + } - //! Execute the set of expressions with the given input chunk and store the result in the output chunk - DUCKDB_API void Execute(DataChunk *input, DataChunk &result); - inline void Execute(DataChunk &input, DataChunk &result) { - Execute(&input, result); +public: + bool IsScalar() const override { + return false; } - inline void Execute(DataChunk &result) { - Execute(nullptr, result); + bool IsFoldable() const override { + return false; } - //! Execute the ExpressionExecutor and put the result in the result vector; this should only be used for expression - //! executors with a single expression - DUCKDB_API void ExecuteExpression(DataChunk &input, Vector &result); - //! Execute the ExpressionExecutor and put the result in the result vector; this should only be used for expression - //! executors with a single expression - DUCKDB_API void ExecuteExpression(Vector &result); - //! Execute the ExpressionExecutor and generate a selection vector from all true values in the result; this should - //! only be used with a single boolean expression - DUCKDB_API idx_t SelectExpression(DataChunk &input, SelectionVector &sel); + string ToString() const override { + return "DEFAULT"; + } - //! Execute the expression with index `expr_idx` and store the result in the result vector - DUCKDB_API void ExecuteExpression(idx_t expr_idx, Vector &result); - //! Evaluate a scalar expression and fold it into a single value - DUCKDB_API static Value EvaluateScalar(const Expression &expr, bool allow_unfoldable = false); - //! Try to evaluate a scalar expression and fold it into a single value, returns false if an exception is thrown - DUCKDB_API static bool TryEvaluateScalar(const Expression &expr, Value &result); + unique_ptr Copy() override { + return make_unique(return_type); + } - //! 
Initialize the state of a given expression - static unique_ptr InitializeState(const Expression &expr, ExpressionExecutorState &state); + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; +} // namespace duckdb - inline void SetChunk(DataChunk *chunk) { - this->chunk = chunk; - } - inline void SetChunk(DataChunk &chunk) { - SetChunk(&chunk); - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_function_expression.hpp +// +// +//===----------------------------------------------------------------------===// - DUCKDB_API vector> &GetStates(); -protected: - void Initialize(const Expression &expr, ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundReferenceExpression &expr, - ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundBetweenExpression &expr, - ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundCaseExpression &expr, ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundCastExpression &expr, ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundComparisonExpression &expr, - ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundConjunctionExpression &expr, - ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundConstantExpression &expr, - ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundFunctionExpression &expr, - ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundOperatorExpression &expr, - ExpressionExecutorState &state); - static unique_ptr InitializeState(const BoundParameterExpression &expr, - ExpressionExecutorState &state); - void Execute(const Expression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundBetweenExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundCaseExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundCastExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundComparisonExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundConjunctionExpression &expr, ExpressionState *state, const SelectionVector *sel, - idx_t count, Vector &result); - void Execute(const BoundConstantExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundFunctionExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundOperatorExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundParameterExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); - void Execute(const BoundReferenceExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - Vector &result); +namespace duckdb { +class ScalarFunctionCatalogEntry; - //! Execute the (boolean-returning) expression and generate a selection vector with all entries that are "true" in - //! 
the result - idx_t Select(const Expression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - SelectionVector *true_sel, SelectionVector *false_sel); - idx_t DefaultSelect(const Expression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - SelectionVector *true_sel, SelectionVector *false_sel); +//! Represents a function call that has been bound to a base function +class BoundFunctionExpression : public Expression { +public: + BoundFunctionExpression(LogicalType return_type, ScalarFunction bound_function, + vector> arguments, unique_ptr bind_info, + bool is_operator = false); - idx_t Select(const BoundBetweenExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - SelectionVector *true_sel, SelectionVector *false_sel); - idx_t Select(const BoundComparisonExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, - SelectionVector *true_sel, SelectionVector *false_sel); - idx_t Select(const BoundConjunctionExpression &expr, ExpressionState *state, const SelectionVector *sel, - idx_t count, SelectionVector *true_sel, SelectionVector *false_sel); + //! The bound function expression + ScalarFunction function; + //! List of child-expressions of the function + vector> children; + //! The bound function data (if any) + unique_ptr bind_info; + //! Whether or not the function is an operator, only used for rendering + bool is_operator; - //! Verify that the output of a step in the ExpressionExecutor is correct - void Verify(const Expression &expr, Vector &result, idx_t count); +public: + bool HasSideEffects() const override; + bool IsFoldable() const override; + string ToString() const override; + bool PropagatesNullValues() const override; + hash_t Hash() const override; + bool Equals(const BaseExpression *other) const override; - void FillSwitch(Vector &vector, Vector &result, const SelectionVector &sel, sel_t count); + unique_ptr Copy() override; + void Verify() const override; -private: - //! The states of the expression executor; this holds any intermediates and temporary states of expressions - vector> states; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); }; } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_lambda_expression.hpp +// +// +//===----------------------------------------------------------------------===// -namespace duckdb { -class ClientContext; -class Transaction; -struct IndexLock; +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/parser/expression/lambda_expression.hpp +// +// +//===----------------------------------------------------------------------===// -//! The index is an abstract base class that serves as the basis for indexes -class Index { -public: - Index(IndexType type, const vector &column_ids, const vector> &unbound_expressions, - IndexConstraintType constraint_type); - virtual ~Index() = default; - //! The type of the index - IndexType type; - //! Column identifiers to extract from the base table - vector column_ids; - //! unordered_set of column_ids used by the index - unordered_set column_id_set; - //! Unbound expressions used by the index - vector> unbound_expressions; - //! The physical types stored in the index - vector types; - //! The logical types of the expressions - vector logical_types; - //! 
constraint type - IndexConstraintType constraint_type; -public: - //! Initialize a scan on the index with the given expression and column ids - //! to fetch from the base table when we only have one query predicate - virtual unique_ptr InitializeScanSinglePredicate(Transaction &transaction, Value value, - ExpressionType expressionType) = 0; - //! Initialize a scan on the index with the given expression and column ids - //! to fetch from the base table for two query predicates - virtual unique_ptr InitializeScanTwoPredicates(Transaction &transaction, Value low_value, - ExpressionType low_expression_type, Value high_value, - ExpressionType high_expression_type) = 0; - //! Perform a lookup on the index, fetching up to max_count result ids. Returns true if all row ids were fetched, - //! and false otherwise. - virtual bool Scan(Transaction &transaction, DataTable &table, IndexScanState &state, idx_t max_count, - vector &result_ids) = 0; - //! Obtain a lock on the index - virtual void InitializeLock(IndexLock &state); - //! Called when data is appended to the index. The lock obtained from InitializeAppend must be held - virtual bool Append(IndexLock &state, DataChunk &entries, Vector &row_identifiers) = 0; - bool Append(DataChunk &entries, Vector &row_identifiers); - //! Verify that data can be appended to the index - virtual void VerifyAppend(DataChunk &chunk) = 0; - //! Verify that data can be appended to the index for foreign key constraint - virtual void VerifyAppendForeignKey(DataChunk &chunk, string *err_msg_ptr) = 0; - //! Verify that data can be delete from the index for foreign key constraint - virtual void VerifyDeleteForeignKey(DataChunk &chunk, string *err_msg_ptr) = 0; - //! Called when data inside the index is Deleted - virtual void Delete(IndexLock &state, DataChunk &entries, Vector &row_identifiers) = 0; - void Delete(DataChunk &entries, Vector &row_identifiers); - //! Insert data into the index. Does not lock the index. - virtual bool Insert(IndexLock &lock, DataChunk &input, Vector &row_identifiers) = 0; +namespace duckdb { - //! Returns true if the index is affected by updates on the specified column ids, and false otherwise - bool IndexIsUpdated(const vector &column_ids) const; +//! LambdaExpression represents either: +//! 1. A lambda operator that can be used for e.g. mapping an expression to a list +//! 2. An OperatorExpression with the "->" operator +//! Lambda expressions are written in the form of "params -> expr", e.g. "x -> x + 1" +class LambdaExpression : public ParsedExpression { +public: + LambdaExpression(unique_ptr lhs, unique_ptr expr); - //! Returns unique flag - bool IsUnique() { - return (constraint_type == IndexConstraintType::UNIQUE || constraint_type == IndexConstraintType::PRIMARY); - } - //! Returns primary flag - bool IsPrimary() { - return (constraint_type == IndexConstraintType::PRIMARY); - } - //! Returns foreign flag - bool IsForeign() { - return (constraint_type == IndexConstraintType::FOREIGN); - } - //! Serializes the index and returns the pair of block_id offset positions - virtual BlockPointer Serialize(duckdb::MetaBlockWriter &writer); + // we need the context to determine if this is a list of column references or an expression (for JSON) + unique_ptr lhs; -protected: - void ExecuteExpressions(DataChunk &input, DataChunk &result); + vector> params; + unique_ptr expr; - //! Lock used for updating the index - mutex lock; +public: + string ToString() const override; -private: - //! 
Bound expressions used by the index - vector> bound_expressions; - //! Expression executor for the index expressions - ExpressionExecutor executor; + static bool Equals(const LambdaExpression *a, const LambdaExpression *b); + hash_t Hash() const override; - unique_ptr BindExpression(unique_ptr expr); + unique_ptr Copy() const override; + + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionType type, FieldReader &source); }; } // namespace duckdb -namespace duckdb { -class TableIndex { -public: - //! Scan the catalog set, invoking the callback method for every entry - template - void Scan(T &&callback) { - // lock the catalog set - lock_guard lock(indexes_lock); - for (auto &index : indexes) { - if (callback(*index)) { - break; - } - } - } - void AddIndex(unique_ptr index); +namespace duckdb { - void RemoveIndex(Index *index); +class BoundLambdaExpression : public Expression { +public: + BoundLambdaExpression(ExpressionType type_p, LogicalType return_type_p, unique_ptr lambda_expr_p, + idx_t parameter_count_p); - bool Empty(); + unique_ptr lambda_expr; + vector> captures; + idx_t parameter_count; - idx_t Count(); +public: + string ToString() const override; - Index *FindForeignKeyIndex(const vector &fk_keys, ForeignKeyType fk_type); + bool Equals(const BaseExpression *other) const override; - //! Serialize all indexes owned by this table, returns a vector of block info of all indexes - vector SerializeIndexes(duckdb::MetaBlockWriter &writer); + unique_ptr Copy() override; -private: - //! Indexes associated with the current table - mutex indexes_lock; - vector> indexes; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); }; } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_operator_expression.hpp +// +// +//===----------------------------------------------------------------------===// + + + + namespace duckdb { -class CatalogEntry; -struct BoundCreateTableInfo { - explicit BoundCreateTableInfo(unique_ptr base_p) : base(move(base_p)) { - D_ASSERT(base); - } +class BoundOperatorExpression : public Expression { +public: + BoundOperatorExpression(ExpressionType type, LogicalType return_type); - //! The schema to create the table in - SchemaCatalogEntry *schema; - //! The base CreateInfo object - unique_ptr base; - //! The map of column names -> column index, used during binding - case_insensitive_map_t name_map; - //! Column dependency manager of the table - ColumnDependencyManager column_dependency_manager; - //! List of constraints on the table - vector> constraints; - //! List of bound constraints on the table - vector> bound_constraints; - //! Bound default values - vector> bound_defaults; - //! Dependents of the table (in e.g. default values) - unordered_set dependencies; - //! The existing table data on disk (if any) - unique_ptr data; - //! CREATE TABLE from QUERY - unique_ptr query; - //! Indexes created by this table - vector indexes; + vector> children; - //! Serializes a BoundCreateTableInfo to a stand-alone binary blob - void Serialize(Serializer &serializer) const; - //! 
Deserializes a blob back into a BoundCreateTableInfo - static unique_ptr Deserialize(Deserializer &source, PlanDeserializationState &state); +public: + string ToString() const override; - CreateTableInfo &Base() { - D_ASSERT(base); - return (CreateTableInfo &)*base; - } -}; + bool Equals(const BaseExpression *other) const override; + + unique_ptr Copy() override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_parameter_expression.hpp +// +// +//===----------------------------------------------------------------------===// + + + //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/catalog/default/default_types.hpp +// duckdb/planner/expression/bound_parameter_data.hpp // // //===----------------------------------------------------------------------===// @@ -2437,20 +2766,82 @@ struct BoundCreateTableInfo { + namespace duckdb { -class SchemaCatalogEntry; -class DefaultTypeGenerator : public DefaultGenerator { +struct BoundParameterData { + BoundParameterData() { + } + BoundParameterData(Value val) : value(move(val)), return_type(value.type()) { + } + + Value value; + LogicalType return_type; + public: - DefaultTypeGenerator(Catalog &catalog, SchemaCatalogEntry *schema); + void Serialize(Serializer &serializer) const { + FieldWriter writer(serializer); + value.Serialize(writer.GetSerializer()); + writer.WriteSerializable(return_type); + writer.Finalize(); + } - SchemaCatalogEntry *schema; + static shared_ptr Deserialize(Deserializer &source) { + FieldReader reader(source); + auto value = Value::Deserialize(reader.GetSource()); + auto result = make_shared(move(value)); + result->return_type = reader.ReadRequiredSerializable(); + reader.Finalize(); + return result; + } +}; + +struct BoundParameterMap { + BoundParameterMap(vector ¶meter_data) : parameter_data(parameter_data) { + } + + bound_parameter_map_t parameters; + vector ¶meter_data; + + LogicalType GetReturnType(idx_t index) { + if (index >= parameter_data.size()) { + return LogicalTypeId::UNKNOWN; + } + return parameter_data[index].return_type; + } +}; +} // namespace duckdb + + +namespace duckdb { + +class BoundParameterExpression : public Expression { public: - DUCKDB_API static LogicalTypeId GetDefaultType(const string &name); + explicit BoundParameterExpression(idx_t parameter_nr); - unique_ptr CreateDefaultEntry(ClientContext &context, const string &entry_name) override; - vector GetDefaultEntries() override; + idx_t parameter_nr; + shared_ptr parameter_data; + +public: + //! Invalidate a bound parameter expression - forcing a rebind on any subsequent filters + DUCKDB_API static void Invalidate(Expression &expr); + //! 
Invalidate all parameters within an expression + DUCKDB_API static void InvalidateRecursive(Expression &expr); + + bool IsScalar() const override; + bool HasParameter() const override; + bool IsFoldable() const override; + + string ToString() const override; + + bool Equals(const BaseExpression *other) const override; + hash_t Hash() const override; + + unique_ptr Copy() override; + + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); }; } // namespace duckdb @@ -2458,7 +2849,7 @@ class DefaultTypeGenerator : public DefaultGenerator { //===----------------------------------------------------------------------===// // DuckDB // -// extension_functions.hpp +// duckdb/planner/expression/bound_reference_expression.hpp // // //===----------------------------------------------------------------------===// @@ -2469,918 +2860,670 @@ class DefaultTypeGenerator : public DefaultGenerator { namespace duckdb { -struct ExtensionFunction { - char function[48]; - char extension[48]; -}; +//! A BoundReferenceExpression represents a physical index into a DataChunk +class BoundReferenceExpression : public Expression { +public: + BoundReferenceExpression(string alias, LogicalType type, idx_t index); + BoundReferenceExpression(LogicalType type, storage_t index); -static constexpr ExtensionFunction EXTENSION_FUNCTIONS[] = { - {"->>", "json"}, - {"array_to_json", "json"}, - {"create_fts_index", "fts"}, - {"dbgen", "tpch"}, - {"drop_fts_index", "fts"}, - {"dsdgen", "tpcds"}, - {"excel_text", "excel"}, - {"from_json", "json"}, - {"from_json_strict", "json"}, - {"from_substrait", "substrait"}, - {"get_substrait", "substrait"}, - {"get_substrait_json", "substrait"}, - {"icu_calendar_names", "icu"}, - {"icu_sort_key", "icu"}, - {"json", "json"}, - {"json_array", "json"}, - {"json_array_length", "json"}, - {"json_extract", "json"}, - {"json_extract_path", "json"}, - {"json_extract_path_text", "json"}, - {"json_extract_string", "json"}, - {"json_group_array", "json"}, - {"json_group_object", "json"}, - {"json_group_structure", "json"}, - {"json_merge_patch", "json"}, - {"json_object", "json"}, - {"json_quote", "json"}, - {"json_structure", "json"}, - {"json_transform", "json"}, - {"json_transform_strict", "json"}, - {"json_type", "json"}, - {"json_valid", "json"}, - {"make_timestamptz", "icu"}, - {"parquet_metadata", "parquet"}, - {"parquet_scan", "parquet"}, - {"parquet_schema", "parquet"}, - {"pg_timezone_names", "icu"}, - {"postgres_attach", "postgres_scanner"}, - {"postgres_scan", "postgres_scanner"}, - {"postgres_scan_pushdown", "postgres_scanner"}, - {"read_json_objects", "json"}, - {"read_ndjson_objects", "json"}, - {"read_parquet", "parquet"}, - {"row_to_json", "json"}, - {"sqlite_attach", "sqlite_scanner"}, - {"sqlite_scan", "sqlite_scanner"}, - {"stem", "fts"}, - {"text", "excel"}, - {"to_json", "json"}, - {"tpcds", "tpcds"}, - {"tpcds_answers", "tpcds"}, - {"tpcds_queries", "tpcds"}, - {"tpch", "tpch"}, - {"tpch_answers", "tpch"}, - {"tpch_queries", "tpch"}, - {"visualize_diff_profiling_output", "visualizer"}, - {"visualize_json_profiling_output", "visualizer"}, - {"visualize_last_profiling_output", "visualizer"}, -}; -} // namespace duckdb + //! 
Index used to access data in the chunks + storage_t index; -#include -namespace duckdb { +public: + bool IsScalar() const override { + return false; + } + bool IsFoldable() const override { + return false; + } -string SimilarCatalogEntry::GetQualifiedName() const { - D_ASSERT(Found()); + string ToString() const override; - return schema->name + "." + name; -} + hash_t Hash() const override; + bool Equals(const BaseExpression *other) const override; -Catalog::Catalog(DatabaseInstance &db) - : db(db), schemas(make_unique(*this, make_unique(*this))), - dependency_manager(make_unique(*this)) { - catalog_version = 0; -} -Catalog::~Catalog() { -} + unique_ptr Copy() override; -Catalog &Catalog::GetCatalog(ClientContext &context) { - return context.db->GetCatalog(); -} + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; +} // namespace duckdb -CatalogEntry *Catalog::CreateTable(ClientContext &context, BoundCreateTableInfo *info) { - auto schema = GetSchema(context, info->base->schema); - return CreateTable(context, schema, info); -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_subquery_expression.hpp +// +// +//===----------------------------------------------------------------------===// -CatalogEntry *Catalog::CreateTable(ClientContext &context, unique_ptr info) { - auto binder = Binder::CreateBinder(context); - auto bound_info = binder->BindCreateTableInfo(move(info)); - return CreateTable(context, bound_info.get()); -} -CatalogEntry *Catalog::CreateTable(ClientContext &context, SchemaCatalogEntry *schema, BoundCreateTableInfo *info) { - return schema->CreateTable(context, info); -} -CatalogEntry *Catalog::CreateView(ClientContext &context, CreateViewInfo *info) { - auto schema = GetSchema(context, info->schema); - return CreateView(context, schema, info); -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/enums/subquery_type.hpp +// +// +//===----------------------------------------------------------------------===// -CatalogEntry *Catalog::CreateView(ClientContext &context, SchemaCatalogEntry *schema, CreateViewInfo *info) { - return schema->CreateView(context, info); -} -CatalogEntry *Catalog::CreateSequence(ClientContext &context, CreateSequenceInfo *info) { - auto schema = GetSchema(context, info->schema); - return CreateSequence(context, schema, info); -} -CatalogEntry *Catalog::CreateType(ClientContext &context, CreateTypeInfo *info) { - auto schema = GetSchema(context, info->schema); - return CreateType(context, schema, info); -} -CatalogEntry *Catalog::CreateSequence(ClientContext &context, SchemaCatalogEntry *schema, CreateSequenceInfo *info) { - return schema->CreateSequence(context, info); -} -CatalogEntry *Catalog::CreateType(ClientContext &context, SchemaCatalogEntry *schema, CreateTypeInfo *info) { - return schema->CreateType(context, info); -} +namespace duckdb { -CatalogEntry *Catalog::CreateTableFunction(ClientContext &context, CreateTableFunctionInfo *info) { - auto schema = GetSchema(context, info->schema); - return CreateTableFunction(context, schema, info); -} +//===--------------------------------------------------------------------===// +// Subquery Types +//===--------------------------------------------------------------------===// +enum class SubqueryType : uint8_t { + INVALID = 0, + SCALAR = 1, // Regular scalar 
subquery + EXISTS = 2, // EXISTS (SELECT...) + NOT_EXISTS = 3, // NOT EXISTS(SELECT...) + ANY = 4, // x = ANY(SELECT...) OR x IN (SELECT...) +}; -CatalogEntry *Catalog::CreateTableFunction(ClientContext &context, SchemaCatalogEntry *schema, - CreateTableFunctionInfo *info) { - return schema->CreateTableFunction(context, info); -} +} // namespace duckdb -CatalogEntry *Catalog::CreateCopyFunction(ClientContext &context, CreateCopyFunctionInfo *info) { - auto schema = GetSchema(context, info->schema); - return CreateCopyFunction(context, schema, info); -} -CatalogEntry *Catalog::CreateCopyFunction(ClientContext &context, SchemaCatalogEntry *schema, - CreateCopyFunctionInfo *info) { - return schema->CreateCopyFunction(context, info); -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/bound_query_node.hpp +// +// +//===----------------------------------------------------------------------===// -CatalogEntry *Catalog::CreatePragmaFunction(ClientContext &context, CreatePragmaFunctionInfo *info) { - auto schema = GetSchema(context, info->schema); - return CreatePragmaFunction(context, schema, info); -} -CatalogEntry *Catalog::CreatePragmaFunction(ClientContext &context, SchemaCatalogEntry *schema, - CreatePragmaFunctionInfo *info) { - return schema->CreatePragmaFunction(context, info); -} -CatalogEntry *Catalog::CreateFunction(ClientContext &context, CreateFunctionInfo *info) { - auto schema = GetSchema(context, info->schema); - return CreateFunction(context, schema, info); -} -CatalogEntry *Catalog::CreateFunction(ClientContext &context, SchemaCatalogEntry *schema, CreateFunctionInfo *info) { - return schema->CreateFunction(context, info); -} -CatalogEntry *Catalog::CreateCollation(ClientContext &context, CreateCollationInfo *info) { - auto schema = GetSchema(context, info->schema); - return CreateCollation(context, schema, info); -} -CatalogEntry *Catalog::CreateCollation(ClientContext &context, SchemaCatalogEntry *schema, CreateCollationInfo *info) { - return schema->CreateCollation(context, info); -} -CatalogEntry *Catalog::CreateSchema(ClientContext &context, CreateSchemaInfo *info) { - D_ASSERT(!info->schema.empty()); - if (info->schema == TEMP_SCHEMA) { - throw CatalogException("Cannot create built-in schema \"%s\"", info->schema); - } +namespace duckdb { - unordered_set dependencies; - auto entry = make_unique(this, info->schema, info->internal); - auto result = entry.get(); - if (!schemas->CreateEntry(context, info->schema, move(entry), dependencies)) { - if (info->on_conflict == OnCreateConflict::ERROR_ON_CONFLICT) { - throw CatalogException("Schema with name %s already exists!", info->schema); - } else { - D_ASSERT(info->on_conflict == OnCreateConflict::IGNORE_ON_CONFLICT); - } - return nullptr; +//! Bound equivalent of QueryNode +class BoundQueryNode { +public: + explicit BoundQueryNode(QueryNodeType type) : type(type) { } - return result; -} - -void Catalog::DropSchema(ClientContext &context, DropInfo *info) { - D_ASSERT(!info->name.empty()); - ModifyCatalog(); - if (!schemas->DropEntry(context, info->name, info->cascade)) { - if (!info->if_exists) { - throw CatalogException("Schema with name \"%s\" does not exist!", info->name); - } + virtual ~BoundQueryNode() { } -} -void Catalog::DropEntry(ClientContext &context, DropInfo *info) { - ModifyCatalog(); - if (info->type == CatalogType::SCHEMA_ENTRY) { - // DROP SCHEMA - DropSchema(context, info); - return; - } + //! 
The type of the query node, either SetOperation or Select + QueryNodeType type; + //! The result modifiers that should be applied to this query node + vector> modifiers; - auto lookup = LookupEntry(context, info->type, info->schema, info->name, info->if_exists); - if (!lookup.Found()) { - return; - } + //! The names returned by this QueryNode. + vector names; + //! The types returned by this QueryNode. + vector types; - lookup.schema->DropEntry(context, info); -} +public: + virtual idx_t GetRootIndex() = 0; +}; -CatalogEntry *Catalog::AddFunction(ClientContext &context, CreateFunctionInfo *info) { - auto schema = GetSchema(context, info->schema); - return AddFunction(context, schema, info); -} +} // namespace duckdb -CatalogEntry *Catalog::AddFunction(ClientContext &context, SchemaCatalogEntry *schema, CreateFunctionInfo *info) { - return schema->AddFunction(context, info); -} -SchemaCatalogEntry *Catalog::GetSchema(ClientContext &context, const string &schema_name, bool if_exists, - QueryErrorContext error_context) { - D_ASSERT(!schema_name.empty()); - if (schema_name == TEMP_SCHEMA) { - return ClientData::Get(context).temporary_objects.get(); - } - auto entry = schemas->GetEntry(context, schema_name); - if (!entry && !if_exists) { - throw CatalogException(error_context.FormatError("Schema with name %s does not exist!", schema_name)); - } - return (SchemaCatalogEntry *)entry; -} -void Catalog::ScanSchemas(ClientContext &context, std::function callback) { - // create all default schemas first - schemas->Scan(context, [&](CatalogEntry *entry) { callback(entry); }); -} +namespace duckdb { -SimilarCatalogEntry Catalog::SimilarEntryInSchemas(ClientContext &context, const string &entry_name, CatalogType type, - const vector &schemas) { +class BoundSubqueryExpression : public Expression { +public: + explicit BoundSubqueryExpression(LogicalType return_type); - vector sets; - std::transform(schemas.begin(), schemas.end(), std::back_inserter(sets), - [type](SchemaCatalogEntry *s) -> CatalogSet * { return &s->GetCatalogSet(type); }); - pair most_similar {"", (idx_t)-1}; - SchemaCatalogEntry *schema_of_most_similar = nullptr; - for (auto schema : schemas) { - auto entry = schema->GetCatalogSet(type).SimilarEntry(context, entry_name); - if (!entry.first.empty() && (most_similar.first.empty() || most_similar.second > entry.second)) { - most_similar = entry; - schema_of_most_similar = schema; - } + bool IsCorrelated() { + return binder->correlated_columns.size() > 0; } - return {most_similar.first, most_similar.second, schema_of_most_similar}; -} + //! The binder used to bind the subquery node + shared_ptr binder; + //! The bound subquery node + unique_ptr subquery; + //! The subquery type + SubqueryType subquery_type; + //! the child expression to compare with (in case of IN, ANY, ALL operators) + unique_ptr child; + //! The comparison type of the child expression with the subquery (in case of ANY, ALL operators) + ExpressionType comparison_type; + //! The LogicalType of the subquery result. Only used for ANY expressions. + LogicalType child_type; + //! The target LogicalType of the subquery result (i.e. to which type it should be casted, if child_type <> + //! child_target). Only used for ANY expressions. 
+ LogicalType child_target; -string FindExtension(const string &function_name) { - auto size = sizeof(EXTENSION_FUNCTIONS) / sizeof(ExtensionFunction); - auto it = std::lower_bound( - EXTENSION_FUNCTIONS, EXTENSION_FUNCTIONS + size, function_name, - [](const ExtensionFunction &element, const string &value) { return element.function < value; }); - if (it != EXTENSION_FUNCTIONS + size && it->function == function_name) { - return it->extension; +public: + bool HasSubquery() const override { + return true; } - return ""; -} -CatalogException Catalog::CreateMissingEntryException(ClientContext &context, const string &entry_name, - CatalogType type, const vector &schemas, - QueryErrorContext error_context) { - auto entry = SimilarEntryInSchemas(context, entry_name, type, schemas); - - vector unseen_schemas; - this->schemas->Scan([&schemas, &unseen_schemas](CatalogEntry *entry) { - auto schema_entry = (SchemaCatalogEntry *)entry; - if (std::find(schemas.begin(), schemas.end(), schema_entry) == schemas.end()) { - unseen_schemas.emplace_back(schema_entry); - } - }); - auto unseen_entry = SimilarEntryInSchemas(context, entry_name, type, unseen_schemas); - auto extension_name = FindExtension(entry_name); - if (!extension_name.empty()) { - return CatalogException("Function with name %s is not on the catalog, but it exists in the %s extension. To " - "Install and Load the extension, run: INSTALL %s; LOAD %s;", - entry_name, extension_name, extension_name, extension_name); + bool IsScalar() const override { + return false; } - string did_you_mean; - if (unseen_entry.Found() && unseen_entry.distance < entry.distance) { - did_you_mean = "\nDid you mean \"" + unseen_entry.GetQualifiedName() + "\"?"; - } else if (entry.Found()) { - did_you_mean = "\nDid you mean \"" + entry.name + "\"?"; + bool IsFoldable() const override { + return false; } - return CatalogException(error_context.FormatError("%s with name %s does not exist!%s", CatalogTypeToString(type), - entry_name, did_you_mean)); -} + string ToString() const override; -CatalogEntryLookup Catalog::LookupEntry(ClientContext &context, CatalogType type, const string &schema_name, - const string &name, bool if_exists, QueryErrorContext error_context) { - if (!schema_name.empty()) { - auto schema = GetSchema(context, schema_name, if_exists, error_context); - if (!schema) { - D_ASSERT(if_exists); - return {nullptr, nullptr}; - } + bool Equals(const BaseExpression *other) const override; - auto entry = schema->GetCatalogSet(type).GetEntry(context, name); - if (!entry && !if_exists) { - throw CreateMissingEntryException(context, name, type, {schema}, error_context); - } + unique_ptr Copy() override; - return {schema, entry}; - } + bool PropagatesNullValues() const override; - const auto &paths = ClientData::Get(context).catalog_search_path->Get(); - for (const auto &path : paths) { - auto lookup = LookupEntry(context, type, path, name, true, error_context); - if (lookup.Found()) { - return lookup; - } - } + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; +} // namespace duckdb - if (!if_exists) { - vector schemas; - for (const auto &path : paths) { - auto schema = GetSchema(context, path, true); - if (schema) { - schemas.emplace_back(schema); - } - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression/bound_unnest_expression.hpp +// +// 
+//===----------------------------------------------------------------------===// - throw CreateMissingEntryException(context, name, type, schemas, error_context); - } - return {nullptr, nullptr}; -} -CatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema, const string &name) { - vector entry_types {CatalogType::TABLE_ENTRY, CatalogType::SEQUENCE_ENTRY}; - for (auto entry_type : entry_types) { - CatalogEntry *result = GetEntry(context, entry_type, schema, name, true); - if (result != nullptr) { - return result; - } - } - throw CatalogException("CatalogElement \"%s.%s\" does not exist!", schema, name); -} +namespace duckdb { -CatalogEntry *Catalog::GetEntry(ClientContext &context, CatalogType type, const string &schema_name, const string &name, - bool if_exists, QueryErrorContext error_context) { - return LookupEntry(context, type, schema_name, name, if_exists, error_context).entry; -} +//! Represents a function call that has been bound to a base function +class BoundUnnestExpression : public Expression { +public: + explicit BoundUnnestExpression(LogicalType return_type); -template <> -TableCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, - bool if_exists, QueryErrorContext error_context) { - auto entry = GetEntry(context, CatalogType::TABLE_ENTRY, schema_name, name, if_exists); - if (!entry) { - return nullptr; - } - if (entry->type != CatalogType::TABLE_ENTRY) { - throw CatalogException(error_context.FormatError("%s is not a table", name)); - } - return (TableCatalogEntry *)entry; -} - -template <> -SequenceCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, - bool if_exists, QueryErrorContext error_context) { - return (SequenceCatalogEntry *)GetEntry(context, CatalogType::SEQUENCE_ENTRY, schema_name, name, if_exists, - error_context); -} - -template <> -TableFunctionCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, - bool if_exists, QueryErrorContext error_context) { - return (TableFunctionCatalogEntry *)GetEntry(context, CatalogType::TABLE_FUNCTION_ENTRY, schema_name, name, - if_exists, error_context); -} - -template <> -CopyFunctionCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, - bool if_exists, QueryErrorContext error_context) { - return (CopyFunctionCatalogEntry *)GetEntry(context, CatalogType::COPY_FUNCTION_ENTRY, schema_name, name, if_exists, - error_context); -} - -template <> -PragmaFunctionCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, - bool if_exists, QueryErrorContext error_context) { - return (PragmaFunctionCatalogEntry *)GetEntry(context, CatalogType::PRAGMA_FUNCTION_ENTRY, schema_name, name, - if_exists, error_context); -} - -template <> -AggregateFunctionCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, - bool if_exists, QueryErrorContext error_context) { - auto entry = GetEntry(context, CatalogType::AGGREGATE_FUNCTION_ENTRY, schema_name, name, if_exists, error_context); - if (entry->type != CatalogType::AGGREGATE_FUNCTION_ENTRY) { - throw CatalogException(error_context.FormatError("%s is not an aggregate function", name)); - } - return (AggregateFunctionCatalogEntry *)entry; -} - -template <> -CollateCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, - bool if_exists, 
QueryErrorContext error_context) { - return (CollateCatalogEntry *)GetEntry(context, CatalogType::COLLATION_ENTRY, schema_name, name, if_exists, - error_context); -} - -template <> -TypeCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, - bool if_exists, QueryErrorContext error_context) { - return (TypeCatalogEntry *)GetEntry(context, CatalogType::TYPE_ENTRY, schema_name, name, if_exists, error_context); -} - -LogicalType Catalog::GetType(ClientContext &context, const string &schema, const string &name) { - auto user_type_catalog = GetEntry(context, schema, name); - auto result_type = user_type_catalog->user_type; - LogicalType::SetCatalog(result_type, user_type_catalog); - return result_type; -} + unique_ptr child; -void Catalog::Alter(ClientContext &context, AlterInfo *info) { - ModifyCatalog(); - auto lookup = LookupEntry(context, info->GetCatalogType(), info->schema, info->name, info->if_exists); - if (!lookup.Found()) { - return; - } - return lookup.schema->Alter(context, info); -} +public: + bool IsFoldable() const override; + string ToString() const override; -idx_t Catalog::GetCatalogVersion() { - return catalog_version; -} + hash_t Hash() const override; + bool Equals(const BaseExpression *other) const override; -idx_t Catalog::ModifyCatalog() { - return catalog_version++; -} + unique_ptr Copy() override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; } // namespace duckdb - - //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/common/queue.hpp +// duckdb/planner/expression/bound_window_expression.hpp // // //===----------------------------------------------------------------------===// -#include - -namespace duckdb { -using std::queue; -} - - -namespace duckdb { +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/parser/expression/window_expression.hpp +// +// +//===----------------------------------------------------------------------===// -ColumnDependencyManager::ColumnDependencyManager() { -} -ColumnDependencyManager::~ColumnDependencyManager() { -} -void ColumnDependencyManager::AddGeneratedColumn(const ColumnDefinition &column, - const case_insensitive_map_t &name_map) { - D_ASSERT(column.Generated()); - vector referenced_columns; - column.GetListOfDependencies(referenced_columns); - vector indices; - for (auto &col : referenced_columns) { - auto entry = name_map.find(col); - if (entry == name_map.end()) { - throw InvalidInputException("Referenced column \"%s\" was not found in the table", col); - } - indices.push_back(entry->second); - } - return AddGeneratedColumn(column.Oid(), indices); -} -void ColumnDependencyManager::AddGeneratedColumn(column_t index, const vector &indices, bool root) { - if (indices.empty()) { - return; - } - auto &list = dependents_map[index]; - // Create a link between the dependencies - for (auto &dep : indices) { - // Add this column as a dependency of the new column - list.insert(dep); - // Add the new column as a dependent of the column - dependencies_map[dep].insert(index); - // Inherit the dependencies - if (HasDependencies(dep)) { - auto &inherited_deps = dependents_map[dep]; - D_ASSERT(!inherited_deps.empty()); - for (auto &inherited_dep : inherited_deps) { - list.insert(inherited_dep); - dependencies_map[inherited_dep].insert(index); - } - } - if (!root) { - continue; - } - 
direct_dependencies[index].insert(dep); - } - if (!HasDependents(index)) { - return; - } - auto &dependents = dependencies_map[index]; - if (dependents.count(index)) { - throw InvalidInputException("Circular dependency encountered when resolving generated column expressions"); - } - // Also let the dependents of this generated column inherit the dependencies - for (auto &dependent : dependents) { - AddGeneratedColumn(dependent, indices, false); - } -} -vector ColumnDependencyManager::RemoveColumn(column_t index, column_t column_amount) { - // Always add the initial column - deleted_columns.insert(index); - RemoveGeneratedColumn(index); - RemoveStandardColumn(index); +namespace duckdb { - // Clean up the internal list - vector new_indices = CleanupInternals(column_amount); - D_ASSERT(deleted_columns.empty()); - return new_indices; -} +enum class WindowBoundary : uint8_t { + INVALID = 0, + UNBOUNDED_PRECEDING = 1, + UNBOUNDED_FOLLOWING = 2, + CURRENT_ROW_RANGE = 3, + CURRENT_ROW_ROWS = 4, + EXPR_PRECEDING_ROWS = 5, + EXPR_FOLLOWING_ROWS = 6, + EXPR_PRECEDING_RANGE = 7, + EXPR_FOLLOWING_RANGE = 8 +}; -bool ColumnDependencyManager::IsDependencyOf(column_t gcol, column_t col) const { - auto entry = dependents_map.find(gcol); - if (entry == dependents_map.end()) { - return false; - } - auto &list = entry->second; - return list.count(col); -} +//! The WindowExpression represents a window function in the query. They are a special case of aggregates which is why +//! they inherit from them. +class WindowExpression : public ParsedExpression { +public: + WindowExpression(ExpressionType type, string schema_name, const string &function_name); -bool ColumnDependencyManager::HasDependencies(column_t index) const { - auto entry = dependents_map.find(index); - if (entry == dependents_map.end()) { - return false; - } - return true; -} + //! Schema of the aggregate function + string schema; + //! Name of the aggregate function + string function_name; + //! The child expression of the main window function + vector> children; + //! The set of expressions to partition by + vector> partitions; + //! The set of ordering clauses + vector orders; + //! Expression representing a filter, only used for aggregates + unique_ptr filter_expr; + //! True to ignore NULL values + bool ignore_nulls; + //! The window boundaries + WindowBoundary start = WindowBoundary::INVALID; + WindowBoundary end = WindowBoundary::INVALID; -const unordered_set &ColumnDependencyManager::GetDependencies(column_t index) const { - auto entry = dependents_map.find(index); - D_ASSERT(entry != dependents_map.end()); - return entry->second; -} + unique_ptr start_expr; + unique_ptr end_expr; + //! Offset and default expressions for WINDOW_LEAD and WINDOW_LAG functions + unique_ptr offset_expr; + unique_ptr default_expr; -bool ColumnDependencyManager::HasDependents(column_t index) const { - auto entry = dependencies_map.find(index); - if (entry == dependencies_map.end()) { - return false; +public: + bool IsWindow() const override { + return true; } - return true; -} -const unordered_set &ColumnDependencyManager::GetDependents(column_t index) const { - auto entry = dependencies_map.find(index); - D_ASSERT(entry != dependencies_map.end()); - return entry->second; -} + //! 
Convert the Expression to a String + string ToString() const override; -void ColumnDependencyManager::RemoveStandardColumn(column_t index) { - if (!HasDependents(index)) { - return; - } - auto dependents = dependencies_map[index]; - for (auto &gcol : dependents) { - // If index is a direct dependency of gcol, remove it from the list - if (direct_dependencies.find(gcol) != direct_dependencies.end()) { - direct_dependencies[gcol].erase(index); - } - RemoveGeneratedColumn(gcol); - } - // Remove this column from the dependencies map - dependencies_map.erase(index); -} + static bool Equals(const WindowExpression *a, const WindowExpression *b); -void ColumnDependencyManager::RemoveGeneratedColumn(column_t index) { - deleted_columns.insert(index); - if (!HasDependencies(index)) { - return; - } - auto &dependencies = dependents_map[index]; - for (auto &col : dependencies) { - // Remove this generated column from the list of this column - auto &col_dependents = dependencies_map[col]; - D_ASSERT(col_dependents.count(index)); - col_dependents.erase(index); - // If the resulting list is empty, remove the column from the dependencies map altogether - if (col_dependents.empty()) { - dependencies_map.erase(col); - } - } - // Remove this column from the dependents_map map - dependents_map.erase(index); -} + unique_ptr Copy() const override; -void ColumnDependencyManager::AdjustSingle(column_t idx, idx_t offset) { - D_ASSERT(idx >= offset); - column_t new_idx = idx - offset; - // Adjust this index in the dependents of this column - bool has_dependents = HasDependents(idx); - bool has_dependencies = HasDependencies(idx); + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionType type, FieldReader &source); - if (has_dependents) { - auto &dependents = GetDependents(idx); - for (auto &dep : dependents) { - auto &dep_dependencies = dependents_map[dep]; - dep_dependencies.erase(idx); - D_ASSERT(!dep_dependencies.count(new_idx)); - dep_dependencies.insert(new_idx); +public: + template + static string ToString(const T &entry, const string &schema, const string &function_name) { + // Start with function call + string result = schema.empty() ? function_name : schema + "." 
+ function_name; + result += "("; + result += StringUtil::Join(entry.children, entry.children.size(), ", ", + [](const unique_ptr &child) { return child->ToString(); }); + // Lead/Lag extra arguments + if (entry.offset_expr.get()) { + result += ", "; + result += entry.offset_expr->ToString(); } - } - if (has_dependencies) { - auto &dependencies = GetDependencies(idx); - for (auto &dep : dependencies) { - auto &dep_dependents = dependencies_map[dep]; - dep_dependents.erase(idx); - D_ASSERT(!dep_dependents.count(new_idx)); - dep_dependents.insert(new_idx); + if (entry.default_expr.get()) { + result += ", "; + result += entry.default_expr->ToString(); + } + // IGNORE NULLS + if (entry.ignore_nulls) { + result += " IGNORE NULLS"; + } + // FILTER + if (entry.filter_expr) { + result += ") FILTER (WHERE " + entry.filter_expr->ToString(); + } + + // Over clause + result += ") OVER ("; + string sep; + + // Partitions + if (!entry.partitions.empty()) { + result += "PARTITION BY "; + result += StringUtil::Join(entry.partitions, entry.partitions.size(), ", ", + [](const unique_ptr &partition) { return partition->ToString(); }); + sep = " "; + } + + // Orders + if (!entry.orders.empty()) { + result += sep; + result += "ORDER BY "; + result += StringUtil::Join(entry.orders, entry.orders.size(), ", ", + [](const ORDER_NODE &order) { return order.ToString(); }); + sep = " "; + } + + // Rows/Range + string units = "ROWS"; + string from; + switch (entry.start) { + case WindowBoundary::CURRENT_ROW_RANGE: + case WindowBoundary::CURRENT_ROW_ROWS: + from = "CURRENT ROW"; + units = (entry.start == WindowBoundary::CURRENT_ROW_RANGE) ? "RANGE" : "ROWS"; + break; + case WindowBoundary::UNBOUNDED_PRECEDING: + if (entry.end != WindowBoundary::CURRENT_ROW_RANGE) { + from = "UNBOUNDED PRECEDING"; + } + break; + case WindowBoundary::EXPR_PRECEDING_ROWS: + case WindowBoundary::EXPR_PRECEDING_RANGE: + from = entry.start_expr->ToString() + " PRECEDING"; + units = (entry.start == WindowBoundary::EXPR_PRECEDING_RANGE) ? "RANGE" : "ROWS"; + break; + case WindowBoundary::EXPR_FOLLOWING_ROWS: + case WindowBoundary::EXPR_FOLLOWING_RANGE: + from = entry.start_expr->ToString() + " FOLLOWING"; + units = (entry.start == WindowBoundary::EXPR_FOLLOWING_RANGE) ? 
"RANGE" : "ROWS"; + break; + default: + throw InternalException("Unrecognized FROM in WindowExpression"); } - } - if (has_dependents) { - D_ASSERT(!dependencies_map.count(new_idx)); - dependencies_map[new_idx] = move(dependencies_map[idx]); - dependencies_map.erase(idx); - } - if (has_dependencies) { - D_ASSERT(!dependents_map.count(new_idx)); - dependents_map[new_idx] = move(dependents_map[idx]); - dependents_map.erase(idx); - } -} -vector ColumnDependencyManager::CleanupInternals(column_t column_amount) { - vector to_adjust; - D_ASSERT(!deleted_columns.empty()); - // Get the lowest index that was deleted - vector new_indices(column_amount, DConstants::INVALID_INDEX); - column_t threshold = *deleted_columns.begin(); - - idx_t offset = 0; - for (column_t i = 0; i < column_amount; i++) { - new_indices[i] = i - offset; - if (deleted_columns.count(i)) { - offset++; - continue; - } - if (i > threshold && (HasDependencies(i) || HasDependents(i))) { - to_adjust.push_back(i); + string to; + switch (entry.end) { + case WindowBoundary::CURRENT_ROW_RANGE: + if (entry.start != WindowBoundary::UNBOUNDED_PRECEDING) { + to = "CURRENT ROW"; + units = "RANGE"; + } + break; + case WindowBoundary::CURRENT_ROW_ROWS: + to = "CURRENT ROW"; + units = "ROWS"; + break; + case WindowBoundary::UNBOUNDED_PRECEDING: + to = "UNBOUNDED PRECEDING"; + break; + case WindowBoundary::UNBOUNDED_FOLLOWING: + to = "UNBOUNDED FOLLOWING"; + break; + case WindowBoundary::EXPR_PRECEDING_ROWS: + case WindowBoundary::EXPR_PRECEDING_RANGE: + to = entry.end_expr->ToString() + " PRECEDING"; + units = (entry.end == WindowBoundary::EXPR_PRECEDING_RANGE) ? "RANGE" : "ROWS"; + break; + case WindowBoundary::EXPR_FOLLOWING_ROWS: + case WindowBoundary::EXPR_FOLLOWING_RANGE: + to = entry.end_expr->ToString() + " FOLLOWING"; + units = (entry.end == WindowBoundary::EXPR_FOLLOWING_RANGE) ? "RANGE" : "ROWS"; + break; + default: + throw InternalException("Unrecognized TO in WindowExpression"); } - } - - // Adjust all indices inside the dependency managers internal mappings - for (auto &col : to_adjust) { - offset = col - new_indices[col]; - AdjustSingle(col, offset); - } - deleted_columns.clear(); - return new_indices; -} -stack ColumnDependencyManager::GetBindOrder(const vector &columns) { - stack bind_order; - queue to_visit; - unordered_set visited; - - for (auto &entry : direct_dependencies) { - auto dependent = entry.first; - //! Skip the dependents that are also dependencies - if (dependencies_map.find(dependent) != dependencies_map.end()) { - continue; - } - bind_order.push(dependent); - visited.insert(dependent); - for (auto &dependency : direct_dependencies[dependent]) { - to_visit.push(dependency); + if (!from.empty() || !to.empty()) { + result += sep + units; } - } - - while (!to_visit.empty()) { - auto column = to_visit.front(); - to_visit.pop(); - - //! 
If this column does not have dependencies, the queue stops getting filled - if (direct_dependencies.find(column) == direct_dependencies.end()) { - continue; + if (!from.empty() && !to.empty()) { + result += " BETWEEN "; + result += from; + result += " AND "; + result += to; + } else if (!from.empty()) { + result += " "; + result += from; + } else if (!to.empty()) { + result += " "; + result += to; } - bind_order.push(column); - visited.insert(column); - for (auto &dependency : direct_dependencies[column]) { - to_visit.push(dependency); - } - } + result += ")"; - // Add generated columns that have no dependencies, but still might need to have their type resolved - for (idx_t i = 0; i < columns.size(); i++) { - auto &col = columns[i]; - // Not a generated column - if (!col.Generated()) { - continue; - } - // Already added to the bind_order stack - if (visited.count(i)) { - continue; - } - bind_order.push(i); + return result; } - - return bind_order; -} - -} // namespace duckdb - - - -namespace duckdb { - -CopyFunctionCatalogEntry::CopyFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, - CreateCopyFunctionInfo *info) - : StandardEntry(CatalogType::COPY_FUNCTION_ENTRY, schema, catalog, info->name), function(info->function) { -} - +}; } // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/storage/data_table.hpp -// -// -//===----------------------------------------------------------------------===// +namespace duckdb { +class AggregateFunction; +class BoundWindowExpression : public Expression { +public: + BoundWindowExpression(ExpressionType type, LogicalType return_type, unique_ptr aggregate, + unique_ptr bind_info); + //! The bound aggregate function + unique_ptr aggregate; + //! The bound function info + unique_ptr bind_info; + //! The child expressions of the main window function + vector> children; + //! The set of expressions to partition by + vector> partitions; + //! Statistics belonging to the partitions expressions + vector> partitions_stats; + //! The set of ordering clauses + vector orders; + //! Expression representing a filter, only used for aggregates + unique_ptr filter_expr; + //! True to ignore NULL values + bool ignore_nulls; + //! The window boundaries + WindowBoundary start = WindowBoundary::INVALID; + WindowBoundary end = WindowBoundary::INVALID; + unique_ptr start_expr; + unique_ptr end_expr; + //! 
Offset and default expressions for WINDOW_LEAD and WINDOW_LAG functions + unique_ptr offset_expr; + unique_ptr default_expr; +public: + bool IsWindow() const override { + return true; + } + bool IsFoldable() const override { + return false; + } + string ToString() const override; -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/storage/statistics/column_statistics.hpp -// -// -//===----------------------------------------------------------------------===// + bool KeysAreCompatible(const BoundWindowExpression *other) const; + bool Equals(const BaseExpression *other) const override; + unique_ptr Copy() override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(ExpressionDeserializationState &state, FieldReader &reader); +}; +} // namespace duckdb +#include namespace duckdb { -class ColumnStatistics { +class AdaptiveFilter { public: - explicit ColumnStatistics(unique_ptr stats_p); - - unique_ptr stats; + explicit AdaptiveFilter(const Expression &expr); + explicit AdaptiveFilter(TableFilterSet *table_filters); + void AdaptRuntimeStatistics(double duration); + vector permutation; -public: - static shared_ptr CreateEmptyStats(const LogicalType &type); +private: + //! used for adaptive expression reordering + idx_t iteration_count; + idx_t swap_idx; + idx_t right_random_border; + idx_t observe_interval; + idx_t execute_interval; + double runtime_sum; + double prev_mean; + bool observe; + bool warmup; + vector swap_likeliness; + std::default_random_engine generator; }; - } // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/storage/table/column_segment.hpp -// -// -//===----------------------------------------------------------------------===// - - - - - - - -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/storage/buffer_manager.hpp -// -// -//===----------------------------------------------------------------------===// - +namespace duckdb { +class ColumnSegment; +class LocalTableStorage; +class CollectionScanState; +class Index; +class RowGroup; +class RowGroupCollection; +class UpdateSegment; +class TableScanState; +class ColumnSegment; +class ValiditySegment; +class TableFilterSet; +class ColumnData; +struct SegmentScanState { + virtual ~SegmentScanState() { + } +}; +struct IndexScanState { + virtual ~IndexScanState() { + } +}; +typedef unordered_map buffer_handle_set_t; +struct ColumnScanState { + //! The column segment that is currently being scanned + ColumnSegment *current = nullptr; + //! The current row index of the scan + idx_t row_index = 0; + //! The internal row index (i.e. the position of the SegmentScanState) + idx_t internal_index = 0; + //! Segment scan state + unique_ptr scan_state; + //! Child states of the vector + vector child_states; + //! Whether or not InitializeState has been called for this segment + bool initialized = false; + //! If this segment has already been checked for skipping purposes + bool segment_checked = false; + //! The version of the column data that we are scanning. + //! This is used to detect if the ColumnData has been changed out from under us during a scan + //! If this is the case, we re-initialize the scan + idx_t version; + //! We initialize one SegmentScanState per segment, however, if scanning a DataChunk requires us to scan over more + //! 
than one Segment, we need to keep the scan states of the previous segments around + vector> previous_states; -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/storage/buffer/block_handle.hpp -// -// -//===----------------------------------------------------------------------===// +public: + //! Move the scan state forward by "count" rows (including all child states) + void Next(idx_t count); + //! Move ONLY this state forward by "count" rows (i.e. not the child states) + void NextInternal(idx_t count); + //! Move the scan state forward by STANDARD_VECTOR_SIZE rows + void NextVector(); +}; +struct ColumnFetchState { + //! The set of pinned block handles for this set of fetches + buffer_handle_set_t handles; + //! Any child states of the fetch + vector> child_states; + BufferHandle &GetOrInsertHandle(ColumnSegment &segment); +}; +class RowGroupScanState { +public: + RowGroupScanState(CollectionScanState &parent_p) + : row_group(nullptr), vector_index(0), max_row(0), parent(parent_p) { + } + //! The current row_group we are scanning + RowGroup *row_group = nullptr; + //! The vector index within the row_group + idx_t vector_index = 0; + //! The maximum row index of this row_group scan + idx_t max_row = 0; + //! Child column scans + unique_ptr column_scans; +public: + const vector &GetColumnIds(); + TableFilterSet *GetFilters(); + AdaptiveFilter *GetAdaptiveFilter(); + idx_t GetParentMaxRow(); +private: + //! The parent scan state + CollectionScanState &parent; +}; +class CollectionScanState { +public: + CollectionScanState(TableScanState &parent_p) + : row_group_state(*this), max_row(0), batch_index(0), parent(parent_p) {}; -namespace duckdb { -class BufferHandle; -class BufferManager; -class DatabaseInstance; -class FileBuffer; + //! The row_group scan state + RowGroupScanState row_group_state; + //! The total maximum row index + idx_t max_row; + //! The current batch index + idx_t batch_index; -enum class BlockState : uint8_t { BLOCK_UNLOADED = 0, BLOCK_LOADED = 1 }; +public: + const vector &GetColumnIds(); + TableFilterSet *GetFilters(); + AdaptiveFilter *GetAdaptiveFilter(); + bool Scan(Transaction &transaction, DataChunk &result); + bool ScanCommitted(DataChunk &result, TableScanType type); -class BlockHandle { - friend struct BufferEvictionNode; - friend class BufferHandle; - friend class BufferManager; +private: + TableScanState &parent; +}; +class TableScanState { public: - BlockHandle(DatabaseInstance &db, block_id_t block_id); - BlockHandle(DatabaseInstance &db, block_id_t block_id, unique_ptr buffer, bool can_destroy, - idx_t block_size); - ~BlockHandle(); + TableScanState() : table_state(*this), local_state(*this), table_filters(nullptr) {}; - DatabaseInstance &db; + //! The underlying table scan state + CollectionScanState table_state; + //! Transaction-local scan state + CollectionScanState local_state; public: - block_id_t BlockId() { - return block_id; - } + void Initialize(vector column_ids, TableFilterSet *table_filters = nullptr); - int32_t Readers() const { - return readers; - } + const vector &GetColumnIds(); + TableFilterSet *GetFilters(); + AdaptiveFilter *GetAdaptiveFilter(); - inline bool IsSwizzled() const { - return !unswizzled; - } +private: + //! The column identifiers of the scan + vector column_ids; + //! The table filters (if any) + TableFilterSet *table_filters; + //! 
Adaptive filter info (if any) + unique_ptr adaptive_filter; +}; - inline void SetSwizzling(const char *unswizzler) { - unswizzled = unswizzler; - } +struct ParallelCollectionScanState { + //! The row group collection we are scanning + RowGroupCollection *collection; + RowGroup *current_row_group; + idx_t vector_index; + idx_t max_row; + idx_t batch_index; +}; -private: - static BufferHandle Load(shared_ptr &handle, unique_ptr buffer = nullptr); - unique_ptr UnloadAndTakeBlock(); - void Unload(); - bool CanUnload(); +struct ParallelTableScanState { + //! Parallel scan state for the table + ParallelCollectionScanState scan_state; + //! Parallel scan state for the transaction-local state + ParallelCollectionScanState local_state; +}; - //! The block-level lock - mutex lock; - //! Whether or not the block is loaded/unloaded - BlockState state; - //! Amount of concurrent readers - atomic readers; - //! The block id of the block - const block_id_t block_id; - //! Pointer to loaded data (if any) - unique_ptr buffer; - //! Internal eviction timestamp - atomic eviction_timestamp; - //! Whether or not the buffer can be destroyed (only used for temporary buffers) - const bool can_destroy; - //! The memory usage of the block - idx_t memory_usage; - //! Does the block contain any memory pointers? - const char *unswizzled; +class CreateIndexScanState : public TableScanState { +public: + vector> locks; + unique_lock append_lock; + SegmentLock segment_lock; }; } // namespace duckdb - //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/storage/buffer/managed_buffer.hpp +// duckdb/storage/statistics/segment_statistics.hpp // // //===----------------------------------------------------------------------===// @@ -3392,680 +3535,972 @@ class BlockHandle { namespace duckdb { -class DatabaseInstance; -//! Managed buffer is an arbitrarily-sized buffer that is at least of size >= BLOCK_SIZE -class ManagedBuffer : public FileBuffer { +class SegmentStatistics { public: - ManagedBuffer(DatabaseInstance &db, idx_t size, bool can_destroy, block_id_t id); - ManagedBuffer(DatabaseInstance &db, FileBuffer &source, bool can_destroy, block_id_t id); - - DatabaseInstance &db; - //! Whether or not the managed buffer can be freely destroyed when unpinned. - //! - If can_destroy is true, the buffer can be destroyed when unpinned and hence be unrecoverable. After being - //! destroyed, Pin() will return false. - //! - If can_destroy is false, the buffer will instead be written to a temporary file on disk when unloaded from - //! memory, and read back into memory when Pin() is called. - bool can_destroy; - //! The internal id of the buffer - block_id_t id; -}; - -} // namespace duckdb - - + SegmentStatistics(LogicalType type); + SegmentStatistics(LogicalType type, unique_ptr statistics); -namespace duckdb { -class DatabaseInstance; -class TemporaryDirectoryHandle; -struct EvictionQueue; + LogicalType type; -//! The buffer manager is in charge of handling memory management for the database. It hands out memory buffers that can -//! be used by the database internally. -class BufferManager { - friend class BufferHandle; - friend class BlockHandle; + //! Type-specific statistics of the segment + unique_ptr statistics; public: - BufferManager(DatabaseInstance &db, string temp_directory, idx_t maximum_memory); - ~BufferManager(); - - //! Register a block with the given block id in the base file - shared_ptr RegisterBlock(block_id_t block_id); - - //! 
Register an in-memory buffer of arbitrary size, as long as it is >= BLOCK_SIZE. can_destroy signifies whether or - //! not the buffer can be destroyed when unpinned, or whether or not it needs to be written to a temporary file so - //! it can be reloaded. The resulting buffer will already be allocated, but needs to be pinned in order to be used. - shared_ptr RegisterMemory(idx_t block_size, bool can_destroy); - - //! Convert an existing in-memory buffer into a persistent disk-backed block - shared_ptr ConvertToPersistent(BlockManager &block_manager, block_id_t block_id, - shared_ptr old_block); - - //! Allocate an in-memory buffer with a single pin. - //! The allocated memory is released when the buffer handle is destroyed. - DUCKDB_API BufferHandle Allocate(idx_t block_size); - - //! Reallocate an in-memory buffer that is pinned. - void ReAllocate(shared_ptr &handle, idx_t block_size); - - BufferHandle Pin(shared_ptr &handle); - void Unpin(shared_ptr &handle); - - void UnregisterBlock(block_id_t block_id, bool can_destroy); - - //! Set a new memory limit to the buffer manager, throws an exception if the new limit is too low and not enough - //! blocks can be evicted - void SetLimit(idx_t limit = (idx_t)-1); - - static BufferManager &GetBufferManager(ClientContext &context); - DUCKDB_API static BufferManager &GetBufferManager(DatabaseInstance &db); - - idx_t GetUsedMemory() { - return current_memory; - } - idx_t GetMaxMemory() { - return maximum_memory; - } - - const string &GetTemporaryDirectory() { - return temp_directory; - } + void Reset(); +}; - void SetTemporaryDirectory(string new_dir); +} // namespace duckdb - DUCKDB_API Allocator &GetBufferAllocator(); -private: - //! Evict blocks until the currently used memory + extra_memory fit, returns false if this was not possible - //! (i.e. not enough blocks could be evicted) - //! If the "buffer" argument is specified AND the system can find a buffer to re-use for the given allocation size - //! "buffer" will be made to point to the re-usable memory. Note that this is not guaranteed. - bool EvictBlocks(idx_t extra_memory, idx_t memory_limit, unique_ptr *buffer = nullptr); - //! Garbage collect eviction queue - void PurgeQueue(); - //! Write a temporary buffer to disk - void WriteTemporaryBuffer(ManagedBuffer &buffer); - //! Read a temporary buffer from disk - unique_ptr ReadTemporaryBuffer(block_id_t id, unique_ptr buffer = nullptr); - //! 
Get the path of the temporary buffer - string GetTemporaryPath(block_id_t id); - void DeleteTemporaryFile(block_id_t id); +namespace duckdb { +class BlockManager; +class ColumnData; +class DatabaseInstance; +class DataTable; +class PartialBlockManager; +struct DataTableInfo; +class ExpressionExecutor; +class RowGroupWriter; +class UpdateSegment; +class Vector; +struct ColumnCheckpointState; +struct RowGroupPointer; +struct TransactionData; +struct VersionNode; - void RequireTemporaryDirectory(); +struct RowGroupWriteData { + vector> states; + vector> statistics; +}; - void AddToEvictionQueue(shared_ptr &handle); +class RowGroup : public SegmentBase { +public: + friend class ColumnData; + friend class VersionDeleteState; - string InMemoryWarning(); +public: + static constexpr const idx_t ROW_GROUP_SIZE = STANDARD_ROW_GROUPS_SIZE; + static constexpr const idx_t ROW_GROUP_VECTOR_COUNT = ROW_GROUP_SIZE / STANDARD_VECTOR_SIZE; - static data_ptr_t BufferAllocatorAllocate(PrivateAllocatorData *private_data, idx_t size); - static void BufferAllocatorFree(PrivateAllocatorData *private_data, data_ptr_t pointer, idx_t size); - static data_ptr_t BufferAllocatorRealloc(PrivateAllocatorData *private_data, data_ptr_t pointer, idx_t old_size, - idx_t size); +public: + RowGroup(DatabaseInstance &db, BlockManager &block_manager, DataTableInfo &table_info, idx_t start, idx_t count); + RowGroup(DatabaseInstance &db, BlockManager &block_manager, DataTableInfo &table_info, + const vector &types, RowGroupPointer &&pointer); + RowGroup(RowGroup &row_group, idx_t start); + ~RowGroup(); private: //! The database instance DatabaseInstance &db; - //! The lock for changing the memory limit - mutex limit_lock; - //! The current amount of memory that is occupied by the buffer manager (in bytes) - atomic current_memory; - //! The maximum amount of memory that the buffer manager can keep (in bytes) - atomic maximum_memory; - //! The directory name where temporary files are stored - string temp_directory; - //! Lock for creating the temp handle - mutex temp_handle_lock; - //! Handle for the temporary directory - unique_ptr temp_directory_handle; - //! The lock for the set of blocks - mutex blocks_lock; - //! A mapping of block id -> BlockHandle - unordered_map> blocks; - //! Eviction queue - unique_ptr queue; - //! The temporary id used for managed buffers - atomic temporary_id; - //! Allocator associated with the buffer manager, that passes all allocations through this buffer manager - Allocator buffer_allocator; -}; -} // namespace duckdb - + //! The block manager + BlockManager &block_manager; + //! The table info of this row_group + DataTableInfo &table_info; + //! The version info of the row_group (inserted and deleted tuple info) + shared_ptr version_info; + //! The column data of the row_group + vector> columns; + //! 
The segment statistics for each of the columns + vector> stats; +public: + DatabaseInstance &GetDatabase() { + return db; + } + BlockManager &GetBlockManager() { + return block_manager; + } + DataTableInfo &GetTableInfo() { + return table_info; + } + idx_t GetColumnIndex(ColumnData *data) { + for (idx_t i = 0; i < columns.size(); i++) { + if (columns[i].get() == data) { + return i; + } + } + return 0; + } + unique_ptr AlterType(const LogicalType &target_type, idx_t changed_idx, ExpressionExecutor &executor, + RowGroupScanState &scan_state, DataChunk &scan_chunk); + unique_ptr AddColumn(ColumnDefinition &new_column, ExpressionExecutor &executor, + Expression *default_value, Vector &intermediate); + unique_ptr RemoveColumn(idx_t removed_column); -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/function/compression_function.hpp -// -// -//===----------------------------------------------------------------------===// + void CommitDrop(); + void CommitDropColumn(idx_t index); + void InitializeEmpty(const vector &types); + //! Initialize a scan over this row_group + bool InitializeScan(RowGroupScanState &state); + bool InitializeScanWithOffset(RowGroupScanState &state, idx_t vector_offset); + //! Checks the given set of table filters against the row-group statistics. Returns false if the entire row group + //! can be skipped. + bool CheckZonemap(TableFilterSet &filters, const vector &column_ids); + //! Checks the given set of table filters against the per-segment statistics. Returns false if any segments were + //! skipped. + bool CheckZonemapSegments(RowGroupScanState &state); + void Scan(TransactionData transaction, RowGroupScanState &state, DataChunk &result); + void ScanCommitted(RowGroupScanState &state, DataChunk &result, TableScanType type); + idx_t GetSelVector(TransactionData transaction, idx_t vector_idx, SelectionVector &sel_vector, idx_t max_count); + idx_t GetCommittedSelVector(transaction_t start_time, transaction_t transaction_id, idx_t vector_idx, + SelectionVector &sel_vector, idx_t max_count); + //! For a specific row, returns true if it should be used for the transaction and false otherwise. + bool Fetch(TransactionData transaction, idx_t row); + //! Fetch a specific row from the row_group and insert it into the result at the specified index + void FetchRow(TransactionData transaction, ColumnFetchState &state, const vector &column_ids, + row_t row_id, DataChunk &result, idx_t result_idx); + //! Append count rows to the version info + void AppendVersionInfo(TransactionData transaction, idx_t count); + //! Commit a previous append made by RowGroup::AppendVersionInfo + void CommitAppend(transaction_t commit_id, idx_t start, idx_t count); + //! Revert a previous append made by RowGroup::AppendVersionInfo + void RevertAppend(idx_t start); -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/map.hpp -// -// -//===----------------------------------------------------------------------===// + //! 
Delete the given set of rows in the version manager + idx_t Delete(TransactionData transaction, DataTable *table, row_t *row_ids, idx_t count); + RowGroupWriteData WriteToDisk(PartialBlockManager &manager, const vector &compression_types); + RowGroupPointer Checkpoint(RowGroupWriter &writer, vector> &global_stats); + static void Serialize(RowGroupPointer &pointer, Serializer &serializer); + static RowGroupPointer Deserialize(Deserializer &source, const ColumnList &columns); + void InitializeAppend(RowGroupAppendState &append_state); + void Append(RowGroupAppendState &append_state, DataChunk &chunk, idx_t append_count); -#include + void Update(TransactionData transaction, DataChunk &updates, row_t *ids, idx_t offset, idx_t count, + const vector &column_ids); + //! Update a single column; corresponds to DataTable::UpdateColumn + //! This method should only be called from the WAL + void UpdateColumn(TransactionData transaction, DataChunk &updates, Vector &row_ids, + const vector &column_path); -namespace duckdb { -using std::map; -using std::multimap; -} // namespace duckdb + void MergeStatistics(idx_t column_idx, const BaseStatistics &other); + void MergeIntoStatistics(idx_t column_idx, BaseStatistics &other); + unique_ptr GetStatistics(idx_t column_idx); + void GetStorageInfo(idx_t row_group_index, vector> &result); + void Verify(); -namespace duckdb { -class DatabaseInstance; -class ColumnData; -class ColumnDataCheckpointer; -class ColumnSegment; -class SegmentStatistics; + void NextVector(RowGroupScanState &state); -struct ColumnFetchState; -struct ColumnScanState; -struct SegmentScanState; +private: + ChunkInfo *GetChunkInfo(idx_t vector_idx); -struct AnalyzeState { - virtual ~AnalyzeState() { - } -}; + template + void TemplatedScan(TransactionData transaction, RowGroupScanState &state, DataChunk &result); -struct CompressionState { - virtual ~CompressionState() { - } -}; + static void CheckpointDeletes(VersionNode *versions, Serializer &serializer); + static shared_ptr DeserializeDeletes(Deserializer &source); -struct CompressedSegmentState { - virtual ~CompressedSegmentState() { - } +private: + mutex row_group_lock; + mutex stats_lock; }; -struct UncompressedCompressState : public CompressionState { - explicit UncompressedCompressState(ColumnDataCheckpointer &checkpointer); - - ColumnDataCheckpointer &checkpointer; - unique_ptr current_segment; +struct VersionNode { + unique_ptr info[RowGroup::ROW_GROUP_VECTOR_COUNT]; - virtual void CreateEmptySegment(idx_t row_start); - void FlushSegment(idx_t segment_size); - void Finalize(idx_t segment_size); + void SetStart(idx_t start); }; -//===--------------------------------------------------------------------===// -// Analyze -//===--------------------------------------------------------------------===// -//! The analyze functions are used to determine whether or not to use this compression method -//! The system first determines the potential compression methods to use based on the physical type of the column -//! After that the following steps are taken: -//! 1. The init_analyze is called to initialize the analyze state of every candidate compression method -//! 2. The analyze method is called with all of the input data in the order in which it must be stored. -//! analyze can return "false". In that case, the compression method is taken out of consideration early. -//! 3. The final_analyze method is called, which should return a score for the compression method - -//! 
The system then decides which compression function to use based on the analyzed score (returned from final_analyze) -typedef unique_ptr (*compression_init_analyze_t)(ColumnData &col_data, PhysicalType type); -typedef bool (*compression_analyze_t)(AnalyzeState &state, Vector &input, idx_t count); -typedef idx_t (*compression_final_analyze_t)(AnalyzeState &state); - -//===--------------------------------------------------------------------===// -// Compress -//===--------------------------------------------------------------------===// -typedef unique_ptr (*compression_init_compression_t)(ColumnDataCheckpointer &checkpointer, - unique_ptr state); -typedef void (*compression_compress_data_t)(CompressionState &state, Vector &scan_vector, idx_t count); -typedef void (*compression_compress_finalize_t)(CompressionState &state); - -//===--------------------------------------------------------------------===// -// Uncompress / Scan -//===--------------------------------------------------------------------===// -typedef unique_ptr (*compression_init_segment_scan_t)(ColumnSegment &segment); -typedef void (*compression_scan_vector_t)(ColumnSegment &segment, ColumnScanState &state, idx_t scan_count, - Vector &result); -typedef void (*compression_scan_partial_t)(ColumnSegment &segment, ColumnScanState &state, idx_t scan_count, - Vector &result, idx_t result_offset); -typedef void (*compression_fetch_row_t)(ColumnSegment &segment, ColumnFetchState &state, row_t row_id, Vector &result, - idx_t result_idx); -typedef void (*compression_skip_t)(ColumnSegment &segment, ColumnScanState &state, idx_t skip_count); - -//===--------------------------------------------------------------------===// -// Append (optional) -//===--------------------------------------------------------------------===// -typedef unique_ptr (*compression_init_segment_t)(ColumnSegment &segment, block_id_t block_id); -typedef idx_t (*compression_append_t)(ColumnSegment &segment, SegmentStatistics &stats, UnifiedVectorFormat &data, - idx_t offset, idx_t count); -typedef idx_t (*compression_finalize_append_t)(ColumnSegment &segment, SegmentStatistics &stats); -typedef void (*compression_revert_append_t)(ColumnSegment &segment, idx_t start_row); - -class CompressionFunction { -public: - CompressionFunction(CompressionType type, PhysicalType data_type, compression_init_analyze_t init_analyze, - compression_analyze_t analyze, compression_final_analyze_t final_analyze, - compression_init_compression_t init_compression, compression_compress_data_t compress, - compression_compress_finalize_t compress_finalize, compression_init_segment_scan_t init_scan, - compression_scan_vector_t scan_vector, compression_scan_partial_t scan_partial, - compression_fetch_row_t fetch_row, compression_skip_t skip, - compression_init_segment_t init_segment = nullptr, compression_append_t append = nullptr, - compression_finalize_append_t finalize_append = nullptr, - compression_revert_append_t revert_append = nullptr) - : type(type), data_type(data_type), init_analyze(init_analyze), analyze(analyze), final_analyze(final_analyze), - init_compression(init_compression), compress(compress), compress_finalize(compress_finalize), - init_scan(init_scan), scan_vector(scan_vector), scan_partial(scan_partial), fetch_row(fetch_row), skip(skip), - init_segment(init_segment), append(append), finalize_append(finalize_append), revert_append(revert_append) { - } - - //! Compression type - CompressionType type; - //! 
The data type this function can compress - PhysicalType data_type; - - //! Analyze step: determine which compression function is the most effective - //! init_analyze is called once to set up the analyze state - compression_init_analyze_t init_analyze; - //! analyze is called several times (once per vector in the row group) - //! analyze should return true, unless compression is no longer possible with this compression method - //! in that case false should be returned - compression_analyze_t analyze; - //! final_analyze should return the score of the compression function - //! ideally this is the exact number of bytes required to store the data - //! this is not required/enforced: it can be an estimate as well - compression_final_analyze_t final_analyze; +} // namespace duckdb - //! Compression step: actually compress the data - //! init_compression is called once to set up the comperssion state - compression_init_compression_t init_compression; - //! compress is called several times (once per vector in the row group) - compression_compress_data_t compress; - //! compress_finalize is called after - compression_compress_finalize_t compress_finalize; - //! init_scan is called to set up the scan state - compression_init_segment_scan_t init_scan; - //! scan_vector scans an entire vector using the scan state - compression_scan_vector_t scan_vector; - //! scan_partial scans a subset of a vector - //! this can request > vector_size as well - //! this is used if a vector crosses segment boundaries, or for child columns of lists - compression_scan_partial_t scan_partial; - //! fetch an individual row from the compressed vector - //! used for index lookups - compression_fetch_row_t fetch_row; - //! Skip forward in the compressed segment - compression_skip_t skip; - // Append functions - //! This only really needs to be defined for uncompressed segments +namespace duckdb { - //! Initialize a compressed segment (optional) - compression_init_segment_t init_segment; - //! Append to the compressed segment (optional) - compression_append_t append; - //! Finalize an append to the segment - compression_finalize_append_t finalize_append; - //! Revert append (optional) - compression_revert_append_t revert_append; +struct DataPointer { + uint64_t row_start; + uint64_t tuple_count; + BlockPointer block_pointer; + CompressionType compression_type; + //! Type-specific statistics of the segment + unique_ptr statistics; }; -//! The set of compression functions -struct CompressionFunctionSet { - map> functions; +struct RowGroupPointer { + uint64_t row_start; + uint64_t tuple_count; + //! The data pointers of the column segments stored in the row group + vector data_pointers; + //! The per-column statistics of the row group + vector> statistics; + //! The versions information of the row group (if any) + shared_ptr versions; }; } // namespace duckdb namespace duckdb { -class ColumnSegment; -class BlockManager; -class ColumnSegment; -class ColumnData; -class DatabaseInstance; -class Transaction; class BaseStatistics; -class UpdateSegment; -class TableFilter; -struct ColumnFetchState; -struct ColumnScanState; -struct ColumnAppendState; - -enum class ColumnSegmentType : uint8_t { TRANSIENT, PERSISTENT }; -//! TableFilter represents a filter pushed down into the table scan. -class ColumnSegment : public SegmentBase { +class PersistentTableData { public: - ~ColumnSegment() override; + explicit PersistentTableData(idx_t column_count); + ~PersistentTableData(); - //! The database instance - DatabaseInstance &db; - //! 
The type stored in the column - LogicalType type; - //! The size of the type - idx_t type_size; - //! The column segment type (transient or persistent) - ColumnSegmentType segment_type; - //! The compression function - CompressionFunction *function; - //! The statistics for the segment - SegmentStatistics stats; - //! The block that this segment relates to - shared_ptr block; + vector row_groups; + vector> column_stats; +}; - static unique_ptr CreatePersistentSegment(DatabaseInstance &db, block_id_t id, idx_t offset, - const LogicalType &type_p, idx_t start, idx_t count, - CompressionType compression_type, - unique_ptr statistics); - static unique_ptr CreateTransientSegment(DatabaseInstance &db, const LogicalType &type, idx_t start); +} // namespace duckdb -public: - void InitializeScan(ColumnScanState &state); - //! Scan one vector from this segment - void Scan(ColumnScanState &state, idx_t scan_count, Vector &result, idx_t result_offset, bool entire_vector); - //! Fetch a value of the specific row id and append it to the result - void FetchRow(ColumnFetchState &state, row_t row_id, Vector &result, idx_t result_idx); - static idx_t FilterSelection(SelectionVector &sel, Vector &result, const TableFilter &filter, - idx_t &approved_tuple_count, ValidityMask &mask); - //! Skip a scan forward to the row_index specified in the scan state - void Skip(ColumnScanState &state); - //! Initialize an append of this segment. Appends are only supported on transient segments. - void InitializeAppend(ColumnAppendState &state); - //! Appends a (part of) vector to the segment, returns the amount of entries successfully appended - idx_t Append(ColumnAppendState &state, UnifiedVectorFormat &data, idx_t offset, idx_t count); - //! Finalize the segment for appending - no more appends can follow on this segment - //! The segment should be compacted as much as possible - //! Returns the number of bytes occupied within the segment - idx_t FinalizeAppend(); - //! Revert an append made to this segment - void RevertAppend(idx_t start_row); +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/table_index_list.hpp +// +// +//===----------------------------------------------------------------------===// - //! Convert a transient in-memory segment into a persistent segment blocked by an on-disk block. - //! Only used during checkpointing. - void ConvertToPersistent(block_id_t block_id); - //! Convert a transient in-memory segment into a persistent segment blocked by an on-disk block. 
- void ConvertToPersistent(shared_ptr block, block_id_t block_id, uint32_t offset_in_block); - block_id_t GetBlockId() { - D_ASSERT(segment_type == ColumnSegmentType::PERSISTENT); - return block_id; - } - idx_t GetBlockOffset() { - D_ASSERT(segment_type == ColumnSegmentType::PERSISTENT || offset == 0); - return offset; - } - idx_t GetRelativeIndex(idx_t row_index) { - D_ASSERT(row_index >= this->start); - D_ASSERT(row_index <= this->start + this->count); - return row_index - this->start; - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/index.hpp +// +// +//===----------------------------------------------------------------------===// - CompressedSegmentState *GetSegmentState() { - return segment_state.get(); - } -public: - ColumnSegment(DatabaseInstance &db, LogicalType type, ColumnSegmentType segment_type, idx_t start, idx_t count, - CompressionFunction *function, unique_ptr statistics, block_id_t block_id, - idx_t offset); -private: - void Scan(ColumnScanState &state, idx_t scan_count, Vector &result); - void ScanPartial(ColumnScanState &state, idx_t scan_count, Vector &result, idx_t result_offset); -private: - //! The block id that this segment relates to (persistent segment only) - block_id_t block_id; - //! The offset into the block (persistent segment only) - idx_t offset; - //! Storage associated with the compressed segment - unique_ptr segment_state; -}; -} // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/sort/sort.hpp +// +// +//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/storage/table_statistics.hpp +// duckdb/common/sort/sorted_block.hpp // // //===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/fast_mem.hpp +// +// +//===----------------------------------------------------------------------===// -namespace duckdb { - -struct TableStatistics { - idx_t estimated_cardinality; -}; -} // namespace duckdb +template +static inline void MemcpyFixed(void *dest, const void *src) { + memcpy(dest, src, SIZE); +} +template +static inline int MemcmpFixed(const void *str1, const void *str2) { + return memcmp(str1, str2, SIZE); +} namespace duckdb { -class ClientContext; -class ColumnDefinition; -class DataTable; -class RowGroup; -class StorageManager; -class TableCatalogEntry; -class Transaction; -class WriteAheadLog; -class TableDataWriter; -struct DataTableInfo { - DataTableInfo(DatabaseInstance &db, string schema, string table) - : db(db), cardinality(0), schema(move(schema)), table(move(table)) { - } - - //! The database instance of the table - DatabaseInstance &db; - //! The amount of elements in the table. Note that this number signifies the amount of COMMITTED entries in the - //! table. It can be inaccurate inside of transactions. More work is needed to properly support that. - atomic cardinality; - // schema of the table - string schema; - // name of the table - string table; - - TableIndex indexes; - - bool IsTemporary() { - return schema == TEMP_SCHEMA; +//! This templated memcpy is significantly faster than std::memcpy, +//! but only when you are calling memcpy with a const size in a loop. +//! For instance `while () { memcpy(, , const_size); ... 
}` +static inline void FastMemcpy(void *dest, const void *src, const size_t size) { + // LCOV_EXCL_START + switch (size) { + case 0: + return; + case 1: + return MemcpyFixed<1>(dest, src); + case 2: + return MemcpyFixed<2>(dest, src); + case 3: + return MemcpyFixed<3>(dest, src); + case 4: + return MemcpyFixed<4>(dest, src); + case 5: + return MemcpyFixed<5>(dest, src); + case 6: + return MemcpyFixed<6>(dest, src); + case 7: + return MemcpyFixed<7>(dest, src); + case 8: + return MemcpyFixed<8>(dest, src); + case 9: + return MemcpyFixed<9>(dest, src); + case 10: + return MemcpyFixed<10>(dest, src); + case 11: + return MemcpyFixed<11>(dest, src); + case 12: + return MemcpyFixed<12>(dest, src); + case 13: + return MemcpyFixed<13>(dest, src); + case 14: + return MemcpyFixed<14>(dest, src); + case 15: + return MemcpyFixed<15>(dest, src); + case 16: + return MemcpyFixed<16>(dest, src); + case 17: + return MemcpyFixed<17>(dest, src); + case 18: + return MemcpyFixed<18>(dest, src); + case 19: + return MemcpyFixed<19>(dest, src); + case 20: + return MemcpyFixed<20>(dest, src); + case 21: + return MemcpyFixed<21>(dest, src); + case 22: + return MemcpyFixed<22>(dest, src); + case 23: + return MemcpyFixed<23>(dest, src); + case 24: + return MemcpyFixed<24>(dest, src); + case 25: + return MemcpyFixed<25>(dest, src); + case 26: + return MemcpyFixed<26>(dest, src); + case 27: + return MemcpyFixed<27>(dest, src); + case 28: + return MemcpyFixed<28>(dest, src); + case 29: + return MemcpyFixed<29>(dest, src); + case 30: + return MemcpyFixed<30>(dest, src); + case 31: + return MemcpyFixed<31>(dest, src); + case 32: + return MemcpyFixed<32>(dest, src); + case 33: + return MemcpyFixed<33>(dest, src); + case 34: + return MemcpyFixed<34>(dest, src); + case 35: + return MemcpyFixed<35>(dest, src); + case 36: + return MemcpyFixed<36>(dest, src); + case 37: + return MemcpyFixed<37>(dest, src); + case 38: + return MemcpyFixed<38>(dest, src); + case 39: + return MemcpyFixed<39>(dest, src); + case 40: + return MemcpyFixed<40>(dest, src); + case 41: + return MemcpyFixed<41>(dest, src); + case 42: + return MemcpyFixed<42>(dest, src); + case 43: + return MemcpyFixed<43>(dest, src); + case 44: + return MemcpyFixed<44>(dest, src); + case 45: + return MemcpyFixed<45>(dest, src); + case 46: + return MemcpyFixed<46>(dest, src); + case 47: + return MemcpyFixed<47>(dest, src); + case 48: + return MemcpyFixed<48>(dest, src); + case 49: + return MemcpyFixed<49>(dest, src); + case 50: + return MemcpyFixed<50>(dest, src); + case 51: + return MemcpyFixed<51>(dest, src); + case 52: + return MemcpyFixed<52>(dest, src); + case 53: + return MemcpyFixed<53>(dest, src); + case 54: + return MemcpyFixed<54>(dest, src); + case 55: + return MemcpyFixed<55>(dest, src); + case 56: + return MemcpyFixed<56>(dest, src); + case 57: + return MemcpyFixed<57>(dest, src); + case 58: + return MemcpyFixed<58>(dest, src); + case 59: + return MemcpyFixed<59>(dest, src); + case 60: + return MemcpyFixed<60>(dest, src); + case 61: + return MemcpyFixed<61>(dest, src); + case 62: + return MemcpyFixed<62>(dest, src); + case 63: + return MemcpyFixed<63>(dest, src); + case 64: + return MemcpyFixed<64>(dest, src); + case 65: + return MemcpyFixed<65>(dest, src); + case 66: + return MemcpyFixed<66>(dest, src); + case 67: + return MemcpyFixed<67>(dest, src); + case 68: + return MemcpyFixed<68>(dest, src); + case 69: + return MemcpyFixed<69>(dest, src); + case 70: + return MemcpyFixed<70>(dest, src); + case 71: + return MemcpyFixed<71>(dest, src); + case 72: + return 
MemcpyFixed<72>(dest, src); + case 73: + return MemcpyFixed<73>(dest, src); + case 74: + return MemcpyFixed<74>(dest, src); + case 75: + return MemcpyFixed<75>(dest, src); + case 76: + return MemcpyFixed<76>(dest, src); + case 77: + return MemcpyFixed<77>(dest, src); + case 78: + return MemcpyFixed<78>(dest, src); + case 79: + return MemcpyFixed<79>(dest, src); + case 80: + return MemcpyFixed<80>(dest, src); + case 81: + return MemcpyFixed<81>(dest, src); + case 82: + return MemcpyFixed<82>(dest, src); + case 83: + return MemcpyFixed<83>(dest, src); + case 84: + return MemcpyFixed<84>(dest, src); + case 85: + return MemcpyFixed<85>(dest, src); + case 86: + return MemcpyFixed<86>(dest, src); + case 87: + return MemcpyFixed<87>(dest, src); + case 88: + return MemcpyFixed<88>(dest, src); + case 89: + return MemcpyFixed<89>(dest, src); + case 90: + return MemcpyFixed<90>(dest, src); + case 91: + return MemcpyFixed<91>(dest, src); + case 92: + return MemcpyFixed<92>(dest, src); + case 93: + return MemcpyFixed<93>(dest, src); + case 94: + return MemcpyFixed<94>(dest, src); + case 95: + return MemcpyFixed<95>(dest, src); + case 96: + return MemcpyFixed<96>(dest, src); + case 97: + return MemcpyFixed<97>(dest, src); + case 98: + return MemcpyFixed<98>(dest, src); + case 99: + return MemcpyFixed<99>(dest, src); + case 100: + return MemcpyFixed<100>(dest, src); + case 101: + return MemcpyFixed<101>(dest, src); + case 102: + return MemcpyFixed<102>(dest, src); + case 103: + return MemcpyFixed<103>(dest, src); + case 104: + return MemcpyFixed<104>(dest, src); + case 105: + return MemcpyFixed<105>(dest, src); + case 106: + return MemcpyFixed<106>(dest, src); + case 107: + return MemcpyFixed<107>(dest, src); + case 108: + return MemcpyFixed<108>(dest, src); + case 109: + return MemcpyFixed<109>(dest, src); + case 110: + return MemcpyFixed<110>(dest, src); + case 111: + return MemcpyFixed<111>(dest, src); + case 112: + return MemcpyFixed<112>(dest, src); + case 113: + return MemcpyFixed<113>(dest, src); + case 114: + return MemcpyFixed<114>(dest, src); + case 115: + return MemcpyFixed<115>(dest, src); + case 116: + return MemcpyFixed<116>(dest, src); + case 117: + return MemcpyFixed<117>(dest, src); + case 118: + return MemcpyFixed<118>(dest, src); + case 119: + return MemcpyFixed<119>(dest, src); + case 120: + return MemcpyFixed<120>(dest, src); + case 121: + return MemcpyFixed<121>(dest, src); + case 122: + return MemcpyFixed<122>(dest, src); + case 123: + return MemcpyFixed<123>(dest, src); + case 124: + return MemcpyFixed<124>(dest, src); + case 125: + return MemcpyFixed<125>(dest, src); + case 126: + return MemcpyFixed<126>(dest, src); + case 127: + return MemcpyFixed<127>(dest, src); + case 128: + return MemcpyFixed<128>(dest, src); + case 129: + return MemcpyFixed<129>(dest, src); + case 130: + return MemcpyFixed<130>(dest, src); + case 131: + return MemcpyFixed<131>(dest, src); + case 132: + return MemcpyFixed<132>(dest, src); + case 133: + return MemcpyFixed<133>(dest, src); + case 134: + return MemcpyFixed<134>(dest, src); + case 135: + return MemcpyFixed<135>(dest, src); + case 136: + return MemcpyFixed<136>(dest, src); + case 137: + return MemcpyFixed<137>(dest, src); + case 138: + return MemcpyFixed<138>(dest, src); + case 139: + return MemcpyFixed<139>(dest, src); + case 140: + return MemcpyFixed<140>(dest, src); + case 141: + return MemcpyFixed<141>(dest, src); + case 142: + return MemcpyFixed<142>(dest, src); + case 143: + return MemcpyFixed<143>(dest, src); + case 144: + return 
MemcpyFixed<144>(dest, src); + case 145: + return MemcpyFixed<145>(dest, src); + case 146: + return MemcpyFixed<146>(dest, src); + case 147: + return MemcpyFixed<147>(dest, src); + case 148: + return MemcpyFixed<148>(dest, src); + case 149: + return MemcpyFixed<149>(dest, src); + case 150: + return MemcpyFixed<150>(dest, src); + case 151: + return MemcpyFixed<151>(dest, src); + case 152: + return MemcpyFixed<152>(dest, src); + case 153: + return MemcpyFixed<153>(dest, src); + case 154: + return MemcpyFixed<154>(dest, src); + case 155: + return MemcpyFixed<155>(dest, src); + case 156: + return MemcpyFixed<156>(dest, src); + case 157: + return MemcpyFixed<157>(dest, src); + case 158: + return MemcpyFixed<158>(dest, src); + case 159: + return MemcpyFixed<159>(dest, src); + case 160: + return MemcpyFixed<160>(dest, src); + case 161: + return MemcpyFixed<161>(dest, src); + case 162: + return MemcpyFixed<162>(dest, src); + case 163: + return MemcpyFixed<163>(dest, src); + case 164: + return MemcpyFixed<164>(dest, src); + case 165: + return MemcpyFixed<165>(dest, src); + case 166: + return MemcpyFixed<166>(dest, src); + case 167: + return MemcpyFixed<167>(dest, src); + case 168: + return MemcpyFixed<168>(dest, src); + case 169: + return MemcpyFixed<169>(dest, src); + case 170: + return MemcpyFixed<170>(dest, src); + case 171: + return MemcpyFixed<171>(dest, src); + case 172: + return MemcpyFixed<172>(dest, src); + case 173: + return MemcpyFixed<173>(dest, src); + case 174: + return MemcpyFixed<174>(dest, src); + case 175: + return MemcpyFixed<175>(dest, src); + case 176: + return MemcpyFixed<176>(dest, src); + case 177: + return MemcpyFixed<177>(dest, src); + case 178: + return MemcpyFixed<178>(dest, src); + case 179: + return MemcpyFixed<179>(dest, src); + case 180: + return MemcpyFixed<180>(dest, src); + case 181: + return MemcpyFixed<181>(dest, src); + case 182: + return MemcpyFixed<182>(dest, src); + case 183: + return MemcpyFixed<183>(dest, src); + case 184: + return MemcpyFixed<184>(dest, src); + case 185: + return MemcpyFixed<185>(dest, src); + case 186: + return MemcpyFixed<186>(dest, src); + case 187: + return MemcpyFixed<187>(dest, src); + case 188: + return MemcpyFixed<188>(dest, src); + case 189: + return MemcpyFixed<189>(dest, src); + case 190: + return MemcpyFixed<190>(dest, src); + case 191: + return MemcpyFixed<191>(dest, src); + case 192: + return MemcpyFixed<192>(dest, src); + case 193: + return MemcpyFixed<193>(dest, src); + case 194: + return MemcpyFixed<194>(dest, src); + case 195: + return MemcpyFixed<195>(dest, src); + case 196: + return MemcpyFixed<196>(dest, src); + case 197: + return MemcpyFixed<197>(dest, src); + case 198: + return MemcpyFixed<198>(dest, src); + case 199: + return MemcpyFixed<199>(dest, src); + case 200: + return MemcpyFixed<200>(dest, src); + case 201: + return MemcpyFixed<201>(dest, src); + case 202: + return MemcpyFixed<202>(dest, src); + case 203: + return MemcpyFixed<203>(dest, src); + case 204: + return MemcpyFixed<204>(dest, src); + case 205: + return MemcpyFixed<205>(dest, src); + case 206: + return MemcpyFixed<206>(dest, src); + case 207: + return MemcpyFixed<207>(dest, src); + case 208: + return MemcpyFixed<208>(dest, src); + case 209: + return MemcpyFixed<209>(dest, src); + case 210: + return MemcpyFixed<210>(dest, src); + case 211: + return MemcpyFixed<211>(dest, src); + case 212: + return MemcpyFixed<212>(dest, src); + case 213: + return MemcpyFixed<213>(dest, src); + case 214: + return MemcpyFixed<214>(dest, src); + case 215: + return 
MemcpyFixed<215>(dest, src); + case 216: + return MemcpyFixed<216>(dest, src); + case 217: + return MemcpyFixed<217>(dest, src); + case 218: + return MemcpyFixed<218>(dest, src); + case 219: + return MemcpyFixed<219>(dest, src); + case 220: + return MemcpyFixed<220>(dest, src); + case 221: + return MemcpyFixed<221>(dest, src); + case 222: + return MemcpyFixed<222>(dest, src); + case 223: + return MemcpyFixed<223>(dest, src); + case 224: + return MemcpyFixed<224>(dest, src); + case 225: + return MemcpyFixed<225>(dest, src); + case 226: + return MemcpyFixed<226>(dest, src); + case 227: + return MemcpyFixed<227>(dest, src); + case 228: + return MemcpyFixed<228>(dest, src); + case 229: + return MemcpyFixed<229>(dest, src); + case 230: + return MemcpyFixed<230>(dest, src); + case 231: + return MemcpyFixed<231>(dest, src); + case 232: + return MemcpyFixed<232>(dest, src); + case 233: + return MemcpyFixed<233>(dest, src); + case 234: + return MemcpyFixed<234>(dest, src); + case 235: + return MemcpyFixed<235>(dest, src); + case 236: + return MemcpyFixed<236>(dest, src); + case 237: + return MemcpyFixed<237>(dest, src); + case 238: + return MemcpyFixed<238>(dest, src); + case 239: + return MemcpyFixed<239>(dest, src); + case 240: + return MemcpyFixed<240>(dest, src); + case 241: + return MemcpyFixed<241>(dest, src); + case 242: + return MemcpyFixed<242>(dest, src); + case 243: + return MemcpyFixed<243>(dest, src); + case 244: + return MemcpyFixed<244>(dest, src); + case 245: + return MemcpyFixed<245>(dest, src); + case 246: + return MemcpyFixed<246>(dest, src); + case 247: + return MemcpyFixed<247>(dest, src); + case 248: + return MemcpyFixed<248>(dest, src); + case 249: + return MemcpyFixed<249>(dest, src); + case 250: + return MemcpyFixed<250>(dest, src); + case 251: + return MemcpyFixed<251>(dest, src); + case 252: + return MemcpyFixed<252>(dest, src); + case 253: + return MemcpyFixed<253>(dest, src); + case 254: + return MemcpyFixed<254>(dest, src); + case 255: + return MemcpyFixed<255>(dest, src); + case 256: + return MemcpyFixed<256>(dest, src); + default: + memcpy(dest, src, size); } -}; - -struct ParallelTableScanState { - RowGroup *current_row_group; - idx_t vector_index; - idx_t max_row; - LocalScanState local_state; - bool transaction_local_data; -}; - -//! DataTable represents a physical table on disk -class DataTable { -public: - //! Constructs a new data table from an (optional) set of persistent segments - DataTable(DatabaseInstance &db, const string &schema, const string &table, - vector column_definitions_p, unique_ptr data = nullptr); - //! Constructs a DataTable as a delta on an existing data table with a newly added column - DataTable(ClientContext &context, DataTable &parent, ColumnDefinition &new_column, Expression *default_value); - //! Constructs a DataTable as a delta on an existing data table but with one column removed - DataTable(ClientContext &context, DataTable &parent, idx_t removed_column); - //! Constructs a DataTable as a delta on an existing data table but with one column changed type - DataTable(ClientContext &context, DataTable &parent, idx_t changed_idx, const LogicalType &target_type, - vector bound_columns, Expression &cast_expr); - //! Constructs a DataTable as a delta on an existing data table but with one column added new constraint - DataTable(ClientContext &context, DataTable &parent, unique_ptr constraint); - - shared_ptr info; - - vector column_definitions; - - //! A reference to the database instance - DatabaseInstance &db; - -public: - //! 
Returns a list of types of the table - vector GetTypes(); - - void InitializeScan(TableScanState &state, const vector &column_ids, - TableFilterSet *table_filter = nullptr); - void InitializeScan(Transaction &transaction, TableScanState &state, const vector &column_ids, - TableFilterSet *table_filters = nullptr); - - //! Returns the maximum amount of threads that should be assigned to scan this data table - idx_t MaxThreads(ClientContext &context); - void InitializeParallelScan(ClientContext &context, ParallelTableScanState &state); - bool NextParallelScan(ClientContext &context, ParallelTableScanState &state, TableScanState &scan_state, - const vector &column_ids); - - //! Scans up to STANDARD_VECTOR_SIZE elements from the table starting - //! from offset and store them in result. Offset is incremented with how many - //! elements were returned. - //! Returns true if all pushed down filters were executed during data fetching - void Scan(Transaction &transaction, DataChunk &result, TableScanState &state, vector &column_ids); - - //! Fetch data from the specific row identifiers from the base table - void Fetch(Transaction &transaction, DataChunk &result, const vector &column_ids, Vector &row_ids, - idx_t fetch_count, ColumnFetchState &state); - - //! Append a DataChunk to the table. Throws an exception if the columns don't match the tables' columns. - void Append(TableCatalogEntry &table, ClientContext &context, DataChunk &chunk); - //! Delete the entries with the specified row identifier from the table - idx_t Delete(TableCatalogEntry &table, ClientContext &context, Vector &row_ids, idx_t count); - //! Update the entries with the specified row identifier from the table - void Update(TableCatalogEntry &table, ClientContext &context, Vector &row_ids, const vector &column_ids, - DataChunk &data); - //! Update a single (sub-)column along a column path - //! The column_path vector is a *path* towards a column within the table - //! i.e. if we have a table with a single column S STRUCT(A INT, B INT) - //! and we update the validity mask of "S.B" - //! the column path is: - //! 0 (first column of table) - //! -> 1 (second subcolumn of struct) - //! -> 0 (first subcolumn of INT) - //! This method should only be used from the WAL replay. It does not verify update constraints. - void UpdateColumn(TableCatalogEntry &table, ClientContext &context, Vector &row_ids, - const vector &column_path, DataChunk &updates); - - //! Add an index to the DataTable - void AddIndex(unique_ptr index, const vector> &expressions); - - //! Begin appending structs to this table, obtaining necessary locks, etc - void InitializeAppend(Transaction &transaction, TableAppendState &state, idx_t append_count); - //! Append a chunk to the table using the AppendState obtained from BeginAppend - void Append(Transaction &transaction, DataChunk &chunk, TableAppendState &state); - //! Commit the append - void CommitAppend(transaction_t commit_id, idx_t row_start, idx_t count); - //! Write a segment of the table to the WAL - void WriteToLog(WriteAheadLog &log, idx_t row_start, idx_t count); - //! Revert a set of appends made by the given AppendState, used to revert appends in the event of an error during - //! commit (e.g. because of an I/O exception) - void RevertAppend(idx_t start_row, idx_t count); - void RevertAppendInternal(idx_t start_row, idx_t count); - - void ScanTableSegment(idx_t start_row, idx_t count, const std::function &function); - - //! 
Append a chunk with the row ids [row_start, ..., row_start + chunk.size()] to all indexes of the table, returns - //! whether or not the append succeeded - bool AppendToIndexes(TableAppendState &state, DataChunk &chunk, row_t row_start); - //! Remove a chunk with the row ids [row_start, ..., row_start + chunk.size()] from all indexes of the table - void RemoveFromIndexes(TableAppendState &state, DataChunk &chunk, row_t row_start); - //! Remove the chunk with the specified set of row identifiers from all indexes of the table - void RemoveFromIndexes(TableAppendState &state, DataChunk &chunk, Vector &row_identifiers); - //! Remove the row identifiers from all the indexes of the table - void RemoveFromIndexes(Vector &row_identifiers, idx_t count); + // LCOV_EXCL_STOP +} - void SetAsRoot() { - this->is_root = true; +//! This templated memcmp is significantly faster than std::memcmp, +//! but only when you are calling memcmp with a const size in a loop. +//! For instance `while () { memcmp(, , const_size); ... }` +static inline int FastMemcmp(const void *str1, const void *str2, const size_t size) { + // LCOV_EXCL_START + switch (size) { + case 0: + return 0; + case 1: + return MemcmpFixed<1>(str1, str2); + case 2: + return MemcmpFixed<2>(str1, str2); + case 3: + return MemcmpFixed<3>(str1, str2); + case 4: + return MemcmpFixed<4>(str1, str2); + case 5: + return MemcmpFixed<5>(str1, str2); + case 6: + return MemcmpFixed<6>(str1, str2); + case 7: + return MemcmpFixed<7>(str1, str2); + case 8: + return MemcmpFixed<8>(str1, str2); + case 9: + return MemcmpFixed<9>(str1, str2); + case 10: + return MemcmpFixed<10>(str1, str2); + case 11: + return MemcmpFixed<11>(str1, str2); + case 12: + return MemcmpFixed<12>(str1, str2); + case 13: + return MemcmpFixed<13>(str1, str2); + case 14: + return MemcmpFixed<14>(str1, str2); + case 15: + return MemcmpFixed<15>(str1, str2); + case 16: + return MemcmpFixed<16>(str1, str2); + case 17: + return MemcmpFixed<17>(str1, str2); + case 18: + return MemcmpFixed<18>(str1, str2); + case 19: + return MemcmpFixed<19>(str1, str2); + case 20: + return MemcmpFixed<20>(str1, str2); + case 21: + return MemcmpFixed<21>(str1, str2); + case 22: + return MemcmpFixed<22>(str1, str2); + case 23: + return MemcmpFixed<23>(str1, str2); + case 24: + return MemcmpFixed<24>(str1, str2); + case 25: + return MemcmpFixed<25>(str1, str2); + case 26: + return MemcmpFixed<26>(str1, str2); + case 27: + return MemcmpFixed<27>(str1, str2); + case 28: + return MemcmpFixed<28>(str1, str2); + case 29: + return MemcmpFixed<29>(str1, str2); + case 30: + return MemcmpFixed<30>(str1, str2); + case 31: + return MemcmpFixed<31>(str1, str2); + case 32: + return MemcmpFixed<32>(str1, str2); + case 33: + return MemcmpFixed<33>(str1, str2); + case 34: + return MemcmpFixed<34>(str1, str2); + case 35: + return MemcmpFixed<35>(str1, str2); + case 36: + return MemcmpFixed<36>(str1, str2); + case 37: + return MemcmpFixed<37>(str1, str2); + case 38: + return MemcmpFixed<38>(str1, str2); + case 39: + return MemcmpFixed<39>(str1, str2); + case 40: + return MemcmpFixed<40>(str1, str2); + case 41: + return MemcmpFixed<41>(str1, str2); + case 42: + return MemcmpFixed<42>(str1, str2); + case 43: + return MemcmpFixed<43>(str1, str2); + case 44: + return MemcmpFixed<44>(str1, str2); + case 45: + return MemcmpFixed<45>(str1, str2); + case 46: + return MemcmpFixed<46>(str1, str2); + case 47: + return MemcmpFixed<47>(str1, str2); + case 48: + return MemcmpFixed<48>(str1, str2); + case 49: + return MemcmpFixed<49>(str1, str2); 
+ case 50: + return MemcmpFixed<50>(str1, str2); + case 51: + return MemcmpFixed<51>(str1, str2); + case 52: + return MemcmpFixed<52>(str1, str2); + case 53: + return MemcmpFixed<53>(str1, str2); + case 54: + return MemcmpFixed<54>(str1, str2); + case 55: + return MemcmpFixed<55>(str1, str2); + case 56: + return MemcmpFixed<56>(str1, str2); + case 57: + return MemcmpFixed<57>(str1, str2); + case 58: + return MemcmpFixed<58>(str1, str2); + case 59: + return MemcmpFixed<59>(str1, str2); + case 60: + return MemcmpFixed<60>(str1, str2); + case 61: + return MemcmpFixed<61>(str1, str2); + case 62: + return MemcmpFixed<62>(str1, str2); + case 63: + return MemcmpFixed<63>(str1, str2); + case 64: + return MemcmpFixed<64>(str1, str2); + default: + return memcmp(str1, str2, size); } + // LCOV_EXCL_STOP +} - //! Get statistics of a physical column within the table - unique_ptr GetStatistics(ClientContext &context, column_t column_id); - //! Sets statistics of a physical column within the table - void SetStatistics(column_t column_id, const std::function &set_fun); - - //! Checkpoint the table to the specified table data writer - void Checkpoint(TableDataWriter &writer); - void CommitDropTable(); - void CommitDropColumn(idx_t index); - - idx_t GetTotalRows(); - - //! Appends an empty row_group to the table - void AppendRowGroup(idx_t start_row); - - vector> GetStorageInfo(); - static bool IsForeignKeyIndex(const vector &fk_keys, Index &index, ForeignKeyType fk_type); - -private: - //! Verify the new added constraints against current persistent&local data - void VerifyNewConstraint(ClientContext &context, DataTable &parent, const BoundConstraint *constraint); - //! Verify constraints with a chunk from the Append containing all columns of the table - void VerifyAppendConstraints(TableCatalogEntry &table, ClientContext &context, DataChunk &chunk); - //! Verify constraints with a chunk from the Update containing only the specified column_ids - void VerifyUpdateConstraints(TableCatalogEntry &table, DataChunk &chunk, const vector &column_ids); - //! Verify constraints with a chunk from the Delete containing all columns of the table - void VerifyDeleteConstraints(TableCatalogEntry &table, ClientContext &context, DataChunk &chunk); - - void InitializeScanWithOffset(TableScanState &state, const vector &column_ids, idx_t start_row, - idx_t end_row); - bool InitializeScanInRowGroup(TableScanState &state, const vector &column_ids, - TableFilterSet *table_filters, RowGroup *row_group, idx_t vector_index, - idx_t max_row); - bool ScanBaseTable(Transaction &transaction, DataChunk &result, TableScanState &state); - - //! The CreateIndexScan is a special scan that is used to create an index on the table, it keeps locks on the table - void InitializeCreateIndexScan(CreateIndexScanState &state, const vector &column_ids); - bool ScanCreateIndex(CreateIndexScanState &state, DataChunk &result, TableScanType type); - -private: - //! Lock for appending entries to the table - mutex append_lock; - //! The number of rows in the table - atomic total_rows; - //! The segment trees holding the various row_groups of the table - shared_ptr row_groups; - //! Column statistics - vector> column_stats; - //! The statistics lock - mutex stats_lock; - //! Whether or not the data table is the root DataTable for this table; the root DataTable is the newest version - //! 
that can be appended to - atomic is_root; -}; } // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/execution/index/art/art.hpp +// duckdb/common/sort/comparators.hpp // // //===----------------------------------------------------------------------===// @@ -4073,6 +4508,13 @@ class DataTable { +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/types/row_layout.hpp +// +// +//===----------------------------------------------------------------------===// @@ -4082,19 +4524,17 @@ class DataTable { //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/execution/index/art/art_key.hpp +// duckdb/execution/operator/aggregate/aggregate_object.hpp // // //===----------------------------------------------------------------------===// - - //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/common/bit_operations.hpp +// duckdb/execution/expression_executor.hpp // // //===----------------------------------------------------------------------===// @@ -4107,314 +4547,349 @@ class DataTable { -#include -#include // strlen() on Solaris -#include - namespace duckdb { +class Allocator; +class ExecutionContext; -#define BSWAP16(x) ((uint16_t)((((uint16_t)(x)&0xff00) >> 8) | (((uint16_t)(x)&0x00ff) << 8))) +//! ExpressionExecutor is responsible for executing a set of expressions and storing the result in a data chunk +class ExpressionExecutor { + friend class Index; + friend class CreateIndexLocalSinkState; -#define BSWAP32(x) \ - ((uint32_t)((((uint32_t)(x)&0xff000000) >> 24) | (((uint32_t)(x)&0x00ff0000) >> 8) | \ - (((uint32_t)(x)&0x0000ff00) << 8) | (((uint32_t)(x)&0x000000ff) << 24))) +public: + DUCKDB_API explicit ExpressionExecutor(ClientContext &context); + DUCKDB_API ExpressionExecutor(ClientContext &context, const Expression *expression); + DUCKDB_API ExpressionExecutor(ClientContext &context, const Expression &expression); + DUCKDB_API ExpressionExecutor(ClientContext &context, const vector> &expressions); + ExpressionExecutor(ExpressionExecutor &&) = delete; -#define BSWAP64(x) \ - ((uint64_t)((((uint64_t)(x)&0xff00000000000000ull) >> 56) | (((uint64_t)(x)&0x00ff000000000000ull) >> 40) | \ - (((uint64_t)(x)&0x0000ff0000000000ull) >> 24) | (((uint64_t)(x)&0x000000ff00000000ull) >> 8) | \ - (((uint64_t)(x)&0x00000000ff000000ull) << 8) | (((uint64_t)(x)&0x0000000000ff0000ull) << 24) | \ - (((uint64_t)(x)&0x000000000000ff00ull) << 40) | (((uint64_t)(x)&0x00000000000000ffull) << 56))) + //! The expressions of the executor + vector expressions; + //! The data chunk of the current physical operator, used to resolve + //! column references and determines the output cardinality + DataChunk *chunk = nullptr; -struct Radix { public: - static inline bool IsLittleEndian() { - int n = 1; - if (*(char *)&n == 1) { - return true; - } else { - return false; - } - } + bool HasContext(); + ClientContext &GetContext(); + Allocator &GetAllocator(); - template - static inline void EncodeData(data_ptr_t dataptr, T value) { - throw NotImplementedException("Cannot create data from this type"); - } + //! 
Add an expression to the set of to-be-executed expressions of the executor + DUCKDB_API void AddExpression(const Expression &expr); - static inline void EncodeStringDataPrefix(data_ptr_t dataptr, string_t value, idx_t prefix_len) { - auto len = value.GetSize(); - memcpy(dataptr, value.GetDataUnsafe(), MinValue(len, prefix_len)); - if (len < prefix_len) { - memset(dataptr + len, '\0', prefix_len - len); - } + //! Execute the set of expressions with the given input chunk and store the result in the output chunk + DUCKDB_API void Execute(DataChunk *input, DataChunk &result); + inline void Execute(DataChunk &input, DataChunk &result) { + Execute(&input, result); } - - static inline uint8_t FlipSign(uint8_t key_byte) { - return key_byte ^ 128; + inline void Execute(DataChunk &result) { + Execute(nullptr, result); } - static inline uint32_t EncodeFloat(float x) { - uint64_t buff; + //! Execute the ExpressionExecutor and put the result in the result vector; this should only be used for expression + //! executors with a single expression + DUCKDB_API void ExecuteExpression(DataChunk &input, Vector &result); + //! Execute the ExpressionExecutor and put the result in the result vector; this should only be used for expression + //! executors with a single expression + DUCKDB_API void ExecuteExpression(Vector &result); + //! Execute the ExpressionExecutor and generate a selection vector from all true values in the result; this should + //! only be used with a single boolean expression + DUCKDB_API idx_t SelectExpression(DataChunk &input, SelectionVector &sel); - //! zero - if (x == 0) { - buff = 0; - buff |= (1u << 31); - return buff; - } - // nan - if (Value::IsNan(x)) { - return UINT_MAX; - } - //! infinity - if (x > FLT_MAX) { - return UINT_MAX - 1; - } - //! -infinity - if (x < -FLT_MAX) { - return 0; - } - buff = Load((const_data_ptr_t)&x); - if ((buff & (1u << 31)) == 0) { //! +0 and positive numbers - buff |= (1u << 31); - } else { //! negative numbers - buff = ~buff; //! complement 1 - } + //! Execute the expression with index `expr_idx` and store the result in the result vector + DUCKDB_API void ExecuteExpression(idx_t expr_idx, Vector &result); + //! Evaluate a scalar expression and fold it into a single value + DUCKDB_API static Value EvaluateScalar(ClientContext &context, const Expression &expr, + bool allow_unfoldable = false); + //! Try to evaluate a scalar expression and fold it into a single value, returns false if an exception is thrown + DUCKDB_API static bool TryEvaluateScalar(ClientContext &context, const Expression &expr, Value &result); - return buff; - } + //! Initialize the state of a given expression + static unique_ptr InitializeState(const Expression &expr, ExpressionExecutorState &state); - static inline uint64_t EncodeDouble(double x) { - uint64_t buff; - //! zero - if (x == 0) { - buff = 0; - buff += (1ull << 63); - return buff; - } - // nan - if (Value::IsNan(x)) { - return ULLONG_MAX; - } - //! infinity - if (x > DBL_MAX) { - return ULLONG_MAX - 1; - } - //! -infinity - if (x < -DBL_MAX) { - return 0; - } - buff = Load((const_data_ptr_t)&x); - if (buff < (1ull << 63)) { //! +0 and positive numbers - buff += (1ull << 63); - } else { //! negative numbers - buff = ~buff; //! complement 1 - } - return buff; + inline void SetChunk(DataChunk *chunk) { + this->chunk = chunk; + } + inline void SetChunk(DataChunk &chunk) { + SetChunk(&chunk); } -}; - -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, bool value) { - Store(value ? 
1 : 0, dataptr); -} - -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, int8_t value) { - Store(value, dataptr); - dataptr[0] = FlipSign(dataptr[0]); -} -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, int16_t value) { - Store(BSWAP16(value), dataptr); - dataptr[0] = FlipSign(dataptr[0]); -} + DUCKDB_API vector> &GetStates(); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, int32_t value) { - Store(BSWAP32(value), dataptr); - dataptr[0] = FlipSign(dataptr[0]); -} +protected: + void Initialize(const Expression &expr, ExpressionExecutorState &state); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, int64_t value) { - Store(BSWAP64(value), dataptr); - dataptr[0] = FlipSign(dataptr[0]); -} + static unique_ptr InitializeState(const BoundReferenceExpression &expr, + ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundBetweenExpression &expr, + ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundCaseExpression &expr, ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundCastExpression &expr, ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundComparisonExpression &expr, + ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundConjunctionExpression &expr, + ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundConstantExpression &expr, + ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundFunctionExpression &expr, + ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundOperatorExpression &expr, + ExpressionExecutorState &state); + static unique_ptr InitializeState(const BoundParameterExpression &expr, + ExpressionExecutorState &state); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, uint8_t value) { - Store(value, dataptr); -} + void Execute(const Expression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, uint16_t value) { - Store(BSWAP16(value), dataptr); -} + void Execute(const BoundBetweenExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); + void Execute(const BoundCaseExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); + void Execute(const BoundCastExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, uint32_t value) { - Store(BSWAP32(value), dataptr); -} + void Execute(const BoundComparisonExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); + void Execute(const BoundConjunctionExpression &expr, ExpressionState *state, const SelectionVector *sel, + idx_t count, Vector &result); + void Execute(const BoundConstantExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); + void Execute(const BoundFunctionExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); + void Execute(const BoundOperatorExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); + void Execute(const BoundParameterExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); + void Execute(const 
BoundReferenceExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + Vector &result); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, uint64_t value) { - Store(BSWAP64(value), dataptr); -} + //! Execute the (boolean-returning) expression and generate a selection vector with all entries that are "true" in + //! the result + idx_t Select(const Expression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + SelectionVector *true_sel, SelectionVector *false_sel); + idx_t DefaultSelect(const Expression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + SelectionVector *true_sel, SelectionVector *false_sel); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, hugeint_t value) { - EncodeData(dataptr, value.upper); - EncodeData(dataptr + sizeof(value.upper), value.lower); -} + idx_t Select(const BoundBetweenExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + SelectionVector *true_sel, SelectionVector *false_sel); + idx_t Select(const BoundComparisonExpression &expr, ExpressionState *state, const SelectionVector *sel, idx_t count, + SelectionVector *true_sel, SelectionVector *false_sel); + idx_t Select(const BoundConjunctionExpression &expr, ExpressionState *state, const SelectionVector *sel, + idx_t count, SelectionVector *true_sel, SelectionVector *false_sel); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, float value) { - uint32_t converted_value = EncodeFloat(value); - Store(BSWAP32(converted_value), dataptr); -} + //! Verify that the output of a step in the ExpressionExecutor is correct + void Verify(const Expression &expr, Vector &result, idx_t count); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, double value) { - uint64_t converted_value = EncodeDouble(value); - Store(BSWAP64(converted_value), dataptr); -} + void FillSwitch(Vector &vector, Vector &result, const SelectionVector &sel, sel_t count); -template <> -inline void Radix::EncodeData(data_ptr_t dataptr, interval_t value) { - EncodeData(dataptr, value.months); - dataptr += sizeof(value.months); - EncodeData(dataptr, value.days); - dataptr += sizeof(value.days); - EncodeData(dataptr, value.micros); -} +private: + //! Client context + ClientContext *context; + //! 
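For orientation, a minimal sketch of how the ExpressionExecutor interface above is typically driven. This is illustrative only and not part of the patch: `context`, `input_types`, `expr` and `constant_expr` are placeholders, and the ClientContext-taking constructor it relies on is declared earlier in the class, above this hunk.

    // Evaluate one bound expression over a chunk of input rows.
    duckdb::ExpressionExecutor executor(context);    // context: a duckdb::ClientContext
    executor.AddExpression(expr);                    // expr: an already-bound duckdb::Expression

    duckdb::DataChunk input;
    input.Initialize(duckdb::Allocator::Get(context), input_types);
    // ... fill `input` with rows ...

    duckdb::Vector result(expr.return_type);
    executor.ExecuteExpression(input, result);       // single-expression convenience overload

    // For a foldable expression (no column references), the static helper
    // collapses it to a single constant:
    duckdb::Value folded = duckdb::ExpressionExecutor::EvaluateScalar(context, constant_expr);

SelectExpression above is the boolean counterpart: instead of materializing a result vector, it fills a SelectionVector with the rows for which the expression evaluated to true.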
The states of the expression executor; this holds any intermediates and temporary states of expressions + vector> states; +private: + // it is possible to create an expression executor without a ClientContext - but it should be avoided + DUCKDB_API ExpressionExecutor(); + DUCKDB_API ExpressionExecutor(const vector> &exprs); +}; } // namespace duckdb - namespace duckdb { -class Key { -public: - Key(unique_ptr data, idx_t len); - - explicit Key(idx_t len); - - idx_t len; - unique_ptr data; +class BoundAggregateExpression; -public: - template - static inline unique_ptr CreateKey(T element) { - auto data = Key::CreateData(element); - return make_unique(move(data), sizeof(element)); - } +struct AggregateObject { + AggregateObject(AggregateFunction function, FunctionData *bind_data, idx_t child_count, idx_t payload_size, + AggregateType aggr_type, PhysicalType return_type, Expression *filter = nullptr); + AggregateObject(BoundAggregateExpression *aggr); - template - static inline unique_ptr CreateKey(const Value &element) { - return CreateKey(element.GetValueUnsafe()); - } + AggregateFunction function; + FunctionData *bind_data; + idx_t child_count; + idx_t payload_size; + AggregateType aggr_type; + PhysicalType return_type; + Expression *filter = nullptr; public: - data_t &operator[](std::size_t i) { - return data[i]; - } - const data_t &operator[](std::size_t i) const { - return data[i]; - } - bool operator>(const Key &k) const; - bool operator<(const Key &k) const; - bool operator>=(const Key &k) const; - bool operator==(const Key &k) const; - -private: - template - static inline unique_ptr CreateData(T value) { - auto data = unique_ptr(new data_t[sizeof(value)]); - Radix::EncodeData(data.get(), value); - return data; + bool IsDistinct() const { + return aggr_type == AggregateType::DISTINCT; } + static vector CreateAggregateObjects(const vector &bindings); }; -template <> -unique_ptr Key::CreateKey(string_t value); -template <> -unique_ptr Key::CreateKey(const char *value); - -} // namespace duckdb - -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/execution/index/art/leaf.hpp -// -// -//===----------------------------------------------------------------------===// - +struct AggregateFilterData { + AggregateFilterData(ClientContext &context, Expression &filter_expr, const vector &payload_types); + idx_t ApplyFilter(DataChunk &payload); -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/execution/index/art/node.hpp -// -// -//===----------------------------------------------------------------------===// + ExpressionExecutor filter_executor; + DataChunk filtered_payload; + SelectionVector true_sel; +}; +struct AggregateFilterDataSet { + AggregateFilterDataSet(); + vector> filter_data; +public: + void Initialize(ClientContext &context, const vector &aggregates, + const vector &payload_types); + AggregateFilterData &GetFilterData(idx_t aggr_idx); +}; +} // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/storage/meta_block_reader.hpp -// -// -//===----------------------------------------------------------------------===// +namespace duckdb { +class RowLayout { +public: + using Aggregates = vector; + using ValidityBytes = TemplatedValidityMask; + //! Creates an empty RowLayout + RowLayout(); +public: + //! 
Initializes the RowLayout with the specified types and aggregates to an empty RowLayout + void Initialize(vector types_p, Aggregates aggregates_p, bool align = true); + //! Initializes the RowLayout with the specified types to an empty RowLayout + void Initialize(vector types, bool align = true); + //! Initializes the RowLayout with the specified aggregates to an empty RowLayout + void Initialize(Aggregates aggregates_p, bool align = true); + //! Returns the number of data columns + inline idx_t ColumnCount() const { + return types.size(); + } + //! Returns a list of the column types for this data chunk + inline const vector &GetTypes() const { + return types; + } + //! Returns the number of aggregates + inline idx_t AggregateCount() const { + return aggregates.size(); + } + //! Returns a list of the aggregates for this data chunk + inline Aggregates &GetAggregates() { + return aggregates; + } + //! Returns the total width required for each row, including padding + inline idx_t GetRowWidth() const { + return row_width; + } + //! Returns the offset to the start of the data + inline idx_t GetDataOffset() const { + return flag_width; + } + //! Returns the total width required for the data, including padding + inline idx_t GetDataWidth() const { + return data_width; + } + //! Returns the offset to the start of the aggregates + inline idx_t GetAggrOffset() const { + return flag_width + data_width; + } + //! Returns the total width required for the aggregates, including padding + inline idx_t GetAggrWidth() const { + return aggr_width; + } + //! Returns the column offsets into each row + inline const vector &GetOffsets() const { + return offsets; + } + //! Returns whether all columns in this layout are constant size + inline bool AllConstant() const { + return all_constant; + } + inline idx_t GetHeapOffset() const { + return heap_pointer_offset; + } +private: + //! The types of the data columns + vector types; + //! The aggregate functions + Aggregates aggregates; + //! The width of the validity header + idx_t flag_width; + //! The width of the data portion + idx_t data_width; + //! The width of the aggregate state portion + idx_t aggr_width; + //! The width of the entire row + idx_t row_width; + //! The offsets to the columns and aggregate data in each row + vector offsets; + //! Whether all columns in this layout are constant size + bool all_constant; + //! Offset to the pointer to the heap for each row + idx_t heap_pointer_offset; +}; +} // namespace duckdb namespace duckdb { -class BlockHandle; -class BufferHandle; -class DatabaseInstance; -//! This struct is responsible for reading meta data from disk -class MetaBlockReader : public Deserializer { -public: - MetaBlockReader(DatabaseInstance &db, block_id_t block); - ~MetaBlockReader() override; +struct SortLayout; +struct SBScanState; - DatabaseInstance &db; - shared_ptr block; - BufferHandle handle; - idx_t offset; - block_id_t next_block; +using ValidityBytes = RowLayout::ValidityBytes; +struct Comparators { public: - //! Read content of size read_size into the buffer - void ReadData(data_ptr_t buffer, idx_t read_size) override; + //! Whether a tie between two blobs can be broken + static bool TieIsBreakable(const idx_t &col_idx, const data_ptr_t &row_ptr, const SortLayout &sort_layout); + //! Compares the tuples that a being read from in the 'left' and 'right blocks during merge sort + //! 
(only in case we cannot simply 'memcmp' - if there are blob columns) + static int CompareTuple(const SBScanState &left, const SBScanState &right, const data_ptr_t &l_ptr, + const data_ptr_t &r_ptr, const SortLayout &sort_layout, const bool &external_sort); + //! Compare two blob values + static int CompareVal(const data_ptr_t l_ptr, const data_ptr_t r_ptr, const LogicalType &type); private: - void ReadNewBlock(block_id_t id); + //! Compares two blob values that were initially tied by their prefix + static int BreakBlobTie(const idx_t &tie_col, const SBScanState &left, const SBScanState &right, + const SortLayout &sort_layout, const bool &external); + //! Compare two fixed-size values + template + static int TemplatedCompareVal(const data_ptr_t &left_ptr, const data_ptr_t &right_ptr); + + //! Compare two values at the pointers (can be recursive if nested type) + static int CompareValAndAdvance(data_ptr_t &l_ptr, data_ptr_t &r_ptr, const LogicalType &type); + //! Compares two fixed-size values at the given pointers + template + static int TemplatedCompareAndAdvance(data_ptr_t &left_ptr, data_ptr_t &right_ptr); + //! Compares two string values at the given pointers + static int CompareStringAndAdvance(data_ptr_t &left_ptr, data_ptr_t &right_ptr); + //! Compares two struct values at the given pointers (recursive) + static int CompareStructAndAdvance(data_ptr_t &left_ptr, data_ptr_t &right_ptr, + const child_list_t &types); + //! Compare two list values at the pointers (can be recursive if nested type) + static int CompareListAndAdvance(data_ptr_t &left_ptr, data_ptr_t &right_ptr, const LogicalType &type); + //! Compares a list of fixed-size values + template + static int TemplatedCompareListLoop(data_ptr_t &left_ptr, data_ptr_t &right_ptr, const ValidityBytes &left_validity, + const ValidityBytes &right_validity, const idx_t &count); + + //! Unwizzles an offset into a pointer + static void UnswizzleSingleValue(data_ptr_t data_ptr, const data_ptr_t &heap_ptr, const LogicalType &type); + //! Swizzles a pointer into an offset + static void SwizzleSingleValue(data_ptr_t data_ptr, const data_ptr_t &heap_ptr, const LogicalType &type); }; -} // namespace duckdb +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/execution/index/art/prefix.hpp +// duckdb/common/types/row_data_collection_scanner.hpp // // //===----------------------------------------------------------------------===// @@ -4425,278 +4900,338 @@ class MetaBlockReader : public Deserializer { namespace duckdb { -class Prefix { -public: - Prefix(); - // Prefix created from key starting on `depth`. - Prefix(Key &key, uint32_t depth, uint32_t size); - - // Returns the Prefix's size - uint32_t Size() const; - - // Subscript operator - uint8_t &operator[](idx_t idx); - // Assign operator - Prefix &operator=(const Prefix &src); - - // Move operator - Prefix &operator=(Prefix &&other) noexcept; - - // Concatenate Prefix with a key and another prefix - // Used when deleting a Node. 
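As a quick illustration of the RowLayout declared a little earlier (a sketch only, not part of the patch; the column choice is arbitrary):

    // Describe a row format for an (INTEGER, VARCHAR) payload with no aggregates.
    duckdb::RowLayout layout;
    layout.Initialize({duckdb::LogicalType::INTEGER, duckdb::LogicalType::VARCHAR});

    auto row_width = layout.GetRowWidth();    // validity flags + fixed-size data (+ aggregate states)
    auto &offsets  = layout.GetOffsets();     // per-column byte offsets within a row
    bool fixed     = layout.AllConstant();    // false here: VARCHAR data spills to a separate heap,
                                              // reachable through the pointer at GetHeapOffset()

The Comparators helpers above operate on rows laid out this way when a sort tie cannot be broken by comparing the fixed-size radix prefix alone.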
- // other.prefix + key + this->Prefix - void Concatenate(uint8_t key, Prefix &other); - // Reduces the prefix in n elements, and returns what would be the first one as a key - uint8_t Reduce(uint32_t n); - // Serializes Prefix - void Serialize(duckdb::MetaBlockWriter &writer); - // Deserializes Prefix - void Deserialize(duckdb::MetaBlockReader &reader); +class BufferHandle; +class RowDataCollection; +struct RowDataBlock; +class DataChunk; - // Compare the key with the prefix of the node, return the position where it mismatches - uint32_t KeyMismatchPosition(Key &key, uint64_t depth); +//! Used to scan the data into DataChunks after sorting +struct RowDataCollectionScanner { +public: + using Types = vector; -private: - unique_ptr prefix; - uint32_t size; -}; + struct ScanState { + explicit ScanState(const RowDataCollectionScanner &scanner_p) : scanner(scanner_p), block_idx(0), entry_idx(0) { + } -} // namespace duckdb + void PinData(); + //! The data layout + const RowDataCollectionScanner &scanner; -namespace duckdb { -enum class NodeType : uint8_t { N4 = 0, N16 = 1, N48 = 2, N256 = 3, NLeaf = 4 }; + idx_t block_idx; + idx_t entry_idx; -class ART; -class Node; -class SwizzleablePointer; + BufferHandle data_handle; + BufferHandle heap_handle; -struct InternalType { - explicit InternalType(Node *n); - void Set(uint8_t *key_p, uint16_t key_size_p, SwizzleablePointer *children_p, uint16_t children_size_p); - uint8_t *key; - uint16_t key_size; - SwizzleablePointer *children; - uint16_t children_size; -}; + // We must pin ALL blocks we are going to gather from + vector pinned_blocks; + }; -class Node { -public: - static const uint8_t EMPTY_MARKER = 48; + //! Ensure that heap blocks correspond to row blocks + static void AlignHeapBlocks(RowDataCollection &dst_block_collection, RowDataCollection &dst_string_heap, + RowDataCollection &src_block_collection, RowDataCollection &src_string_heap, + const RowLayout &layout); -public: - explicit Node(NodeType type); + RowDataCollectionScanner(RowDataCollection &rows, RowDataCollection &heap, const RowLayout &layout, bool external, + bool flush = true); - virtual ~Node() { + //! The type layout of the payload + inline const vector &GetTypes() const { + return layout.GetTypes(); } - //! number of non-null children - uint16_t count; - //! node type - NodeType type; - //! compressed path (prefix) - Prefix prefix; - //! Get the position of a child corresponding exactly to the specific byte, returns DConstants::INVALID_INDEX if not - //! exists - virtual idx_t GetChildPos(uint8_t k) { - return DConstants::INVALID_INDEX; - } - //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX - //! if there are no children matching the criteria - virtual idx_t GetChildGreaterEqual(uint8_t k, bool &equal) { - throw InternalException("Unimplemented GetChildGreaterEqual for ARTNode"); + //! The number of rows in the collection + inline idx_t Count() const { + return total_count; } - //! Get the position of the biggest element in node - virtual idx_t GetMin(); - - //! Serialize this Node - BlockPointer Serialize(ART &art, duckdb::MetaBlockWriter &writer); - static Node *Deserialize(ART &art, idx_t block_id, idx_t offset); + //! The number of rows scanned so far + inline idx_t Scanned() const { + return total_scanned; + } - //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position. if pos == - //! 
DConstants::INVALID_INDEX, then the first valid position in the node will be returned. - virtual idx_t GetNextPos(idx_t pos) { - return DConstants::INVALID_INDEX; + //! The number of remaining rows + inline idx_t Remaining() const { + return total_count - total_scanned; } - //! Get the child at the specified position in the node. pos should be between [0, count). Throws an assertion if - //! the element is not found. - virtual Node *GetChild(ART &art, idx_t pos); - //! Replaces the pointer - virtual void ReplaceChildPointer(idx_t pos, Node *node); + //! Swizzle the blocks for external scanning + //! Swizzling is all or nothing, so if we have scanned previously, + //! we need to re-swizzle. + void ReSwizzle(); + + void SwizzleBlock(RowDataBlock &data_block, RowDataBlock &heap_block); - //! Insert leaf into inner node - static void InsertLeaf(Node *&node, uint8_t key, Node *new_node); - //! Erase entry from node - static void Erase(Node *&node, idx_t pos, ART &art); + //! Scans the next data chunk from the sorted data + void Scan(DataChunk &chunk); private: - //! Serialize Internal Nodes - BlockPointer SerializeInternal(ART &art, duckdb::MetaBlockWriter &writer, InternalType &internal_type); - //! Deserialize Internal Nodes - void DeserializeInternal(duckdb::MetaBlockReader &reader); + //! The row data being scanned + RowDataCollection &rows; + //! The row heap being scanned + RowDataCollection &heap; + //! The data layout + const RowLayout layout; + //! Read state + ScanState read_state; + //! The total count of sorted_data + const idx_t total_count; + //! The number of rows scanned so far + idx_t total_scanned; + //! Addresses used to gather from the sorted data + Vector addresses = Vector(LogicalType::POINTER); + //! Whether the blocks can be flushed to disk + const bool external; + //! Whether to flush the blocks after scanning + const bool flush; + //! Whether we are unswizzling the blocks + const bool unswizzling; + + //! Checks that the newest block is valid + void ValidateUnscannedBlock() const; }; } // namespace duckdb -namespace duckdb { -class Leaf : public Node { -public: - Leaf(Key &value, unsigned depth, row_t row_id); +namespace duckdb { - Leaf(unique_ptr row_ids, idx_t num_elements, Prefix &prefix); - idx_t capacity; +class BufferManager; +struct RowDataBlock; +struct SortLayout; +struct GlobalSortState; - row_t GetRowId(idx_t index) { - return row_ids[index]; - } +enum class SortedDataType { BLOB, PAYLOAD }; +//! Object that holds sorted rows, and an accompanying heap if there are blobs +struct SortedData { public: - void Insert(row_t row_id); - void Remove(row_t row_id); - - BlockPointer Serialize(duckdb::MetaBlockWriter &writer); + SortedData(SortedDataType type, const RowLayout &layout, BufferManager &buffer_manager, GlobalSortState &state); + //! Number of rows that this object holds + idx_t Count(); + //! Initialize new block to write to + void CreateBlock(); + //! Create a slice that holds the rows between the start and end indices + unique_ptr CreateSlice(idx_t start_block_index, idx_t end_block_index, idx_t end_entry_index); + //! Unswizzles all + void Unswizzle(); - static Leaf *Deserialize(duckdb::MetaBlockReader &reader); +public: + const SortedDataType type; + //! Layout of this data + const RowLayout layout; + //! Data and heap blocks + vector> data_blocks; + vector> heap_blocks; + //! Whether the pointers in this sorted data are swizzled + bool swizzled; private: - unique_ptr row_ids; + //! The buffer manager + BufferManager &buffer_manager; + //! 
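A minimal sketch of draining the RowDataCollectionScanner declared above back into vector-format chunks (`rows`, `heap`, `layout` and `allocator` are placeholders for objects produced elsewhere; illustrative only):

    duckdb::RowDataCollectionScanner scanner(rows, heap, layout, /*external=*/false);

    duckdb::DataChunk chunk;
    chunk.Initialize(allocator, scanner.GetTypes());
    while (scanner.Remaining() != 0) {
        chunk.Reset();
        scanner.Scan(chunk);    // fills up to STANDARD_VECTOR_SIZE rows per call
        // ... consume chunk ...
    }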
The global state + GlobalSortState &state; }; -} // namespace duckdb +//! Block that holds sorted rows: radix, blob and payload data +struct SortedBlock { +public: + SortedBlock(BufferManager &buffer_manager, GlobalSortState &gstate); + //! Number of rows that this object holds + idx_t Count() const; + //! Initialize this block to write data to + void InitializeWrite(); + //! Init new block to write to + void CreateBlock(); + //! Fill this sorted block by appending the blocks held by a vector of sorted blocks + void AppendSortedBlocks(vector> &sorted_blocks); + //! Locate the block and entry index of a row in this block, + //! given an index between 0 and the total number of rows in this block + void GlobalToLocalIndex(const idx_t &global_idx, idx_t &local_block_index, idx_t &local_entry_index); + //! Create a slice that holds the rows between the start and end indices + unique_ptr CreateSlice(const idx_t start, const idx_t end, idx_t &entry_idx); + //! Size (in bytes) of the heap of this block + idx_t HeapSize() const; + //! Total size (in bytes) of this block + idx_t SizeInBytes() const; -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/execution/index/art/node4.hpp -// -// -//===----------------------------------------------------------------------===// +public: + //! Radix/memcmp sortable data + vector> radix_sorting_data; + //! Variable sized sorting data + unique_ptr blob_sorting_data; + //! Payload data + unique_ptr payload_data; +private: + //! Buffer manager, global state, and sorting layout constants + BufferManager &buffer_manager; + GlobalSortState &state; + const SortLayout &sort_layout; + const RowLayout &payload_layout; +}; +//! State used to scan a SortedBlock e.g. during merge sort +struct SBScanState { +public: + SBScanState(BufferManager &buffer_manager, GlobalSortState &state); -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/execution/index/art/swizzleable_pointer.hpp -// -// -//===----------------------------------------------------------------------===// + void PinRadix(idx_t block_idx_to); + void PinData(SortedData &sd); + data_ptr_t RadixPtr() const; + data_ptr_t DataPtr(SortedData &sd) const; + data_ptr_t HeapPtr(SortedData &sd) const; + data_ptr_t BaseHeapPtr(SortedData &sd) const; + idx_t Remaining() const; -namespace duckdb { + void SetIndices(idx_t block_idx_to, idx_t entry_idx_to); -class SwizzleablePointer { public: - ~SwizzleablePointer(); - explicit SwizzleablePointer(duckdb::MetaBlockReader &reader); - SwizzleablePointer() : pointer(0) {}; + BufferManager &buffer_manager; + const SortLayout &sort_layout; + GlobalSortState &state; - uint64_t pointer; + SortedBlock *sb; - //! Transforms from Node* to uint64_t - SwizzleablePointer &operator=(const Node *ptr); - friend bool operator!=(const SwizzleablePointer &s_ptr, const uint64_t &ptr); + idx_t block_idx; + idx_t entry_idx; - //! Extracts block info from swizzled pointer - BlockPointer GetSwizzledBlockInfo(); - //! Checks if pointer is swizzled - bool IsSwizzled(); - //! Deletes the underlying object (if necessary) and set the pointer to null_ptr - void Reset(); - //! 
Unswizzle the pointer (if possible) - Node *Unswizzle(ART &art); + BufferHandle radix_handle; - BlockPointer Serialize(ART &art, duckdb::MetaBlockWriter &writer); + BufferHandle blob_sorting_data_handle; + BufferHandle blob_sorting_heap_handle; + + BufferHandle payload_data_handle; + BufferHandle payload_heap_handle; }; -} // namespace duckdb +//! Used to scan the data into DataChunks after sorting +struct PayloadScanner { +public: + PayloadScanner(SortedData &sorted_data, GlobalSortState &global_sort_state, bool flush = true); + explicit PayloadScanner(GlobalSortState &global_sort_state, bool flush = true); -namespace duckdb { + //! Scan a single block + PayloadScanner(GlobalSortState &global_sort_state, idx_t block_idx); -class Node4 : public Node { -public: - Node4(); - uint8_t key[4]; - // Pointers to the child nodes - SwizzleablePointer children[4]; + //! The type layout of the payload + inline const vector &GetPayloadTypes() const { + return scanner->GetTypes(); + } -public: - //! Get position of a byte, returns -1 if not exists - idx_t GetChildPos(uint8_t k) override; - //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX - //! if there are no children matching the criteria - idx_t GetChildGreaterEqual(uint8_t k, bool &equal) override; - //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position - idx_t GetNextPos(idx_t pos) override; - //! Get Node4 Child - Node *GetChild(ART &art, idx_t pos) override; - //! Replace child pointer - void ReplaceChildPointer(idx_t pos, Node *node) override; + //! The number of rows scanned so far + inline idx_t Scanned() const { + return scanner->Scanned(); + } - idx_t GetMin() override; + //! The number of remaining rows + inline idx_t Remaining() const { + return scanner->Remaining(); + } - //! Insert Leaf to the Node4 - static void Insert(Node *&node, uint8_t key_byte, Node *new_child); - //! Remove Leaf from Node4 - static void Erase(Node *&node, int pos, ART &art); + //! Scans the next data chunk from the sorted data + void Scan(DataChunk &chunk); + +private: + //! The sorted data being scanned + unique_ptr rows; + unique_ptr heap; + //! 
The actual scanner + unique_ptr scanner; }; -} // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/execution/index/art/node16.hpp -// -// -//===----------------------------------------------------------------------===// +struct SBIterator { + static int ComparisonValue(ExpressionType comparison); + SBIterator(GlobalSortState &gss, ExpressionType comparison, idx_t entry_idx_p = 0); + inline idx_t GetIndex() const { + return entry_idx; + } + inline void SetIndex(idx_t entry_idx_p) { + const auto new_block_idx = entry_idx_p / block_capacity; + if (new_block_idx != scan.block_idx) { + scan.SetIndices(new_block_idx, 0); + if (new_block_idx < block_count) { + scan.PinRadix(scan.block_idx); + block_ptr = scan.RadixPtr(); + if (!all_constant) { + scan.PinData(*scan.sb->blob_sorting_data); + } + } + } + scan.entry_idx = entry_idx_p % block_capacity; + entry_ptr = block_ptr + scan.entry_idx * entry_size; + entry_idx = entry_idx_p; + } -namespace duckdb { + inline SBIterator &operator++() { + if (++scan.entry_idx < block_capacity) { + entry_ptr += entry_size; + ++entry_idx; + } else { + SetIndex(entry_idx + 1); + } -class Node16 : public Node { -public: - explicit Node16(); - uint8_t key[16]; - SwizzleablePointer children[16]; + return *this; + } -public: - //! Get position of a byte, returns -1 if not exists - idx_t GetChildPos(uint8_t k) override; - //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX - //! if there are no children matching the criteria - idx_t GetChildGreaterEqual(uint8_t k, bool &equal) override; - //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position - idx_t GetNextPos(idx_t pos) override; - //! Get Node16 Child - Node *GetChild(ART &art, idx_t pos) override; + inline SBIterator &operator--() { + if (scan.entry_idx) { + --scan.entry_idx; + --entry_idx; + entry_ptr -= entry_size; + } else { + SetIndex(entry_idx - 1); + } - //! Replace child pointer - void ReplaceChildPointer(idx_t pos, Node *node) override; + return *this; + } - idx_t GetMin() override; + inline bool Compare(const SBIterator &other) const { + int comp_res; + if (all_constant) { + comp_res = FastMemcmp(entry_ptr, other.entry_ptr, cmp_size); + } else { + comp_res = Comparators::CompareTuple(scan, other.scan, entry_ptr, other.entry_ptr, sort_layout, external); + } + + return comp_res <= cmp; + } + + // Fixed comparison parameters + const SortLayout &sort_layout; + const idx_t block_count; + const idx_t block_capacity; + const size_t cmp_size; + const size_t entry_size; + const bool all_constant; + const bool external; + const int cmp; - //! Insert node into Node16 - static void Insert(Node *&node, uint8_t key_byte, Node *child); - //! Shrink to node 4 - static void Erase(Node *&node, int pos, ART &art); + // Iteration state + SBScanState scan; + idx_t entry_idx; + data_ptr_t block_ptr; + data_ptr_t entry_ptr; }; + } // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/execution/index/art/node48.hpp +// duckdb/common/types/row_data_collection.hpp // // //===----------------------------------------------------------------------===// @@ -4705,42 +5240,10 @@ class Node16 : public Node { -namespace duckdb { - -class Node48 : public Node { -public: - explicit Node48(); - uint8_t child_index[256]; - - SwizzleablePointer children[48]; - -public: - //! 
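To illustrate the SBIterator declared above, a sketch under the assumption that `global_sort` is a GlobalSortState whose merge phase has produced a single sorted block and `row_count` is its total row count (not part of the patch):

    // Position two iterators over the sorted radix data and compare rows.
    duckdb::SBIterator lhs(global_sort, duckdb::ExpressionType::COMPARE_LESSTHANOREQUALTO);
    duckdb::SBIterator rhs(global_sort, duckdb::ExpressionType::COMPARE_LESSTHANOREQUALTO);
    rhs.SetIndex(row_count - 1);         // jump to the last sorted row

    bool in_order = lhs.Compare(rhs);    // true when lhs sorts at or before rhs;
                                         // FastMemcmp on the radix keys, or
                                         // Comparators::CompareTuple when blob keys are involved
    ++lhs;                               // advance to the next row, pinning new blocks as needed

The comparison type passed to the constructor only selects the threshold that Compare uses (via ComparisonValue); it does not change the ordering itself.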
Get position of a byte, returns -1 if not exists - idx_t GetChildPos(uint8_t k) override; - //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX - //! if there are no children matching the criteria - idx_t GetChildGreaterEqual(uint8_t k, bool &equal) override; - //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position - idx_t GetNextPos(idx_t pos) override; - //! Get Node48 Child - Node *GetChild(ART &art, idx_t pos) override; - - idx_t GetMin() override; - //! Replace child pointer - void ReplaceChildPointer(idx_t pos, Node *node) override; - - //! Insert node in Node48 - static void Insert(Node *&node, uint8_t key_byte, Node *child); - - //! Shrink to node 16 - static void Erase(Node *&node, int pos, ART &art); -}; -} // namespace duckdb - //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/execution/index/art/node256.hpp +// duckdb/storage/buffer_manager.hpp // // //===----------------------------------------------------------------------===// @@ -4749,42 +5252,13 @@ class Node48 : public Node { -namespace duckdb { - -class Node256 : public Node { -public: - explicit Node256(); - - SwizzleablePointer children[256]; - -public: - //! Get position of a specific byte, returns DConstants::INVALID_INDEX if not exists - idx_t GetChildPos(uint8_t k) override; - //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX - //! if there are no children matching the criteria - idx_t GetChildGreaterEqual(uint8_t k, bool &equal) override; - //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position - idx_t GetNextPos(idx_t pos) override; - //! Get Node256 Child - Node *GetChild(ART &art, idx_t pos) override; - - //! Replace child pointer - void ReplaceChildPointer(idx_t pos, Node *node) override; - idx_t GetMin() override; - - //! Insert node From Node256 - static void Insert(Node *&node, uint8_t key_byte, Node *child); - //! Shrink to node 48 - static void Erase(Node *&node, int pos, ART &art); -}; -} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/execution/index/art/iterator.hpp +// duckdb/storage/buffer/block_handle.hpp // // //===----------------------------------------------------------------------===// @@ -4794,444 +5268,565 @@ class Node256 : public Node { -namespace duckdb { - -struct IteratorEntry { - IteratorEntry() { - } - IteratorEntry(Node *node, idx_t pos) : node(node), pos(pos) { - } - Node *node = nullptr; - idx_t pos = 0; -}; -//! Keeps track of the current key in the iterator -class IteratorCurrentKey { -public: - //! Push Byte - void Push(uint8_t key); - //! Pops n elements - void Pop(idx_t n); - //! Subscript operator - uint8_t &operator[](idx_t idx); - bool operator>(const Key &k) const; - bool operator>=(const Key &k) const; - bool operator==(const Key &k) const; +namespace duckdb { +class BlockManager; +class BufferHandle; +class BufferManager; +class DatabaseInstance; +class FileBuffer; -private: - //! The current key position - idx_t cur_key_pos = 0; - //! The current key of the Leaf Node - vector key; -}; +enum class BlockState : uint8_t { BLOCK_UNLOADED = 0, BLOCK_LOADED = 1 }; -class Iterator { -public: - //! Current Key - IteratorCurrentKey cur_key; - //! 
Pointer to the ART tree we are iterating - ART *art = nullptr; +struct BufferPoolReservation { + idx_t size {0}; - //! Scan the tree - bool Scan(Key *bound, idx_t max_count, vector &result_ids, bool is_inclusive); - //! Finds minimum value of the tree - void FindMinimum(Node &node); - //! Goes to lower bound - bool LowerBound(Node *node, Key &key, bool inclusive); + BufferPoolReservation() { + } + BufferPoolReservation(const BufferPoolReservation &) = delete; + BufferPoolReservation &operator=(const BufferPoolReservation &) = delete; -private: - //! Stack of iterator entries - stack nodes; - //! Last visited leaf - Leaf *last_leaf = nullptr; - //! Go to the next node - bool Next(); - //! Push part of the key to cur_key - void PushKey(Node *node, uint16_t pos); -}; -} // namespace duckdb + BufferPoolReservation(BufferPoolReservation &&) noexcept; + BufferPoolReservation &operator=(BufferPoolReservation &&) noexcept; + ~BufferPoolReservation(); -namespace duckdb { + void Resize(atomic &counter, idx_t new_size); + void Merge(BufferPoolReservation &&src); +}; -struct ARTIndexScanState : public IndexScanState { - ARTIndexScanState() : checked(false), result_index(0) { +struct TempBufferPoolReservation : BufferPoolReservation { + atomic &counter; + TempBufferPoolReservation(atomic &counter, idx_t size) : counter(counter) { + Resize(counter, size); + } + TempBufferPoolReservation(TempBufferPoolReservation &&) = default; + ~TempBufferPoolReservation() { + Resize(counter, 0); } - - Value values[2]; - ExpressionType expressions[2]; - bool checked; - vector result_ids; - Iterator iterator; - //! Stores the current leaf - Leaf *cur_leaf = nullptr; - //! Offset to leaf - idx_t result_index = 0; }; -enum VerifyExistenceType : uint8_t { - APPEND = 0, // for purpose to append into table - APPEND_FK = 1, // for purpose to append into table has foreign key - DELETE_FK = 2 // for purpose to delete from table related to foreign key -}; +class BlockHandle { + friend class BlockManager; + friend struct BufferEvictionNode; + friend class BufferHandle; + friend class BufferManager; -class ART : public Index { public: - ART(const vector &column_ids, const vector> &unbound_expressions, - IndexConstraintType constraint_type, DatabaseInstance &db, idx_t block_id = DConstants::INVALID_INDEX, - idx_t block_offset = DConstants::INVALID_INDEX); - ~ART() override; + BlockHandle(BlockManager &block_manager, block_id_t block_id); + BlockHandle(BlockManager &block_manager, block_id_t block_id, unique_ptr buffer, bool can_destroy, + idx_t block_size, BufferPoolReservation &&reservation); + ~BlockHandle(); - //! Root of the tree - Node *tree; + BlockManager &block_manager; - DatabaseInstance &db; +public: + block_id_t BlockId() { + return block_id; + } - //! Initialize a scan on the index with the given expression and column ids - //! to fetch from the base table for a single predicate - unique_ptr InitializeScanSinglePredicate(Transaction &transaction, Value value, - ExpressionType expression_type) override; + int32_t Readers() const { + return readers; + } - //! Initialize a scan on the index with the given expression and column ids - //! to fetch from the base table for two predicates - unique_ptr InitializeScanTwoPredicates(Transaction &transaction, Value low_value, - ExpressionType low_expression_type, Value high_value, - ExpressionType high_expression_type) override; + inline bool IsSwizzled() const { + return !unswizzled; + } - //! 
Perform a lookup on the index - bool Scan(Transaction &transaction, DataTable &table, IndexScanState &state, idx_t max_count, - vector &result_ids) override; - //! Append entries to the index - bool Append(IndexLock &lock, DataChunk &entries, Vector &row_identifiers) override; - //! Verify that data can be appended to the index - void VerifyAppend(DataChunk &chunk) override; - //! Verify that data can be appended to the index for foreign key constraint - void VerifyAppendForeignKey(DataChunk &chunk, string *err_msg_ptr) override; - //! Verify that data can be delete from the index for foreign key constraint - void VerifyDeleteForeignKey(DataChunk &chunk, string *err_msg_ptr) override; - //! Delete entries in the index - void Delete(IndexLock &lock, DataChunk &entries, Vector &row_identifiers) override; - //! Insert data into the index. - bool Insert(IndexLock &lock, DataChunk &data, Vector &row_ids) override; + inline void SetSwizzling(const char *unswizzler) { + unswizzled = unswizzler; + } - bool SearchEqual(ARTIndexScanState *state, idx_t max_count, vector &result_ids); - //! Search Equal used for Joins that do not need to fetch data - void SearchEqualJoinNoFetch(Value &equal_value, idx_t &result_size); - //! Serialized the ART - BlockPointer Serialize(duckdb::MetaBlockWriter &writer) override; + inline void SetCanDestroy(bool can_destroy_p) { + can_destroy = can_destroy_p; + } private: - //! Insert a row id into a leaf node - bool InsertToLeaf(Leaf &leaf, row_t row_id); - //! Insert the leaf value into the tree - bool Insert(Node *&node, unique_ptr key, unsigned depth, row_t row_id); - - //! Erase element from leaf (if leaf has more than one value) or eliminate the leaf itself - void Erase(Node *&node, Key &key, unsigned depth, row_t row_id); - - //! Find the node with a matching key, optimistic version - Node *Lookup(Node *node, Key &key, unsigned depth); - - bool SearchGreater(ARTIndexScanState *state, bool inclusive, idx_t max_count, vector &result_ids); - bool SearchLess(ARTIndexScanState *state, bool inclusive, idx_t max_count, vector &result_ids); - bool SearchCloseRange(ARTIndexScanState *state, bool left_inclusive, bool right_inclusive, idx_t max_count, - vector &result_ids); - - void GenerateKeys(DataChunk &input, vector> &keys); + static BufferHandle Load(shared_ptr &handle, unique_ptr buffer = nullptr); + unique_ptr UnloadAndTakeBlock(); + void Unload(); + bool CanUnload(); - void VerifyExistence(DataChunk &chunk, VerifyExistenceType verify_type, string *err_msg_ptr = nullptr); + //! The block-level lock + mutex lock; + //! Whether or not the block is loaded/unloaded + atomic state; + //! Amount of concurrent readers + atomic readers; + //! The block id of the block + const block_id_t block_id; + //! Pointer to loaded data (if any) + unique_ptr buffer; + //! Internal eviction timestamp + atomic eviction_timestamp; + //! Whether or not the buffer can be destroyed (only used for temporary buffers) + bool can_destroy; + //! The memory usage of the block (when loaded). If we are pinning/loading + //! an unloaded block, this tells us how much memory to reserve. + idx_t memory_usage; + //! Current memory reservation / usage + BufferPoolReservation memory_charge; + //! Does the block contain any memory pointers? 
+ const char *unswizzled; }; } // namespace duckdb + namespace duckdb { +class BlockManager; +class DatabaseInstance; +class TemporaryDirectoryHandle; +struct EvictionQueue; -IndexCatalogEntry::IndexCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateIndexInfo *info) - : StandardEntry(CatalogType::INDEX_ENTRY, schema, catalog, info->index_name), index(nullptr), sql(info->sql) { -} +//! The buffer manager is in charge of handling memory management for the database. It hands out memory buffers that can +//! be used by the database internally. +// +//! BlockIds are NOT unique within the context of a BufferManager. A buffer manager +//! can be shared by many BlockManagers. +class BufferManager { + friend class BufferHandle; + friend class BlockHandle; + friend class BlockManager; -IndexCatalogEntry::~IndexCatalogEntry() { - // remove the associated index from the info - if (!info || !index) { - return; - } - info->indexes.RemoveIndex(index); -} +public: + BufferManager(DatabaseInstance &db, string temp_directory, idx_t maximum_memory); + virtual ~BufferManager(); -string IndexCatalogEntry::ToSQL() { - if (sql.empty()) { - throw InternalException("Cannot convert INDEX to SQL because it was not created with a SQL statement"); - } - if (sql[sql.size() - 1] != ';') { - sql += ";"; - } - return sql; -} + //! Register an in-memory buffer of arbitrary size, as long as it is >= BLOCK_SIZE. can_destroy signifies whether or + //! not the buffer can be destroyed when unpinned, or whether or not it needs to be written to a temporary file so + //! it can be reloaded. The resulting buffer will already be allocated, but needs to be pinned in order to be used. + shared_ptr RegisterMemory(idx_t block_size, bool can_destroy); + //! Registers an in-memory buffer that cannot be unloaded until it is destroyed + //! This buffer can be small (smaller than BLOCK_SIZE) + //! Unpin and pin are nops on this block of memory + shared_ptr RegisterSmallMemory(idx_t block_size); -void IndexCatalogEntry::Serialize(duckdb::MetaBlockWriter &serializer) { - // Here we serialize the index metadata in the following order: - // schema name, table name, index name, sql, index type, index constraint type, expression list. - // column_ids, unbound_expression - FieldWriter writer(serializer); - writer.WriteString(info->schema); - writer.WriteString(info->table); - writer.WriteString(name); - writer.WriteString(sql); - writer.WriteField(index->type); - writer.WriteField(index->constraint_type); - writer.WriteSerializableList(expressions); - writer.WriteSerializableList(parsed_expressions); - writer.WriteList(index->column_ids); - writer.Finalize(); -} + //! Allocate an in-memory buffer with a single pin. + //! The allocated memory is released when the buffer handle is destroyed. + DUCKDB_API BufferHandle Allocate(idx_t block_size); -unique_ptr IndexCatalogEntry::Deserialize(Deserializer &source, ClientContext &context) { - // Here we deserialize the index metadata in the following order: - // root block, root offset, schema name, table name, index name, sql, index type, index constraint type, expression - // list. + //! Reallocate an in-memory buffer that is pinned. + void ReAllocate(shared_ptr &handle, idx_t block_size); - auto create_index_info = make_unique(); + BufferHandle Pin(shared_ptr &handle); + void Unpin(shared_ptr &handle); - FieldReader reader(source); + //! Set a new memory limit to the buffer manager, throws an exception if the new limit is too low and not enough + //! 
blocks can be evicted + void SetLimit(idx_t limit = (idx_t)-1); - create_index_info->schema = reader.ReadRequired(); - create_index_info->table = make_unique(); - create_index_info->table->schema_name = create_index_info->schema; - create_index_info->table->table_name = reader.ReadRequired(); - create_index_info->index_name = reader.ReadRequired(); - create_index_info->sql = reader.ReadRequired(); - create_index_info->index_type = IndexType(reader.ReadRequired()); - create_index_info->constraint_type = IndexConstraintType(reader.ReadRequired()); - create_index_info->expressions = reader.ReadRequiredSerializableList(); - create_index_info->parsed_expressions = reader.ReadRequiredSerializableList(); + static BufferManager &GetBufferManager(ClientContext &context); + DUCKDB_API static BufferManager &GetBufferManager(DatabaseInstance &db); - create_index_info->column_ids = reader.ReadRequiredList(); - reader.Finalize(); - return create_index_info; -} + idx_t GetUsedMemory() { + return current_memory; + } + idx_t GetMaxMemory() { + return maximum_memory; + } -} // namespace duckdb + const string &GetTemporaryDirectory() { + return temp_directory; + } + void SetTemporaryDirectory(string new_dir); + DUCKDB_API Allocator &GetBufferAllocator(); -namespace duckdb { + DatabaseInstance &GetDatabase() { + return db; + } -PragmaFunctionCatalogEntry::PragmaFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, - CreatePragmaFunctionInfo *info) - : StandardEntry(CatalogType::PRAGMA_FUNCTION_ENTRY, schema, catalog, info->name), functions(move(info->functions)) { -} + //! Construct a managed buffer. + //! The block_id is just used for internal tracking. It doesn't map to any actual + //! BlockManager. + virtual unique_ptr ConstructManagedBuffer(idx_t size, unique_ptr &&source, + FileBufferType type = FileBufferType::MANAGED_BUFFER); -} // namespace duckdb + DUCKDB_API void ReserveMemory(idx_t size); + DUCKDB_API void FreeReservedMemory(idx_t size); -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/catalog/catalog_entry/macro_catalog_entry.hpp -// -// -//===----------------------------------------------------------------------===// +private: + //! Evict blocks until the currently used memory + extra_memory fit, returns false if this was not possible + //! (i.e. not enough blocks could be evicted) + //! If the "buffer" argument is specified AND the system can find a buffer to re-use for the given allocation size + //! "buffer" will be made to point to the re-usable memory. Note that this is not guaranteed. + //! Returns a pair. result.first indicates if eviction was successful. result.second contains the + //! reservation handle, which can be moved to the BlockHandle that will own the reservation. + struct EvictionResult { + bool success; + TempBufferPoolReservation reservation; + }; + EvictionResult EvictBlocks(idx_t extra_memory, idx_t memory_limit, unique_ptr *buffer = nullptr); + //! Helper + template + TempBufferPoolReservation EvictBlocksOrThrow(idx_t extra_memory, idx_t limit, unique_ptr *buffer, + ARGS...); + //! Garbage collect eviction queue + void PurgeQueue(); + //! Write a temporary buffer to disk + void WriteTemporaryBuffer(block_id_t block_id, FileBuffer &buffer); + //! Read a temporary buffer from disk + unique_ptr ReadTemporaryBuffer(block_id_t id, unique_ptr buffer = nullptr); + //! 
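A brief sketch of how this BufferManager interface is used (assuming `db` is a DatabaseInstance created elsewhere; illustrative only, not part of the patch):

    auto &buffer_manager = duckdb::BufferManager::GetBufferManager(db);

    // Scratch memory with a single pin; released when the handle is destroyed.
    duckdb::BufferHandle scratch = buffer_manager.Allocate(duckdb::Storage::BLOCK_SIZE);
    // scratch.Ptr() points at BLOCK_SIZE writable bytes

    // A re-pinnable block: with can_destroy = false it is written to a temporary
    // file when evicted and reloaded on the next Pin().
    auto block = buffer_manager.RegisterMemory(duckdb::Storage::BLOCK_SIZE, /*can_destroy=*/false);
    {
        duckdb::BufferHandle pinned = buffer_manager.Pin(block);
        // ... read and write through pinned.Ptr() ...
    }   // destroying the handle unpins; the block becomes evictable again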
Get the path of the temporary buffer + string GetTemporaryPath(block_id_t id); + void DeleteTemporaryFile(block_id_t id); + void RequireTemporaryDirectory(); + void AddToEvictionQueue(shared_ptr &handle); -namespace duckdb { + string InMemoryWarning(); -//! A macro function in the catalog -class TableMacroCatalogEntry : public MacroCatalogEntry { -public: - TableMacroCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateMacroInfo *info); + static data_ptr_t BufferAllocatorAllocate(PrivateAllocatorData *private_data, idx_t size); + static void BufferAllocatorFree(PrivateAllocatorData *private_data, data_ptr_t pointer, idx_t size); + static data_ptr_t BufferAllocatorRealloc(PrivateAllocatorData *private_data, data_ptr_t pointer, idx_t old_size, + idx_t size); -public: - //! Serialize the meta information of the ScalarMacroCatalogEntry - void Serialize(Serializer &serializer) override; - //! Deserializes to a CreateMacroInfo - static unique_ptr Deserialize(Deserializer &source, ClientContext &context); + //! When the BlockHandle reaches 0 readers, this creates a new FileBuffer for this BlockHandle and + //! overwrites the data within with garbage. Any readers that do not hold the pin will notice TODO rewrite + void VerifyZeroReaders(shared_ptr &handle); + +private: + //! The database instance + DatabaseInstance &db; + //! The lock for changing the memory limit + mutex limit_lock; + //! The current amount of memory that is occupied by the buffer manager (in bytes) + atomic current_memory; + //! The maximum amount of memory that the buffer manager can keep (in bytes) + atomic maximum_memory; + //! The directory name where temporary files are stored + string temp_directory; + //! Lock for creating the temp handle + mutex temp_handle_lock; + //! Handle for the temporary directory + unique_ptr temp_directory_handle; + //! Eviction queue + unique_ptr queue; + //! The temporary id used for managed buffers + atomic temporary_id; + //! Total number of insertions into the eviction queue. This guides the schedule for calling PurgeQueue. + atomic queue_insertions; + //! Allocator associated with the buffer manager, that passes all allocations through this buffer manager + Allocator buffer_allocator; + //! Block manager for temp data + unique_ptr temp_block_manager; }; } // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/function/scalar_macro_function.hpp -// -// -//===----------------------------------------------------------------------===// +namespace duckdb { +struct RowDataBlock { +public: + RowDataBlock(BufferManager &buffer_manager, idx_t capacity, idx_t entry_size) + : capacity(capacity), entry_size(entry_size), count(0), byte_offset(0) { + idx_t size = MaxValue(Storage::BLOCK_SIZE, capacity * entry_size); + block = buffer_manager.RegisterMemory(size, false); + } + explicit RowDataBlock(idx_t entry_size) : entry_size(entry_size) { + } + //! The buffer block handle + shared_ptr block; + //! Capacity (number of entries) and entry size that fit in this block + idx_t capacity; + const idx_t entry_size; + //! Number of entries currently in this block + idx_t count; + //! Write offset (if variable size entries) + idx_t byte_offset; -//! The SelectStatement of the view +private: + //! 
Implicit copying is not allowed + RowDataBlock(const RowDataBlock &) = delete; +public: + unique_ptr Copy() { + auto result = make_unique(entry_size); + result->block = block; + result->capacity = capacity; + result->count = count; + result->byte_offset = byte_offset; + return result; + } +}; +struct BlockAppendEntry { + BlockAppendEntry(data_ptr_t baseptr, idx_t count) : baseptr(baseptr), count(count) { + } + data_ptr_t baseptr; + idx_t count; +}; +class RowDataCollection { +public: + RowDataCollection(BufferManager &buffer_manager, idx_t block_capacity, idx_t entry_size, bool keep_pinned = false); + unique_ptr CloneEmpty(bool keep_pinned = false) const { + return make_unique(buffer_manager, block_capacity, entry_size, keep_pinned); + } + //! BufferManager + BufferManager &buffer_manager; + //! The total number of stored entries + idx_t count; + //! The number of entries per block + idx_t block_capacity; + //! Size of entries in the blocks + idx_t entry_size; + //! The blocks holding the main data + vector> blocks; + //! The blocks that this collection currently has pinned + vector pinned_blocks; + //! Whether the blocks should stay pinned (necessary for e.g. a heap) + const bool keep_pinned; +public: + idx_t AppendToBlock(RowDataBlock &block, BufferHandle &handle, vector &append_entries, + idx_t remaining, idx_t entry_sizes[]); + RowDataBlock &CreateBlock(); + vector Build(idx_t added_count, data_ptr_t key_locations[], idx_t entry_sizes[], + const SelectionVector *sel = FlatVector::IncrementalSelectionVector()); + void Merge(RowDataCollection &other); -namespace duckdb { + void Clear() { + blocks.clear(); + pinned_blocks.clear(); + count = 0; + } -class ScalarMacroFunction : public MacroFunction { -public: - ScalarMacroFunction(unique_ptr expression); + //! The size (in bytes) of this RowDataCollection if it were stored in a single block + idx_t SizeInBytes() const { + idx_t bytes = 0; + if (entry_size == 1) { + for (auto &block : blocks) { + bytes += block->byte_offset; + } + } else { + bytes = count * entry_size; + } + return bytes; + } - ScalarMacroFunction(void); - //! The macro expression - unique_ptr expression; + static inline idx_t EntriesPerBlock(idx_t width) { + return Storage::BLOCK_SIZE / width; + } -public: - unique_ptr Copy() override; +private: + mutex rdc_lock; - string ToSQL(const string &schema, const string &name) override; + //! 
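A sketch of reserving row space in the RowDataCollection declared above (`buffer_manager`, `layout` and `n` are placeholders, with n at most STANDARD_VECTOR_SIZE; passing nullptr for entry_sizes is the fixed-width case; illustrative only):

    // Reserve space for n fixed-width rows and collect write pointers.
    const duckdb::idx_t entry_size = layout.GetRowWidth();    // e.g. from a RowLayout as above
    duckdb::RowDataCollection rows(buffer_manager,
                                   duckdb::RowDataCollection::EntriesPerBlock(entry_size),
                                   entry_size);

    duckdb::data_ptr_t key_locations[STANDARD_VECTOR_SIZE];
    auto handles = rows.Build(n, key_locations, nullptr);     // nullptr: constant-size entries
    // key_locations[0..n) now point at the reserved rows; keep `handles` alive
    // while scattering column data into them.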
Copying is not allowed + RowDataCollection(const RowDataCollection &) = delete; }; } // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/function/table_macro_function.hpp -// -// -//===----------------------------------------------------------------------===// - - +namespace duckdb { +class RowLayout; +struct LocalSortState; +struct SortConstants { + static constexpr idx_t VALUES_PER_RADIX = 256; + static constexpr idx_t MSD_RADIX_LOCATIONS = VALUES_PER_RADIX + 1; + static constexpr idx_t INSERTION_SORT_THRESHOLD = 24; + static constexpr idx_t MSD_RADIX_SORT_SIZE_THRESHOLD = 4; +}; +struct SortLayout { +public: + SortLayout() { + } + explicit SortLayout(const vector &orders); + SortLayout GetPrefixComparisonLayout(idx_t num_prefix_cols) const; +public: + idx_t column_count; + vector order_types; + vector order_by_null_types; + vector logical_types; + bool all_constant; + vector constant_size; + vector column_sizes; + vector prefix_lengths; + vector stats; + vector has_null; + idx_t comparison_size; + idx_t entry_size; -namespace duckdb { + RowLayout blob_layout; + unordered_map sorting_to_blob_col; +}; -class TableMacroFunction : public MacroFunction { +struct GlobalSortState { public: - TableMacroFunction(unique_ptr query_node); - TableMacroFunction(void); + GlobalSortState(BufferManager &buffer_manager, const vector &orders, RowLayout &payload_layout); - //! The main query node - unique_ptr query_node; + //! Add local state sorted data to this global state + void AddLocalState(LocalSortState &local_sort_state); + //! Prepares the GlobalSortState for the merge sort phase (after completing radix sort phase) + void PrepareMergePhase(); + //! Initializes the global sort state for another round of merging + void InitializeMergeRound(); + //! Completes the cascaded merge sort round. + //! Pass true if you wish to use the radix data for further comparisons. + void CompleteMergeRound(bool keep_radix_data = false); + //! Print the sorted data to the console. + void Print(); public: - unique_ptr Copy() override; + //! The lock for updating the order global state + mutex lock; + //! The buffer manager + BufferManager &buffer_manager; - string ToSQL(const string &schema, const string &name) override; -}; + //! Sorting and payload layouts + const SortLayout sort_layout; + const RowLayout payload_layout; -} // namespace duckdb + //! Sorted data + vector> sorted_blocks; + vector>> sorted_blocks_temp; + unique_ptr odd_one_out; + //! Pinned heap data (if sorting in memory) + vector> heap_blocks; + vector pinned_blocks; -namespace duckdb { + //! Capacity (number of rows) used to initialize blocks + idx_t block_capacity; + //! Whether we are doing an external sort + bool external; -MacroCatalogEntry::MacroCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateMacroInfo *info) - : StandardEntry( - (info->function->type == MacroType::SCALAR_MACRO ? CatalogType::MACRO_ENTRY : CatalogType::TABLE_MACRO_ENTRY), - schema, catalog, info->name), - function(move(info->function)) { - this->temporary = info->temporary; - this->internal = info->internal; -} + //! 
Progress in merge path stage + idx_t pair_idx; + idx_t num_pairs; + idx_t l_start; + idx_t r_start; +}; -ScalarMacroCatalogEntry::ScalarMacroCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateMacroInfo *info) - : MacroCatalogEntry(catalog, schema, info) { -} +struct LocalSortState { +public: + LocalSortState(); -void ScalarMacroCatalogEntry::Serialize(Serializer &main_serializer) { - D_ASSERT(!internal); - auto &scalar_function = (ScalarMacroFunction &)*function; - FieldWriter writer(main_serializer); - writer.WriteString(schema->name); - writer.WriteString(name); - writer.WriteSerializable(*scalar_function.expression); - // writer.WriteSerializableList(function->parameters); - writer.WriteSerializableList(function->parameters); - writer.WriteField((uint32_t)function->default_parameters.size()); - auto &serializer = writer.GetSerializer(); - for (auto &kv : function->default_parameters) { - serializer.WriteString(kv.first); - kv.second->Serialize(serializer); - } - writer.Finalize(); -} + //! Initialize the layouts and RowDataCollections + void Initialize(GlobalSortState &global_sort_state, BufferManager &buffer_manager_p); + //! Sink one DataChunk into the local sort state + void SinkChunk(DataChunk &sort, DataChunk &payload); + //! Size of accumulated data in bytes + idx_t SizeInBytes() const; + //! Sort the data accumulated so far + void Sort(GlobalSortState &global_sort_state, bool reorder_heap); + //! Concatenate the blocks held by a RowDataCollection into a single block + static unique_ptr ConcatenateBlocks(RowDataCollection &row_data); -unique_ptr ScalarMacroCatalogEntry::Deserialize(Deserializer &main_source, ClientContext &context) { - auto info = make_unique(CatalogType::MACRO_ENTRY); - FieldReader reader(main_source); - info->schema = reader.ReadRequired(); - info->name = reader.ReadRequired(); - auto expression = reader.ReadRequiredSerializable(); - auto func = make_unique(move(expression)); - info->function = move(func); - info->function->parameters = reader.ReadRequiredSerializableList(); - auto default_param_count = reader.ReadRequired(); - auto &source = reader.GetSource(); - for (idx_t i = 0; i < default_param_count; i++) { - auto name = source.Read(); - info->function->default_parameters[name] = ParsedExpression::Deserialize(source); - } - // dont like this - // info->type=CatalogType::MACRO_ENTRY; - reader.Finalize(); - return info; -} +private: + //! Sorts the data in the newly created SortedBlock + void SortInMemory(); + //! Re-order the local state after sorting + void ReOrder(GlobalSortState &gstate, bool reorder_heap); + //! Re-order a SortedData object after sorting + void ReOrder(SortedData &sd, data_ptr_t sorting_ptr, RowDataCollection &heap, GlobalSortState &gstate, + bool reorder_heap); -TableMacroCatalogEntry::TableMacroCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateMacroInfo *info) - : MacroCatalogEntry(catalog, schema, info) { -} +public: + //! Whether this local state has been initialized + bool initialized; + //! The buffer manager + BufferManager *buffer_manager; + //! The sorting and payload layouts + const SortLayout *sort_layout; + const RowLayout *payload_layout; + //! Radix/memcmp sortable data + unique_ptr radix_sorting_data; + //! Variable sized sorting data and accompanying heap + unique_ptr blob_sorting_data; + unique_ptr blob_sorting_heap; + //! Payload data and accompanying heap + unique_ptr payload_data; + unique_ptr payload_heap; + //! 
Sorted data + vector> sorted_blocks; -void TableMacroCatalogEntry::Serialize(Serializer &main_serializer) { - D_ASSERT(!internal); - FieldWriter writer(main_serializer); +private: + //! Selection vector and addresses for scattering the data to rows + const SelectionVector &sel_ptr = *FlatVector::IncrementalSelectionVector(); + Vector addresses = Vector(LogicalType::POINTER); +}; - auto &table_function = (TableMacroFunction &)*function; - writer.WriteString(schema->name); - writer.WriteString(name); - writer.WriteSerializable(*table_function.query_node); - writer.WriteSerializableList(function->parameters); - writer.WriteField((uint32_t)function->default_parameters.size()); - auto &serializer = writer.GetSerializer(); - for (auto &kv : function->default_parameters) { - serializer.WriteString(kv.first); - kv.second->Serialize(serializer); - } - writer.Finalize(); -} +struct MergeSorter { +public: + MergeSorter(GlobalSortState &state, BufferManager &buffer_manager); -unique_ptr TableMacroCatalogEntry::Deserialize(Deserializer &main_source, ClientContext &context) { - auto info = make_unique(CatalogType::TABLE_MACRO_ENTRY); - FieldReader reader(main_source); - info->schema = reader.ReadRequired(); - info->name = reader.ReadRequired(); - auto query_node = reader.ReadRequiredSerializable(); - auto table_function = make_unique(move(query_node)); - info->function = move(table_function); - info->function->parameters = reader.ReadRequiredSerializableList(); - auto default_param_count = reader.ReadRequired(); - auto &source = reader.GetSource(); - for (idx_t i = 0; i < default_param_count; i++) { - auto name = source.Read(); - info->function->default_parameters[name] = ParsedExpression::Deserialize(source); - } + //! Finds and merges partitions until the current cascaded merge round is finished + void PerformInMergeRound(); - reader.Finalize(); +private: + //! The global sorting state + GlobalSortState &state; + //! The sorting and payload layouts + BufferManager &buffer_manager; + const SortLayout &sort_layout; - return info; -} + //! The left and right reader + unique_ptr left; + unique_ptr right; -} // namespace duckdb + //! Input and output blocks + unique_ptr left_input; + unique_ptr right_input; + SortedBlock *result; + +private: + //! Computes the left and right block that will be merged next (Merge Path partition) + void GetNextPartition(); + //! Finds the boundary of the next partition using binary search + void GetIntersection(const idx_t diagonal, idx_t &l_idx, idx_t &r_idx); + //! Compare values within SortedBlocks using a global index + int CompareUsingGlobalIndex(SBScanState &l, SBScanState &r, const idx_t l_idx, const idx_t r_idx); + //! Finds the next partition and merges it + void MergePartition(); + //! Computes how the next 'count' tuples should be merged by setting the 'left_smaller' array + void ComputeMerge(const idx_t &count, bool left_smaller[]); + //! Merges the radix sorting blocks according to the 'left_smaller' array + void MergeRadix(const idx_t &count, const bool left_smaller[]); + //! Merges SortedData according to the 'left_smaller' array + void MergeData(SortedData &result_data, SortedData &l_data, SortedData &r_data, const idx_t &count, + const bool left_smaller[], idx_t next_entry_sizes[], bool reset_indices); + //! 
Merges constant size rows according to the 'left_smaller' array + void MergeRows(data_ptr_t &l_ptr, idx_t &l_entry_idx, const idx_t &l_count, data_ptr_t &r_ptr, idx_t &r_entry_idx, + const idx_t &r_count, RowDataBlock &target_block, data_ptr_t &target_ptr, const idx_t &entry_size, + const bool left_smaller[], idx_t &copied, const idx_t &count); + //! Flushes constant size rows into the result + void FlushRows(data_ptr_t &source_ptr, idx_t &source_entry_idx, const idx_t &source_count, + RowDataBlock &target_block, data_ptr_t &target_ptr, const idx_t &entry_size, idx_t &copied, + const idx_t &count); + //! Flushes blob rows and accompanying heap + void FlushBlobs(const RowLayout &layout, const idx_t &source_count, data_ptr_t &source_data_ptr, + idx_t &source_entry_idx, data_ptr_t &source_heap_ptr, RowDataBlock &target_data_block, + data_ptr_t &target_data_ptr, RowDataBlock &target_heap_block, BufferHandle &target_heap_handle, + data_ptr_t &target_heap_ptr, idx_t &copied, const idx_t &count); +}; +} // namespace duckdb @@ -5239,102 +5834,214 @@ unique_ptr TableMacroCatalogEntry::Deserialize(Deserializer &ma +namespace duckdb { +class ClientContext; +class TableIOManager; +class Transaction; +struct IndexLock; +//! The index is an abstract base class that serves as the basis for indexes +class Index { +public: + Index(IndexType type, TableIOManager &table_io_manager, const vector &column_ids, + const vector> &unbound_expressions, IndexConstraintType constraint_type); + virtual ~Index() = default; + //! The type of the index + IndexType type; + //! Associated table io manager + TableIOManager &table_io_manager; + //! Column identifiers to extract from the base table + vector column_ids; + //! unordered_set of column_ids used by the index + unordered_set column_id_set; + //! Unbound expressions used by the index + vector> unbound_expressions; + //! The physical types stored in the index + vector types; + //! The logical types of the expressions + vector logical_types; + //! constraint type + IndexConstraintType constraint_type; -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/catalog/default/default_functions.hpp -// -// -//===----------------------------------------------------------------------===// +public: + //! Initialize a scan on the index with the given expression and column ids + //! to fetch from the base table when we only have one query predicate + virtual unique_ptr InitializeScanSinglePredicate(Transaction &transaction, Value value, + ExpressionType expressionType) = 0; + //! Initialize a scan on the index with the given expression and column ids + //! to fetch from the base table for two query predicates + virtual unique_ptr InitializeScanTwoPredicates(Transaction &transaction, Value low_value, + ExpressionType low_expression_type, Value high_value, + ExpressionType high_expression_type) = 0; + //! Perform a lookup on the index, fetching up to max_count result ids. Returns true if all row ids were fetched, + //! and false otherwise. + virtual bool Scan(Transaction &transaction, DataTable &table, IndexScanState &state, idx_t max_count, + vector &result_ids) = 0; + //! Obtain a lock on the index + virtual void InitializeLock(IndexLock &state); + //! Called when data is appended to the index. The lock obtained from InitializeAppend must be held + virtual bool Append(IndexLock &state, DataChunk &entries, Vector &row_identifiers) = 0; + bool Append(DataChunk &entries, Vector &row_identifiers); + //! 
Verify that data can be appended to the index + virtual void VerifyAppend(DataChunk &chunk) = 0; + //! Verify that data can be appended to the index for foreign key constraint + virtual void VerifyAppendForeignKey(DataChunk &chunk, string *err_msg_ptr) = 0; + //! Verify that data can be delete from the index for foreign key constraint + virtual void VerifyDeleteForeignKey(DataChunk &chunk, string *err_msg_ptr) = 0; + //! Called when data inside the index is Deleted + virtual void Delete(IndexLock &state, DataChunk &entries, Vector &row_identifiers) = 0; + void Delete(DataChunk &entries, Vector &row_identifiers); + //! Insert data into the index. Does not lock the index. + virtual bool Insert(IndexLock &lock, DataChunk &input, Vector &row_identifiers) = 0; + //! Construct an index from sorted chunks of keys. + virtual void ConstructAndMerge(IndexLock &lock, PayloadScanner &scanner, Allocator &allocator) = 0; + //! Merge other_index into this index. + virtual bool MergeIndexes(IndexLock &state, Index *other_index) = 0; + bool MergeIndexes(Index *other_index); + //! Returns the string representation of an index + virtual string ToString() = 0; -namespace duckdb { -class SchemaCatalogEntry; + //! Returns true if the index is affected by updates on the specified column ids, and false otherwise + bool IndexIsUpdated(const vector &column_ids) const; -struct DefaultMacro { - const char *schema; - const char *name; - const char *parameters[8]; - const char *macro; -}; + //! Returns unique flag + bool IsUnique() { + return (constraint_type == IndexConstraintType::UNIQUE || constraint_type == IndexConstraintType::PRIMARY); + } + //! Returns primary flag + bool IsPrimary() { + return (constraint_type == IndexConstraintType::PRIMARY); + } + //! Returns foreign flag + bool IsForeign() { + return (constraint_type == IndexConstraintType::FOREIGN); + } + //! Serializes the index and returns the pair of block_id offset positions + virtual BlockPointer Serialize(duckdb::MetaBlockWriter &writer); -class DefaultFunctionGenerator : public DefaultGenerator { -public: - DefaultFunctionGenerator(Catalog &catalog, SchemaCatalogEntry *schema); + //! Returns block/offset of where index was most recently serialized. + BlockPointer GetSerializedDataPointer() const { + return serialized_data_pointer; + } - SchemaCatalogEntry *schema; +protected: + void ExecuteExpressions(DataChunk &input, DataChunk &result); - DUCKDB_API static unique_ptr CreateInternalMacroInfo(DefaultMacro &default_macro); - DUCKDB_API static unique_ptr CreateInternalTableMacroInfo(DefaultMacro &default_macro); + //! Lock used for updating the index + mutex lock; -public: - unique_ptr CreateDefaultEntry(ClientContext &context, const string &entry_name) override; - vector GetDefaultEntries() override; + //! Pointer to most recently checkpointed index data. + BlockPointer serialized_data_pointer; private: - static unique_ptr CreateInternalTableMacroInfo(DefaultMacro &default_macro, - unique_ptr function); + //! Bound expressions used by the index + vector> bound_expressions; + //! Expression executor for the index expressions + ExpressionExecutor executor; + + unique_ptr BindExpression(unique_ptr expr); }; } // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/catalog/default/default_views.hpp -// -// -//===----------------------------------------------------------------------===// +namespace duckdb { +class TableIndexList { +public: + //! 
Scan the catalog set, invoking the callback method for every entry + template + void Scan(T &&callback) { + // lock the catalog set + lock_guard lock(indexes_lock); + for (auto &index : indexes) { + if (callback(*index)) { + break; + } + } + } + void AddIndex(unique_ptr index); + void RemoveIndex(Index *index); + bool Empty(); + idx_t Count(); + void Move(TableIndexList &other); -namespace duckdb { -class SchemaCatalogEntry; + Index *FindForeignKeyIndex(const vector &fk_keys, ForeignKeyType fk_type); + void VerifyForeignKey(const vector &fk_keys, bool is_append, DataChunk &chunk, + vector &err_msg); -class DefaultViewGenerator : public DefaultGenerator { -public: - DefaultViewGenerator(Catalog &catalog, SchemaCatalogEntry *schema); + //! Serialize all indexes owned by this table, returns a vector of block info of all indexes + vector SerializeIndexes(duckdb::MetaBlockWriter &writer); - SchemaCatalogEntry *schema; + vector GetRequiredColumns(); -public: - unique_ptr CreateDefaultEntry(ClientContext &context, const string &entry_name) override; - vector GetDefaultEntries() override; +private: + //! Indexes associated with the current table + mutex indexes_lock; + vector> indexes; }; - } // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/algorithm.hpp -// -// -//===----------------------------------------------------------------------===// +namespace duckdb { +class CatalogEntry; +struct BoundCreateTableInfo { + explicit BoundCreateTableInfo(unique_ptr base_p) : base(move(base_p)) { + D_ASSERT(base); + } -#include + //! The schema to create the table in + SchemaCatalogEntry *schema; + //! The base CreateInfo object + unique_ptr base; + //! Column dependency manager of the table + ColumnDependencyManager column_dependency_manager; + //! List of constraints on the table + vector> constraints; + //! List of bound constraints on the table + vector> bound_constraints; + //! Bound default values + vector> bound_defaults; + //! Dependents of the table (in e.g. default values) + unordered_set dependencies; + //! The existing table data on disk (if any) + unique_ptr data; + //! CREATE TABLE from QUERY + unique_ptr query; + //! Indexes created by this table + vector indexes; + + //! Serializes a BoundCreateTableInfo to a stand-alone binary blob + void Serialize(Serializer &serializer) const; + //! Deserializes a blob back into a BoundCreateTableInfo + static unique_ptr Deserialize(Deserializer &source, PlanDeserializationState &state); + + CreateTableInfo &Base() { + D_ASSERT(base); + return (CreateTableInfo &)*base; + } +}; +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/parser/constraints/foreign_key_constraint.hpp +// duckdb/catalog/default/default_types.hpp // // //===----------------------------------------------------------------------===// @@ -5345,46 +6052,27 @@ class DefaultViewGenerator : public DefaultGenerator { namespace duckdb { +class SchemaCatalogEntry; -class ForeignKeyConstraint : public Constraint { +class DefaultTypeGenerator : public DefaultGenerator { public: - DUCKDB_API ForeignKeyConstraint(vector pk_columns, vector fk_columns, ForeignKeyInfo info); + DefaultTypeGenerator(Catalog &catalog, SchemaCatalogEntry *schema); - //! The set of main key table's columns - vector pk_columns; - //! 
The set of foreign key table's columns - vector fk_columns; - ForeignKeyInfo info; + SchemaCatalogEntry *schema; public: - DUCKDB_API string ToString() const override; - - DUCKDB_API unique_ptr Copy() const override; + DUCKDB_API static LogicalTypeId GetDefaultType(const string &name); - //! Serialize to a stand-alone binary blob - DUCKDB_API void Serialize(FieldWriter &writer) const override; - //! Deserializes a ParsedConstraint - DUCKDB_API static unique_ptr Deserialize(FieldReader &source); + unique_ptr CreateDefaultEntry(ClientContext &context, const string &entry_name) override; + vector GetDefaultEntries() override; }; } // namespace duckdb - - - - - - - - - - - - //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/planner/constraints/bound_foreign_key_constraint.hpp +// duckdb/main/extension_functions.hpp // // //===----------------------------------------------------------------------===// @@ -5393,544 +6081,769 @@ class ForeignKeyConstraint : public Constraint { - namespace duckdb { -class BoundForeignKeyConstraint : public BoundConstraint { -public: - BoundForeignKeyConstraint(ForeignKeyInfo info_p, unordered_set pk_key_set_p, - unordered_set fk_key_set_p) - : BoundConstraint(ConstraintType::FOREIGN_KEY), info(move(info_p)), pk_key_set(move(pk_key_set_p)), - fk_key_set(move(fk_key_set_p)) { -#ifdef DEBUG - D_ASSERT(info.pk_keys.size() == pk_key_set.size()); - for (auto &key : info.pk_keys) { - D_ASSERT(pk_key_set.find(key) != pk_key_set.end()); - } - D_ASSERT(info.fk_keys.size() == fk_key_set.size()); - for (auto &key : info.fk_keys) { - D_ASSERT(fk_key_set.find(key) != fk_key_set.end()); - } -#endif - } - - ForeignKeyInfo info; - //! The same keys but stored as an unordered set - unordered_set pk_key_set; - //! 
The same keys but stored as an unordered set - unordered_set fk_key_set; +struct ExtensionFunction { + char function[48]; + char extension[48]; }; +static constexpr ExtensionFunction EXTENSION_FUNCTIONS[] = { + {"->>", "json"}, + {"array_to_json", "json"}, + {"create_fts_index", "fts"}, + {"dbgen", "tpch"}, + {"drop_fts_index", "fts"}, + {"dsdgen", "tpcds"}, + {"excel_text", "excel"}, + {"from_json", "json"}, + {"from_json_strict", "json"}, + {"from_substrait", "substrait"}, + {"get_substrait", "substrait"}, + {"get_substrait_json", "substrait"}, + {"icu_calendar_names", "icu"}, + {"icu_sort_key", "icu"}, + {"json", "json"}, + {"json_array", "json"}, + {"json_array_length", "json"}, + {"json_contains", "json"}, + {"json_extract", "json"}, + {"json_extract_path", "json"}, + {"json_extract_path_text", "json"}, + {"json_extract_string", "json"}, + {"json_group_array", "json"}, + {"json_group_object", "json"}, + {"json_group_structure", "json"}, + {"json_merge_patch", "json"}, + {"json_object", "json"}, + {"json_quote", "json"}, + {"json_structure", "json"}, + {"json_transform", "json"}, + {"json_transform_strict", "json"}, + {"json_type", "json"}, + {"json_valid", "json"}, + {"make_timestamptz", "icu"}, + {"parquet_metadata", "parquet"}, + {"parquet_scan", "parquet"}, + {"parquet_schema", "parquet"}, + {"pg_timezone_names", "icu"}, + {"postgres_attach", "postgres_scanner"}, + {"postgres_scan", "postgres_scanner"}, + {"postgres_scan_pushdown", "postgres_scanner"}, + {"read_json_objects", "json"}, + {"read_ndjson_objects", "json"}, + {"read_parquet", "parquet"}, + {"row_to_json", "json"}, + {"scan_arrow_ipc", "arrow"}, + {"sqlite_attach", "sqlite_scanner"}, + {"sqlite_scan", "sqlite_scanner"}, + {"stem", "fts"}, + {"text", "excel"}, + {"to_arrow_ipc", "arrow"}, + {"to_json", "json"}, + {"tpcds", "tpcds"}, + {"tpcds_answers", "tpcds"}, + {"tpcds_queries", "tpcds"}, + {"tpch", "tpch"}, + {"tpch_answers", "tpch"}, + {"tpch_queries", "tpch"}, + {"visualize_diff_profiling_output", "visualizer"}, + {"visualize_json_profiling_output", "visualizer"}, + {"visualize_last_profiling_output", "visualizer"}, +}; } // namespace duckdb +#include +namespace duckdb { +string SimilarCatalogEntry::GetQualifiedName() const { + D_ASSERT(Found()); + return schema->name + "." 
+ name; +} -#include - -namespace duckdb { +Catalog::Catalog(DatabaseInstance &db) + : db(db), schemas(make_unique(*this, make_unique(*this))), + dependency_manager(make_unique(*this)) { + catalog_version = 0; +} +Catalog::~Catalog() { +} -void FindForeignKeyInformation(CatalogEntry *entry, AlterForeignKeyType alter_fk_type, - vector> &fk_arrays) { - if (entry->type != CatalogType::TABLE_ENTRY) { - return; - } - auto *table_entry = (TableCatalogEntry *)entry; - for (idx_t i = 0; i < table_entry->constraints.size(); i++) { - auto &cond = table_entry->constraints[i]; - if (cond->type != ConstraintType::FOREIGN_KEY) { - continue; - } - auto &fk = (ForeignKeyConstraint &)*cond; - if (fk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE) { - fk_arrays.push_back(make_unique(fk.info.schema, fk.info.table, false, entry->name, - fk.pk_columns, fk.fk_columns, fk.info.pk_keys, - fk.info.fk_keys, alter_fk_type)); - } else if (fk.info.type == ForeignKeyType::FK_TYPE_PRIMARY_KEY_TABLE && - alter_fk_type == AlterForeignKeyType::AFT_DELETE) { - throw CatalogException("Could not drop the table because this table is main key table of the table \"%s\"", - fk.info.table); - } - } +Catalog &Catalog::GetCatalog(ClientContext &context) { + return context.db->GetCatalog(); } -SchemaCatalogEntry::SchemaCatalogEntry(Catalog *catalog, string name_p, bool internal) - : CatalogEntry(CatalogType::SCHEMA_ENTRY, catalog, move(name_p)), - tables(*catalog, make_unique(*catalog, this)), indexes(*catalog), table_functions(*catalog), - copy_functions(*catalog), pragma_functions(*catalog), - functions(*catalog, make_unique(*catalog, this)), sequences(*catalog), - collations(*catalog), types(*catalog, make_unique(*catalog, this)) { - this->internal = internal; +CatalogEntry *Catalog::CreateTable(ClientContext &context, BoundCreateTableInfo *info) { + auto schema = GetSchema(context, info->base->schema); + return CreateTable(context, schema, info); } -CatalogEntry *SchemaCatalogEntry::AddEntry(ClientContext &context, unique_ptr entry, - OnCreateConflict on_conflict, unordered_set dependencies) { - auto entry_name = entry->name; - auto entry_type = entry->type; - auto result = entry.get(); +CatalogEntry *Catalog::CreateTable(ClientContext &context, unique_ptr info) { + auto binder = Binder::CreateBinder(context); + auto bound_info = binder->BindCreateTableInfo(move(info)); + return CreateTable(context, bound_info.get()); +} - // first find the set for this entry - auto &set = GetCatalogSet(entry_type); +CatalogEntry *Catalog::CreateTable(ClientContext &context, SchemaCatalogEntry *schema, BoundCreateTableInfo *info) { + return schema->CreateTable(context, info); +} - if (name != TEMP_SCHEMA) { - dependencies.insert(this); - } else { - entry->temporary = true; - } - if (on_conflict == OnCreateConflict::REPLACE_ON_CONFLICT) { - // CREATE OR REPLACE: first try to drop the entry - auto old_entry = set.GetEntry(context, entry_name); - if (old_entry) { - if (old_entry->type != entry_type) { - throw CatalogException("Existing object %s is of type %s, trying to replace with type %s", entry_name, - CatalogTypeToString(old_entry->type), CatalogTypeToString(entry_type)); - } - (void)set.DropEntry(context, entry_name, false); - } - } - // now try to add the entry - if (!set.CreateEntry(context, entry_name, move(entry), dependencies)) { - // entry already exists! 
- if (on_conflict == OnCreateConflict::ERROR_ON_CONFLICT) { - throw CatalogException("%s with name \"%s\" already exists!", CatalogTypeToString(entry_type), entry_name); - } else { - return nullptr; - } - } - return result; +CatalogEntry *Catalog::CreateView(ClientContext &context, CreateViewInfo *info) { + auto schema = GetSchema(context, info->schema); + return CreateView(context, schema, info); } -CatalogEntry *SchemaCatalogEntry::AddEntry(ClientContext &context, unique_ptr entry, - OnCreateConflict on_conflict) { - unordered_set dependencies; - return AddEntry(context, move(entry), on_conflict, dependencies); +CatalogEntry *Catalog::CreateView(ClientContext &context, SchemaCatalogEntry *schema, CreateViewInfo *info) { + return schema->CreateView(context, info); } -CatalogEntry *SchemaCatalogEntry::CreateSequence(ClientContext &context, CreateSequenceInfo *info) { - auto sequence = make_unique(catalog, this, info); - return AddEntry(context, move(sequence), info->on_conflict); +CatalogEntry *Catalog::CreateSequence(ClientContext &context, CreateSequenceInfo *info) { + auto schema = GetSchema(context, info->schema); + return CreateSequence(context, schema, info); } -CatalogEntry *SchemaCatalogEntry::CreateType(ClientContext &context, CreateTypeInfo *info) { - auto type_entry = make_unique(catalog, this, info); - return AddEntry(context, move(type_entry), info->on_conflict); +CatalogEntry *Catalog::CreateType(ClientContext &context, CreateTypeInfo *info) { + auto schema = GetSchema(context, info->schema); + return CreateType(context, schema, info); } -CatalogEntry *SchemaCatalogEntry::CreateTable(ClientContext &context, BoundCreateTableInfo *info) { - auto table = make_unique(catalog, this, info); - table->storage->info->cardinality = table->storage->GetTotalRows(); +CatalogEntry *Catalog::CreateSequence(ClientContext &context, SchemaCatalogEntry *schema, CreateSequenceInfo *info) { + return schema->CreateSequence(context, info); +} - CatalogEntry *entry = AddEntry(context, move(table), info->Base().on_conflict, info->dependencies); - if (!entry) { - return nullptr; - } +CatalogEntry *Catalog::CreateType(ClientContext &context, SchemaCatalogEntry *schema, CreateTypeInfo *info) { + return schema->CreateType(context, info); +} - // add a foreign key constraint in main key table if there is a foreign key constraint - vector> fk_arrays; - FindForeignKeyInformation(entry, AlterForeignKeyType::AFT_ADD, fk_arrays); - for (idx_t i = 0; i < fk_arrays.size(); i++) { - // alter primary key table - AlterForeignKeyInfo *fk_info = fk_arrays[i].get(); - catalog->Alter(context, fk_info); +CatalogEntry *Catalog::CreateTableFunction(ClientContext &context, CreateTableFunctionInfo *info) { + auto schema = GetSchema(context, info->schema); + return CreateTableFunction(context, schema, info); +} - // make a dependency between this table and referenced table - auto &set = GetCatalogSet(CatalogType::TABLE_ENTRY); - info->dependencies.insert(set.GetEntry(context, fk_info->name)); - } - return entry; +CatalogEntry *Catalog::CreateTableFunction(ClientContext &context, SchemaCatalogEntry *schema, + CreateTableFunctionInfo *info) { + return schema->CreateTableFunction(context, info); } -CatalogEntry *SchemaCatalogEntry::CreateView(ClientContext &context, CreateViewInfo *info) { - auto view = make_unique(catalog, this, info); - return AddEntry(context, move(view), info->on_conflict); +CatalogEntry *Catalog::CreateCopyFunction(ClientContext &context, CreateCopyFunctionInfo *info) { + auto schema = GetSchema(context, 
info->schema); + return CreateCopyFunction(context, schema, info); } -CatalogEntry *SchemaCatalogEntry::CreateIndex(ClientContext &context, CreateIndexInfo *info, TableCatalogEntry *table) { - unordered_set dependencies; - dependencies.insert(table); - auto index = make_unique(catalog, this, info); - return AddEntry(context, move(index), info->on_conflict, dependencies); +CatalogEntry *Catalog::CreateCopyFunction(ClientContext &context, SchemaCatalogEntry *schema, + CreateCopyFunctionInfo *info) { + return schema->CreateCopyFunction(context, info); } -CatalogEntry *SchemaCatalogEntry::CreateCollation(ClientContext &context, CreateCollationInfo *info) { - auto collation = make_unique(catalog, this, info); - return AddEntry(context, move(collation), info->on_conflict); +CatalogEntry *Catalog::CreatePragmaFunction(ClientContext &context, CreatePragmaFunctionInfo *info) { + auto schema = GetSchema(context, info->schema); + return CreatePragmaFunction(context, schema, info); } -CatalogEntry *SchemaCatalogEntry::CreateTableFunction(ClientContext &context, CreateTableFunctionInfo *info) { - auto table_function = make_unique(catalog, this, info); - return AddEntry(context, move(table_function), info->on_conflict); +CatalogEntry *Catalog::CreatePragmaFunction(ClientContext &context, SchemaCatalogEntry *schema, + CreatePragmaFunctionInfo *info) { + return schema->CreatePragmaFunction(context, info); } -CatalogEntry *SchemaCatalogEntry::CreateCopyFunction(ClientContext &context, CreateCopyFunctionInfo *info) { - auto copy_function = make_unique(catalog, this, info); - return AddEntry(context, move(copy_function), info->on_conflict); +CatalogEntry *Catalog::CreateFunction(ClientContext &context, CreateFunctionInfo *info) { + auto schema = GetSchema(context, info->schema); + return CreateFunction(context, schema, info); } -CatalogEntry *SchemaCatalogEntry::CreatePragmaFunction(ClientContext &context, CreatePragmaFunctionInfo *info) { - auto pragma_function = make_unique(catalog, this, info); - return AddEntry(context, move(pragma_function), info->on_conflict); +CatalogEntry *Catalog::CreateFunction(ClientContext &context, SchemaCatalogEntry *schema, CreateFunctionInfo *info) { + return schema->CreateFunction(context, info); } -CatalogEntry *SchemaCatalogEntry::CreateFunction(ClientContext &context, CreateFunctionInfo *info) { - unique_ptr function; - switch (info->type) { - case CatalogType::SCALAR_FUNCTION_ENTRY: - function = make_unique_base(catalog, this, - (CreateScalarFunctionInfo *)info); - break; - case CatalogType::MACRO_ENTRY: - // create a macro function - function = make_unique_base(catalog, this, (CreateMacroInfo *)info); - break; +CatalogEntry *Catalog::CreateCollation(ClientContext &context, CreateCollationInfo *info) { + auto schema = GetSchema(context, info->schema); + return CreateCollation(context, schema, info); +} - case CatalogType::TABLE_MACRO_ENTRY: - // create a macro function - function = make_unique_base(catalog, this, (CreateMacroInfo *)info); - break; - case CatalogType::AGGREGATE_FUNCTION_ENTRY: - D_ASSERT(info->type == CatalogType::AGGREGATE_FUNCTION_ENTRY); - // create an aggregate function - function = make_unique_base(catalog, this, - (CreateAggregateFunctionInfo *)info); - break; - default: - throw InternalException("Unknown function type \"%s\"", CatalogTypeToString(info->type)); - } - return AddEntry(context, move(function), info->on_conflict); +CatalogEntry *Catalog::CreateCollation(ClientContext &context, SchemaCatalogEntry *schema, CreateCollationInfo *info) { + 
return schema->CreateCollation(context, info); } -CatalogEntry *SchemaCatalogEntry::AddFunction(ClientContext &context, CreateFunctionInfo *info) { - auto entry = GetCatalogSet(info->type).GetEntry(context, info->name); - if (!entry) { - return CreateFunction(context, info); +CatalogEntry *Catalog::CreateSchema(ClientContext &context, CreateSchemaInfo *info) { + D_ASSERT(!info->schema.empty()); + if (info->schema == TEMP_SCHEMA) { + throw CatalogException("Cannot create built-in schema \"%s\"", info->schema); } - info->on_conflict = OnCreateConflict::REPLACE_ON_CONFLICT; - switch (info->type) { - case CatalogType::SCALAR_FUNCTION_ENTRY: { - auto scalar_info = (CreateScalarFunctionInfo *)info; - auto &scalars = *(ScalarFunctionCatalogEntry *)entry; - for (const auto &scalar : scalars.functions.functions) { - scalar_info->functions.AddFunction(scalar); - } - break; - } - case CatalogType::AGGREGATE_FUNCTION_ENTRY: { - auto agg_info = (CreateAggregateFunctionInfo *)info; - auto &aggs = *(AggregateFunctionCatalogEntry *)entry; - for (const auto &agg : aggs.functions.functions) { - agg_info->functions.AddFunction(agg); + unordered_set dependencies; + auto entry = make_unique(this, info->schema, info->internal); + auto result = entry.get(); + if (!schemas->CreateEntry(context, info->schema, move(entry), dependencies)) { + if (info->on_conflict == OnCreateConflict::ERROR_ON_CONFLICT) { + throw CatalogException("Schema with name %s already exists!", info->schema); + } else { + D_ASSERT(info->on_conflict == OnCreateConflict::IGNORE_ON_CONFLICT); } - break; - } - default: - // Macros can only be replaced because there is only one of each name. - throw InternalException("Unsupported function type \"%s\" for adding", CatalogTypeToString(info->type)); + return nullptr; } - return CreateFunction(context, info); + return result; } -void SchemaCatalogEntry::DropEntry(ClientContext &context, DropInfo *info) { - auto &set = GetCatalogSet(info->type); - - // first find the entry - auto existing_entry = set.GetEntry(context, info->name); - if (!existing_entry) { +void Catalog::DropSchema(ClientContext &context, DropInfo *info) { + D_ASSERT(!info->name.empty()); + ModifyCatalog(); + if (!schemas->DropEntry(context, info->name, info->cascade)) { if (!info->if_exists) { - throw CatalogException("%s with name \"%s\" does not exist!", CatalogTypeToString(info->type), info->name); + throw CatalogException("Schema with name \"%s\" does not exist!", info->name); } - return; - } - if (existing_entry->type != info->type) { - throw CatalogException("Existing object %s is of type %s, trying to replace with type %s", info->name, - CatalogTypeToString(existing_entry->type), CatalogTypeToString(info->type)); } +} - // if there is a foreign key constraint, get that information - vector> fk_arrays; - FindForeignKeyInformation(existing_entry, AlterForeignKeyType::AFT_DELETE, fk_arrays); - - if (!set.DropEntry(context, info->name, info->cascade)) { - throw InternalException("Could not drop element because of an internal error"); +void Catalog::DropEntry(ClientContext &context, DropInfo *info) { + ModifyCatalog(); + if (info->type == CatalogType::SCHEMA_ENTRY) { + // DROP SCHEMA + DropSchema(context, info); + return; } - // remove the foreign key constraint in main key table if main key table's name is valid - for (idx_t i = 0; i < fk_arrays.size(); i++) { - // alter primary key tablee - Catalog::GetCatalog(context).Alter(context, fk_arrays[i].get()); + auto lookup = LookupEntry(context, info->type, info->schema, info->name, 
info->if_exists); + if (!lookup.Found()) { + return; } -} -void SchemaCatalogEntry::Alter(ClientContext &context, AlterInfo *info) { - CatalogType type = info->GetCatalogType(); - auto &set = GetCatalogSet(type); - if (info->type == AlterType::CHANGE_OWNERSHIP) { - if (!set.AlterOwnership(context, (ChangeOwnershipInfo *)info)) { - throw CatalogException("Couldn't change ownership!"); - } - } else { - string name = info->name; - if (!set.AlterEntry(context, name, info)) { - throw CatalogException("Entry with name \"%s\" does not exist!", name); - } - } + lookup.schema->DropEntry(context, info); } -void SchemaCatalogEntry::Scan(ClientContext &context, CatalogType type, - const std::function &callback) { - auto &set = GetCatalogSet(type); - set.Scan(context, callback); +CatalogEntry *Catalog::AddFunction(ClientContext &context, CreateFunctionInfo *info) { + auto schema = GetSchema(context, info->schema); + return AddFunction(context, schema, info); } -void SchemaCatalogEntry::Scan(CatalogType type, const std::function &callback) { - auto &set = GetCatalogSet(type); - set.Scan(callback); +CatalogEntry *Catalog::AddFunction(ClientContext &context, SchemaCatalogEntry *schema, CreateFunctionInfo *info) { + return schema->AddFunction(context, info); } -void SchemaCatalogEntry::Serialize(Serializer &serializer) { - FieldWriter writer(serializer); - writer.WriteString(name); - writer.Finalize(); +SchemaCatalogEntry *Catalog::GetSchema(ClientContext &context, const string &schema_name, bool if_exists, + QueryErrorContext error_context) { + D_ASSERT(!schema_name.empty()); + if (schema_name == TEMP_SCHEMA) { + return SchemaCatalogEntry::GetTemporaryObjects(context); + } + auto entry = schemas->GetEntry(context, schema_name); + if (!entry && !if_exists) { + throw CatalogException(error_context.FormatError("Schema with name %s does not exist!", schema_name)); + } + return (SchemaCatalogEntry *)entry; } -unique_ptr SchemaCatalogEntry::Deserialize(Deserializer &source) { - auto info = make_unique(); +void Catalog::ScanSchemas(ClientContext &context, std::function callback) { + // create all default schemas first + schemas->Scan(context, [&](CatalogEntry *entry) { callback(entry); }); +} - FieldReader reader(source); - info->schema = reader.ReadRequired(); - reader.Finalize(); +SimilarCatalogEntry Catalog::SimilarEntryInSchemas(ClientContext &context, const string &entry_name, CatalogType type, + const vector &schemas) { - return info; + vector sets; + std::transform(schemas.begin(), schemas.end(), std::back_inserter(sets), + [type](SchemaCatalogEntry *s) -> CatalogSet * { return &s->GetCatalogSet(type); }); + pair most_similar {"", (idx_t)-1}; + SchemaCatalogEntry *schema_of_most_similar = nullptr; + for (auto schema : schemas) { + auto entry = schema->GetCatalogSet(type).SimilarEntry(context, entry_name); + if (!entry.first.empty() && (most_similar.first.empty() || most_similar.second > entry.second)) { + most_similar = entry; + schema_of_most_similar = schema; + } + } + + return {most_similar.first, most_similar.second, schema_of_most_similar}; } -string SchemaCatalogEntry::ToSQL() { - std::stringstream ss; - ss << "CREATE SCHEMA " << name << ";"; - return ss.str(); +string FindExtension(const string &function_name) { + auto size = sizeof(EXTENSION_FUNCTIONS) / sizeof(ExtensionFunction); + auto it = std::lower_bound( + EXTENSION_FUNCTIONS, EXTENSION_FUNCTIONS + size, function_name, + [](const ExtensionFunction &element, const string &value) { return element.function < value; }); + if (it != 
EXTENSION_FUNCTIONS + size && it->function == function_name) { + return it->extension; + } + return ""; } +CatalogException Catalog::CreateMissingEntryException(ClientContext &context, const string &entry_name, + CatalogType type, const vector &schemas, + QueryErrorContext error_context) { + auto entry = SimilarEntryInSchemas(context, entry_name, type, schemas); -CatalogSet &SchemaCatalogEntry::GetCatalogSet(CatalogType type) { - switch (type) { - case CatalogType::VIEW_ENTRY: - case CatalogType::TABLE_ENTRY: - return tables; - case CatalogType::INDEX_ENTRY: - return indexes; - case CatalogType::TABLE_FUNCTION_ENTRY: - case CatalogType::TABLE_MACRO_ENTRY: - return table_functions; - case CatalogType::COPY_FUNCTION_ENTRY: - return copy_functions; - case CatalogType::PRAGMA_FUNCTION_ENTRY: - return pragma_functions; - case CatalogType::AGGREGATE_FUNCTION_ENTRY: - case CatalogType::SCALAR_FUNCTION_ENTRY: - case CatalogType::MACRO_ENTRY: - return functions; - case CatalogType::SEQUENCE_ENTRY: - return sequences; - case CatalogType::COLLATION_ENTRY: - return collations; - case CatalogType::TYPE_ENTRY: - return types; - default: - throw InternalException("Unsupported catalog type in schema"); + vector unseen_schemas; + this->schemas->Scan([&schemas, &unseen_schemas](CatalogEntry *entry) { + auto schema_entry = (SchemaCatalogEntry *)entry; + if (std::find(schemas.begin(), schemas.end(), schema_entry) == schemas.end()) { + unseen_schemas.emplace_back(schema_entry); + } + }); + auto unseen_entry = SimilarEntryInSchemas(context, entry_name, type, unseen_schemas); + auto extension_name = FindExtension(entry_name); + if (!extension_name.empty()) { + return CatalogException("Function with name %s is not on the catalog, but it exists in the %s extension. To " + "Install and Load the extension, run: INSTALL %s; LOAD %s;", + entry_name, extension_name, extension_name, extension_name); + } + string did_you_mean; + if (unseen_entry.Found() && unseen_entry.distance < entry.distance) { + did_you_mean = "\nDid you mean \"" + unseen_entry.GetQualifiedName() + "\"?"; + } else if (entry.Found()) { + did_you_mean = "\nDid you mean \"" + entry.name + "\"?"; } + + return CatalogException(error_context.FormatError("%s with name %s does not exist!%s", CatalogTypeToString(type), + entry_name, did_you_mean)); } -} // namespace duckdb +CatalogEntryLookup Catalog::LookupEntry(ClientContext &context, CatalogType type, const string &schema_name, + const string &name, bool if_exists, QueryErrorContext error_context) { + if (!schema_name.empty()) { + auto schema = GetSchema(context, schema_name, if_exists, error_context); + if (!schema) { + D_ASSERT(if_exists); + return {nullptr, nullptr}; + } + + auto entry = schema->GetCatalogSet(type).GetEntry(context, name); + if (!entry && !if_exists) { + throw CreateMissingEntryException(context, name, type, {schema}, error_context); + } + return {schema, entry}; + } + const auto &paths = ClientData::Get(context).catalog_search_path->Get(); + for (const auto &path : paths) { + auto lookup = LookupEntry(context, type, path, name, true, error_context); + if (lookup.Found()) { + return lookup; + } + } + if (!if_exists) { + vector schemas; + for (const auto &path : paths) { + auto schema = GetSchema(context, path, true); + if (schema) { + schemas.emplace_back(schema); + } + } + throw CreateMissingEntryException(context, name, type, schemas, error_context); + } + return {nullptr, nullptr}; +} +CatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema, const string &name) { 
+ vector entry_types {CatalogType::TABLE_ENTRY, CatalogType::SEQUENCE_ENTRY}; + for (auto entry_type : entry_types) { + CatalogEntry *result = GetEntry(context, entry_type, schema, name, true); + if (result != nullptr) { + return result; + } + } -#include -#include + throw CatalogException("CatalogElement \"%s.%s\" does not exist!", schema, name); +} -namespace duckdb { +CatalogEntry *Catalog::GetEntry(ClientContext &context, CatalogType type, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) { + return LookupEntry(context, type, schema_name, name, if_exists, error_context).entry; +} -SequenceCatalogEntry::SequenceCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateSequenceInfo *info) - : StandardEntry(CatalogType::SEQUENCE_ENTRY, schema, catalog, info->name), usage_count(info->usage_count), - counter(info->start_value), increment(info->increment), start_value(info->start_value), - min_value(info->min_value), max_value(info->max_value), cycle(info->cycle) { - this->temporary = info->temporary; +template <> +TableCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) { + auto entry = GetEntry(context, CatalogType::TABLE_ENTRY, schema_name, name, if_exists); + if (!entry) { + return nullptr; + } + if (entry->type != CatalogType::TABLE_ENTRY) { + throw CatalogException(error_context.FormatError("%s is not a table", name)); + } + return (TableCatalogEntry *)entry; } -void SequenceCatalogEntry::Serialize(Serializer &serializer) { - FieldWriter writer(serializer); - writer.WriteString(schema->name); - writer.WriteString(name); - writer.WriteField(usage_count); - writer.WriteField(increment); - writer.WriteField(min_value); - writer.WriteField(max_value); - writer.WriteField(counter); - writer.WriteField(cycle); - writer.Finalize(); +template <> +SequenceCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) { + return (SequenceCatalogEntry *)GetEntry(context, CatalogType::SEQUENCE_ENTRY, schema_name, name, if_exists, + error_context); } -unique_ptr SequenceCatalogEntry::Deserialize(Deserializer &source) { - auto info = make_unique(); +template <> +TableFunctionCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) { + return (TableFunctionCatalogEntry *)GetEntry(context, CatalogType::TABLE_FUNCTION_ENTRY, schema_name, name, + if_exists, error_context); +} - FieldReader reader(source); - info->schema = reader.ReadRequired(); - info->name = reader.ReadRequired(); - info->usage_count = reader.ReadRequired(); - info->increment = reader.ReadRequired(); - info->min_value = reader.ReadRequired(); - info->max_value = reader.ReadRequired(); - info->start_value = reader.ReadRequired(); - info->cycle = reader.ReadRequired(); - reader.Finalize(); +template <> +CopyFunctionCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) { + return (CopyFunctionCatalogEntry *)GetEntry(context, CatalogType::COPY_FUNCTION_ENTRY, schema_name, name, if_exists, + error_context); +} - return info; +template <> +PragmaFunctionCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) 
{ + return (PragmaFunctionCatalogEntry *)GetEntry(context, CatalogType::PRAGMA_FUNCTION_ENTRY, schema_name, name, + if_exists, error_context); } -string SequenceCatalogEntry::ToSQL() { - std::stringstream ss; - ss << "CREATE SEQUENCE "; - ss << name; - ss << " INCREMENT BY " << increment; - ss << " MINVALUE " << min_value; - ss << " MAXVALUE " << max_value; - ss << " START " << counter; - ss << " " << (cycle ? "CYCLE" : "NO CYCLE") << ";"; - return ss.str(); +template <> +AggregateFunctionCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) { + auto entry = GetEntry(context, CatalogType::AGGREGATE_FUNCTION_ENTRY, schema_name, name, if_exists, error_context); + if (entry->type != CatalogType::AGGREGATE_FUNCTION_ENTRY) { + throw CatalogException(error_context.FormatError("%s is not an aggregate function", name)); + } + return (AggregateFunctionCatalogEntry *)entry; } -} // namespace duckdb +template <> +CollateCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) { + return (CollateCatalogEntry *)GetEntry(context, CatalogType::COLLATION_ENTRY, schema_name, name, if_exists, + error_context); +} +template <> +TypeCatalogEntry *Catalog::GetEntry(ClientContext &context, const string &schema_name, const string &name, + bool if_exists, QueryErrorContext error_context) { + return (TypeCatalogEntry *)GetEntry(context, CatalogType::TYPE_ENTRY, schema_name, name, if_exists, error_context); +} +LogicalType Catalog::GetType(ClientContext &context, const string &schema, const string &name) { + auto user_type_catalog = GetEntry(context, schema, name); + auto result_type = user_type_catalog->user_type; + LogicalType::SetCatalog(result_type, user_type_catalog); + return result_type; +} +void Catalog::Alter(ClientContext &context, AlterInfo *info) { + ModifyCatalog(); + auto lookup = LookupEntry(context, info->GetCatalogType(), info->schema, info->name, info->if_exists); + if (!lookup.Found()) { + return; + } + return lookup.schema->Alter(context, info); +} +idx_t Catalog::GetCatalogVersion() { + return catalog_version; +} +idx_t Catalog::ModifyCatalog() { + return catalog_version++; +} +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/parser/constraints/check_constraint.hpp +// duckdb/common/queue.hpp // // //===----------------------------------------------------------------------===// - - - +#include namespace duckdb { +using std::queue; +} -//! The CheckConstraint contains an expression that must evaluate to TRUE for -//! 
every row in a table -class CheckConstraint : public Constraint { -public: - DUCKDB_API explicit CheckConstraint(unique_ptr expression); - - unique_ptr expression; - -public: - DUCKDB_API string ToString() const override; - DUCKDB_API unique_ptr Copy() const override; +namespace duckdb { - DUCKDB_API void Serialize(FieldWriter &writer) const override; - DUCKDB_API static unique_ptr Deserialize(FieldReader &source); -}; +ColumnDependencyManager::ColumnDependencyManager() { +} -} // namespace duckdb +ColumnDependencyManager::~ColumnDependencyManager() { +} -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/parser/constraints/not_null_constraint.hpp -// -// -//===----------------------------------------------------------------------===// +void ColumnDependencyManager::AddGeneratedColumn(const ColumnDefinition &column, const ColumnList &list) { + D_ASSERT(column.Generated()); + vector referenced_columns; + column.GetListOfDependencies(referenced_columns); + vector indices; + for (auto &col : referenced_columns) { + if (!list.ColumnExists(col)) { + throw BinderException("Column \"%s\" referenced by generated column does not exist", col); + } + auto &entry = list.GetColumn(col); + indices.push_back(entry.Logical()); + } + return AddGeneratedColumn(column.Logical(), indices); +} +void ColumnDependencyManager::AddGeneratedColumn(LogicalIndex index, const vector &indices, bool root) { + if (indices.empty()) { + return; + } + auto &list = dependents_map[index]; + // Create a link between the dependencies + for (auto &dep : indices) { + // Add this column as a dependency of the new column + list.insert(dep); + // Add the new column as a dependent of the column + dependencies_map[dep].insert(index); + // Inherit the dependencies + if (HasDependencies(dep)) { + auto &inherited_deps = dependents_map[dep]; + D_ASSERT(!inherited_deps.empty()); + for (auto &inherited_dep : inherited_deps) { + list.insert(inherited_dep); + dependencies_map[inherited_dep].insert(index); + } + } + if (!root) { + continue; + } + direct_dependencies[index].insert(dep); + } + if (!HasDependents(index)) { + return; + } + auto &dependents = dependencies_map[index]; + if (dependents.count(index)) { + throw InvalidInputException("Circular dependency encountered when resolving generated column expressions"); + } + // Also let the dependents of this generated column inherit the dependencies + for (auto &dependent : dependents) { + AddGeneratedColumn(dependent, indices, false); + } +} +vector ColumnDependencyManager::RemoveColumn(LogicalIndex index, idx_t column_amount) { + // Always add the initial column + deleted_columns.insert(index); + RemoveGeneratedColumn(index); + RemoveStandardColumn(index); + // Clean up the internal list + vector new_indices = CleanupInternals(column_amount); + D_ASSERT(deleted_columns.empty()); + return new_indices; +} -namespace duckdb { +bool ColumnDependencyManager::IsDependencyOf(LogicalIndex gcol, LogicalIndex col) const { + auto entry = dependents_map.find(gcol); + if (entry == dependents_map.end()) { + return false; + } + auto &list = entry->second; + return list.count(col); +} -class NotNullConstraint : public Constraint { -public: - DUCKDB_API explicit NotNullConstraint(column_t index); - DUCKDB_API ~NotNullConstraint() override; +bool ColumnDependencyManager::HasDependencies(LogicalIndex index) const { + auto entry = dependents_map.find(index); + if (entry == dependents_map.end()) { + return false; + } + return true; +} - //! 
Column index this constraint pertains to - column_t index; +const logical_index_set_t &ColumnDependencyManager::GetDependencies(LogicalIndex index) const { + auto entry = dependents_map.find(index); + D_ASSERT(entry != dependents_map.end()); + return entry->second; +} -public: - DUCKDB_API string ToString() const override; +bool ColumnDependencyManager::HasDependents(LogicalIndex index) const { + auto entry = dependencies_map.find(index); + if (entry == dependencies_map.end()) { + return false; + } + return true; +} - DUCKDB_API unique_ptr Copy() const override; +const logical_index_set_t &ColumnDependencyManager::GetDependents(LogicalIndex index) const { + auto entry = dependencies_map.find(index); + D_ASSERT(entry != dependencies_map.end()); + return entry->second; +} - //! Serialize to a stand-alone binary blob - DUCKDB_API void Serialize(FieldWriter &writer) const override; - //! Deserializes a NotNullConstraint - DUCKDB_API static unique_ptr Deserialize(FieldReader &source); -}; +void ColumnDependencyManager::RemoveStandardColumn(LogicalIndex index) { + if (!HasDependents(index)) { + return; + } + auto dependents = dependencies_map[index]; + for (auto &gcol : dependents) { + // If index is a direct dependency of gcol, remove it from the list + if (direct_dependencies.find(gcol) != direct_dependencies.end()) { + direct_dependencies[gcol].erase(index); + } + RemoveGeneratedColumn(gcol); + } + // Remove this column from the dependencies map + dependencies_map.erase(index); +} -} // namespace duckdb +void ColumnDependencyManager::RemoveGeneratedColumn(LogicalIndex index) { + deleted_columns.insert(index); + if (!HasDependencies(index)) { + return; + } + auto &dependencies = dependents_map[index]; + for (auto &col : dependencies) { + // Remove this generated column from the list of this column + auto &col_dependents = dependencies_map[col]; + D_ASSERT(col_dependents.count(index)); + col_dependents.erase(index); + // If the resulting list is empty, remove the column from the dependencies map altogether + if (col_dependents.empty()) { + dependencies_map.erase(col); + } + } + // Remove this column from the dependents_map map + dependents_map.erase(index); +} -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/parser/constraints/unique_constraint.hpp -// -// -//===----------------------------------------------------------------------===// +void ColumnDependencyManager::AdjustSingle(LogicalIndex idx, idx_t offset) { + D_ASSERT(idx.index >= offset); + LogicalIndex new_idx = LogicalIndex(idx.index - offset); + // Adjust this index in the dependents of this column + bool has_dependents = HasDependents(idx); + bool has_dependencies = HasDependencies(idx); + if (has_dependents) { + auto &dependents = GetDependents(idx); + for (auto &dep : dependents) { + auto &dep_dependencies = dependents_map[dep]; + dep_dependencies.erase(idx); + D_ASSERT(!dep_dependencies.count(new_idx)); + dep_dependencies.insert(new_idx); + } + } + if (has_dependencies) { + auto &dependencies = GetDependencies(idx); + for (auto &dep : dependencies) { + auto &dep_dependents = dependencies_map[dep]; + dep_dependents.erase(idx); + D_ASSERT(!dep_dependents.count(new_idx)); + dep_dependents.insert(new_idx); + } + } + if (has_dependents) { + D_ASSERT(!dependencies_map.count(new_idx)); + dependencies_map[new_idx] = move(dependencies_map[idx]); + dependencies_map.erase(idx); + } + if (has_dependencies) { + D_ASSERT(!dependents_map.count(new_idx)); + dependents_map[new_idx] = 
move(dependents_map[idx]); + dependents_map.erase(idx); + } +} +vector ColumnDependencyManager::CleanupInternals(idx_t column_amount) { + vector to_adjust; + D_ASSERT(!deleted_columns.empty()); + // Get the lowest index that was deleted + vector new_indices(column_amount, LogicalIndex(DConstants::INVALID_INDEX)); + idx_t threshold = deleted_columns.begin()->index; + idx_t offset = 0; + for (idx_t i = 0; i < column_amount; i++) { + auto current_index = LogicalIndex(i); + auto new_index = LogicalIndex(i - offset); + new_indices[i] = new_index; + if (deleted_columns.count(current_index)) { + offset++; + continue; + } + if (i > threshold && (HasDependencies(current_index) || HasDependents(current_index))) { + to_adjust.push_back(current_index); + } + } + // Adjust all indices inside the dependency managers internal mappings + for (auto &col : to_adjust) { + auto offset = col.index - new_indices[col.index].index; + AdjustSingle(col, offset); + } + deleted_columns.clear(); + return new_indices; +} +stack ColumnDependencyManager::GetBindOrder(const ColumnList &columns) { + stack bind_order; + queue to_visit; + logical_index_set_t visited; -namespace duckdb { + for (auto &entry : direct_dependencies) { + auto dependent = entry.first; + //! Skip the dependents that are also dependencies + if (dependencies_map.find(dependent) != dependencies_map.end()) { + continue; + } + bind_order.push(dependent); + visited.insert(dependent); + for (auto &dependency : direct_dependencies[dependent]) { + to_visit.push(dependency); + } + } -class UniqueConstraint : public Constraint { -public: - DUCKDB_API UniqueConstraint(uint64_t index, bool is_primary_key); - DUCKDB_API UniqueConstraint(vector columns, bool is_primary_key); + while (!to_visit.empty()) { + auto column = to_visit.front(); + to_visit.pop(); - //! The index of the column for which this constraint holds. Only used when the constraint relates to a single - //! column, equal to DConstants::INVALID_INDEX if not used - uint64_t index; - //! The set of columns for which this constraint holds by name. Only used when the index field is not used. - vector columns; - //! Whether or not this is a PRIMARY KEY constraint, or a UNIQUE constraint. - bool is_primary_key; + //! If this column does not have dependencies, the queue stops getting filled + if (direct_dependencies.find(column) == direct_dependencies.end()) { + continue; + } + bind_order.push(column); + visited.insert(column); -public: - DUCKDB_API string ToString() const override; + for (auto &dependency : direct_dependencies[column]) { + to_visit.push(dependency); + } + } - DUCKDB_API unique_ptr Copy() const override; + // Add generated columns that have no dependencies, but still might need to have their type resolved + for (auto &col : columns.Logical()) { + // Not a generated column + if (!col.Generated()) { + continue; + } + // Already added to the bind_order stack + if (visited.count(col.Logical())) { + continue; + } + bind_order.push(col.Logical()); + } - //! Serialize to a stand-alone binary blob - DUCKDB_API void Serialize(FieldWriter &writer) const override; - //! 
Deserializes a ParsedConstraint - DUCKDB_API static unique_ptr Deserialize(FieldReader &source); -}; + return bind_order; +} } // namespace duckdb +namespace duckdb { + +CopyFunctionCatalogEntry::CopyFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, + CreateCopyFunctionInfo *info) + : StandardEntry(CatalogType::COPY_FUNCTION_ENTRY, schema, catalog, info->name), function(info->function) { +} +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/parser/parsed_expression_iterator.hpp +// duckdb/storage/data_table.hpp // // //===----------------------------------------------------------------------===// @@ -5940,34 +6853,25 @@ class UniqueConstraint : public Constraint { -#include -namespace duckdb { -class ParsedExpressionIterator { -public: - static void EnumerateChildren(const ParsedExpression &expression, - const std::function &callback); - static void EnumerateChildren(ParsedExpression &expr, const std::function &callback); - static void EnumerateChildren(ParsedExpression &expr, - const std::function &child)> &callback); +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/table/table_statistics.hpp +// +// +//===----------------------------------------------------------------------===// + - static void EnumerateTableRefChildren(TableRef &ref, - const std::function &child)> &callback); - static void EnumerateQueryNodeChildren(QueryNode &node, - const std::function &child)> &callback); - static void EnumerateQueryNodeModifiers(QueryNode &node, - const std::function &child)> &callback); -}; -} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/planner/constraints/bound_check_constraint.hpp +// duckdb/storage/statistics/column_statistics.hpp // // //===----------------------------------------------------------------------===// @@ -5976,55 +6880,68 @@ class ParsedExpressionIterator { - - namespace duckdb { -//! The CheckConstraint contains an expression that must evaluate to TRUE for -//! every row in a table -class BoundCheckConstraint : public BoundConstraint { +class ColumnStatistics { public: - BoundCheckConstraint() : BoundConstraint(ConstraintType::CHECK) { - } + explicit ColumnStatistics(unique_ptr stats_p); - //! The expression - unique_ptr expression; - //! 
The columns used by the CHECK constraint - unordered_set bound_columns; + unique_ptr stats; + +public: + static shared_ptr CreateEmptyStats(const LogicalType &type); }; } // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/planner/constraints/bound_not_null_constraint.hpp -// -// -//===----------------------------------------------------------------------===// +namespace duckdb { +class PersistentTableData; + +class TableStatisticsLock { +public: + TableStatisticsLock(mutex &l) : guard(l) { + } + lock_guard guard; +}; +class TableStatistics { +public: + void Initialize(const vector &types, PersistentTableData &data); + void InitializeEmpty(const vector &types); + void InitializeAddColumn(TableStatistics &parent, const LogicalType &new_column_type); + void InitializeRemoveColumn(TableStatistics &parent, idx_t removed_column); + void InitializeAlterType(TableStatistics &parent, idx_t changed_idx, const LogicalType &new_type); + void InitializeAddConstraint(TableStatistics &parent); + void MergeStats(TableStatistics &other); + void MergeStats(idx_t i, BaseStatistics &stats); + void MergeStats(TableStatisticsLock &lock, idx_t i, BaseStatistics &stats); -namespace duckdb { + unique_ptr CopyStats(idx_t i); + ColumnStatistics &GetStats(idx_t i); -class BoundNotNullConstraint : public BoundConstraint { -public: - explicit BoundNotNullConstraint(column_t index) : BoundConstraint(ConstraintType::NOT_NULL), index(index) { - } + bool Empty(); - //! Column index this constraint pertains to - storage_t index; + unique_ptr GetLock(); + +private: + //! The statistics lock + mutex stats_lock; + //! Column statistics + vector> column_stats; }; } // namespace duckdb + + //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/planner/constraints/bound_unique_constraint.hpp +// duckdb/storage/table/column_segment.hpp // // //===----------------------------------------------------------------------===// @@ -6034,72 +6951,145 @@ class BoundNotNullConstraint : public BoundConstraint { + + + + + + + namespace duckdb { +class ColumnSegment; +class BlockManager; +class ColumnSegment; +class ColumnData; +class DatabaseInstance; +class Transaction; +class BaseStatistics; +class UpdateSegment; +class TableFilter; +struct ColumnFetchState; +struct ColumnScanState; +struct ColumnAppendState; -class BoundUniqueConstraint : public BoundConstraint { +enum class ColumnSegmentType : uint8_t { TRANSIENT, PERSISTENT }; +//! TableFilter represents a filter pushed down into the table scan. + +class ColumnSegment : public SegmentBase { public: - BoundUniqueConstraint(vector keys, unordered_set key_set, bool is_primary_key) - : BoundConstraint(ConstraintType::UNIQUE), keys(move(keys)), key_set(move(key_set)), - is_primary_key(is_primary_key) { -#ifdef DEBUG - D_ASSERT(keys.size() == key_set.size()); - for (auto &key : keys) { - D_ASSERT(key_set.find(key) != key_set.end()); - } -#endif - } + ~ColumnSegment() override; - //! The keys that define the unique constraint - vector keys; - //! The same keys but stored as an unordered set - unordered_set key_set; - //! Whether or not the unique constraint is a primary key - bool is_primary_key; -}; + //! The database instance + DatabaseInstance &db; + //! The type stored in the column + LogicalType type; + //! The size of the type + idx_t type_size; + //! The column segment type (transient or persistent) + ColumnSegmentType segment_type; + //! 
The compression function + CompressionFunction *function; + //! The statistics for the segment + SegmentStatistics stats; + //! The block that this segment relates to + shared_ptr block; -} // namespace duckdb + static unique_ptr CreatePersistentSegment(DatabaseInstance &db, BlockManager &block_manager, + block_id_t id, idx_t offset, const LogicalType &type_p, + idx_t start, idx_t count, CompressionType compression_type, + unique_ptr statistics); + static unique_ptr CreateTransientSegment(DatabaseInstance &db, const LogicalType &type, idx_t start, + idx_t segment_size = Storage::BLOCK_SIZE); + static unique_ptr CreateSegment(ColumnSegment &other, idx_t start); + +public: + void InitializeScan(ColumnScanState &state); + //! Scan one vector from this segment + void Scan(ColumnScanState &state, idx_t scan_count, Vector &result, idx_t result_offset, bool entire_vector); + //! Fetch a value of the specific row id and append it to the result + void FetchRow(ColumnFetchState &state, row_t row_id, Vector &result, idx_t result_idx); + static idx_t FilterSelection(SelectionVector &sel, Vector &result, const TableFilter &filter, + idx_t &approved_tuple_count, ValidityMask &mask); -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/planner/expression_binder/alter_binder.hpp -// -// -//===----------------------------------------------------------------------===// + //! Skip a scan forward to the row_index specified in the scan state + void Skip(ColumnScanState &state); + // The maximum size of the buffer (in bytes) + idx_t SegmentSize() const; + //! Resize the block + void Resize(idx_t segment_size); + //! Initialize an append of this segment. Appends are only supported on transient segments. + void InitializeAppend(ColumnAppendState &state); + //! Appends a (part of) vector to the segment, returns the amount of entries successfully appended + idx_t Append(ColumnAppendState &state, UnifiedVectorFormat &data, idx_t offset, idx_t count); + //! Finalize the segment for appending - no more appends can follow on this segment + //! The segment should be compacted as much as possible + //! Returns the number of bytes occupied within the segment + idx_t FinalizeAppend(ColumnAppendState &state); + //! Revert an append made to this segment + void RevertAppend(idx_t start_row); + //! Convert a transient in-memory segment into a persistent segment blocked by an on-disk block. + //! Only used during checkpointing. + void ConvertToPersistent(BlockManager *block_manager, block_id_t block_id); + //! Updates pointers to refer to the given block and offset. This is only used + //! when sharing a block among segments. This is invoked only AFTER the block is written. + void MarkAsPersistent(shared_ptr block, uint32_t offset_in_block); + block_id_t GetBlockId() { + D_ASSERT(segment_type == ColumnSegmentType::PERSISTENT); + return block_id; + } + BlockManager &GetBlockManager() const { + return block->block_manager; + } -namespace duckdb { -class TableCatalogEntry; + idx_t GetBlockOffset() { + D_ASSERT(segment_type == ColumnSegmentType::PERSISTENT || offset == 0); + return offset; + } -//! 
The ALTER binder is responsible for binding an expression within alter statements -class AlterBinder : public ExpressionBinder { -public: - AlterBinder(Binder &binder, ClientContext &context, TableCatalogEntry &table, vector &bound_columns, - LogicalType target_type); + idx_t GetRelativeIndex(idx_t row_index) { + D_ASSERT(row_index >= this->start); + D_ASSERT(row_index <= this->start + this->count); + return row_index - this->start; + } - TableCatalogEntry &table; - vector &bound_columns; + CompressedSegmentState *GetSegmentState() { + return segment_state.get(); + } -protected: - BindResult BindExpression(unique_ptr *expr_ptr, idx_t depth, - bool root_expression = false) override; +public: + ColumnSegment(DatabaseInstance &db, shared_ptr block, LogicalType type, ColumnSegmentType segment_type, + idx_t start, idx_t count, CompressionFunction *function, unique_ptr statistics, + block_id_t block_id, idx_t offset, idx_t segment_size); + ColumnSegment(ColumnSegment &other, idx_t start); - BindResult BindColumn(ColumnRefExpression &expr); +private: + void Scan(ColumnScanState &state, idx_t scan_count, Vector &result); + void ScanPartial(ColumnScanState &state, idx_t scan_count, Vector &result, idx_t result_offset); - string UnsupportedAggregateMessage() override; +private: + //! The block id that this segment relates to (persistent segment only) + block_id_t block_id; + //! The offset into the block (persistent segment only) + idx_t offset; + //! The allocated segment size + idx_t segment_size; + //! Storage associated with the compressed segment + unique_ptr segment_state; }; } // namespace duckdb + //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/planner/filter/null_filter.hpp +// duckdb/storage/table/row_group_collection.hpp // // //===----------------------------------------------------------------------===// @@ -6108,63 +7098,114 @@ class AlterBinder : public ExpressionBinder { + + + namespace duckdb { +struct ParallelTableScanState; -class IsNullFilter : public TableFilter { -public: - IsNullFilter(); +class PersistentTableData; +class TableDataWriter; +class TableIndexList; +class TableStatistics; +class RowGroupCollection { public: - FilterPropagateResult CheckStatistics(BaseStatistics &stats) override; - string ToString(const string &column_name) override; - void Serialize(FieldWriter &writer) const override; - static unique_ptr Deserialize(FieldReader &source); -}; + RowGroupCollection(shared_ptr info, BlockManager &block_manager, vector types, + idx_t row_start, idx_t total_rows = 0); -class IsNotNullFilter : public TableFilter { public: - IsNotNullFilter(); + idx_t GetTotalRows() const; + Allocator &GetAllocator() const; -public: - FilterPropagateResult CheckStatistics(BaseStatistics &stats) override; - string ToString(const string &column_name) override; - void Serialize(FieldWriter &writer) const override; - static unique_ptr Deserialize(FieldReader &source); -}; + void Initialize(PersistentTableData &data); + void InitializeEmpty(); -} // namespace duckdb + bool IsEmpty() const; + void AppendRowGroup(SegmentLock &l, idx_t start_row); + //! 
Get the nth row-group, negative numbers start from the back (so -1 is the last row group, etc) + RowGroup *GetRowGroup(int64_t index); + void Verify(); + void InitializeScan(CollectionScanState &state, const vector &column_ids, TableFilterSet *table_filters); + void InitializeCreateIndexScan(CreateIndexScanState &state); + void InitializeScanWithOffset(CollectionScanState &state, const vector &column_ids, idx_t start_row, + idx_t end_row); + static bool InitializeScanInRowGroup(CollectionScanState &state, RowGroup *row_group, idx_t vector_index, + idx_t max_row); + void InitializeParallelScan(ParallelCollectionScanState &state); + bool NextParallelScan(ClientContext &context, ParallelCollectionScanState &state, CollectionScanState &scan_state); + + bool Scan(Transaction &transaction, const vector &column_ids, + const std::function &fun); + bool Scan(Transaction &transaction, const std::function &fun); + + void Fetch(TransactionData transaction, DataChunk &result, const vector &column_ids, + Vector &row_identifiers, idx_t fetch_count, ColumnFetchState &state); + + //! Initialize an append of a variable number of rows. FinalizeAppend must be called after appending is done. + void InitializeAppend(TableAppendState &state); + //! Initialize an append with a known number of rows. FinalizeAppend should not be called after appending is done. + void InitializeAppend(TransactionData transaction, TableAppendState &state, idx_t append_count); + //! Appends to the row group collection. Returns true if a new row group has been created to append to + bool Append(DataChunk &chunk, TableAppendState &state); + //! FinalizeAppend flushes an append with a variable number of rows. + void FinalizeAppend(TransactionData transaction, TableAppendState &state); + void CommitAppend(transaction_t commit_id, idx_t row_start, idx_t count); + void RevertAppendInternal(idx_t start_row, idx_t count); -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/storage/storage_manager.hpp -// -// -//===----------------------------------------------------------------------===// + void MergeStorage(RowGroupCollection &data); + void RemoveFromIndexes(TableIndexList &indexes, Vector &row_identifiers, idx_t count); + idx_t Delete(TransactionData transaction, DataTable *table, row_t *ids, idx_t count); + void Update(TransactionData transaction, row_t *ids, const vector &column_ids, DataChunk &updates); + void UpdateColumn(TransactionData transaction, Vector &row_ids, const vector &column_path, + DataChunk &updates); + void Checkpoint(TableDataWriter &writer, vector> &global_stats); + void CommitDropColumn(idx_t index); + void CommitDropTable(); + vector> GetStorageInfo(); + const vector &GetTypes() const; -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/storage/write_ahead_log.hpp -// -// -//===----------------------------------------------------------------------===// + shared_ptr AddColumn(ClientContext &context, ColumnDefinition &new_column, + Expression *default_value); + shared_ptr RemoveColumn(idx_t col_idx); + shared_ptr AlterType(ClientContext &context, idx_t changed_idx, const LogicalType &target_type, + vector bound_columns, Expression &cast_expr); + void VerifyNewConstraint(DataTable &parent, const BoundConstraint &constraint); + + unique_ptr CopyStats(column_t column_id); + void SetStatistics(column_t column_id, const std::function &set_fun); +private: + bool IsEmpty(SegmentLock &) const; +private: + //! 
BlockManager + BlockManager &block_manager; + //! The number of rows in the table + atomic total_rows; + shared_ptr info; + vector types; + idx_t row_start; + //! The segment trees holding the various row_groups of the table + shared_ptr row_groups; + //! Table statistics + TableStatistics stats; +}; +} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/common/enums/wal_type.hpp +// duckdb/transaction/local_storage.hpp // // //===----------------------------------------------------------------------===// @@ -6173,1306 +7214,848 @@ class IsNotNullFilter : public TableFilter { -namespace duckdb { - -enum class WALType : uint8_t { - INVALID = 0, - // ----------------------------- - // Catalog - // ----------------------------- - CREATE_TABLE = 1, - DROP_TABLE = 2, - - CREATE_SCHEMA = 3, - DROP_SCHEMA = 4, - - CREATE_VIEW = 5, - DROP_VIEW = 6, - - CREATE_SEQUENCE = 8, - DROP_SEQUENCE = 9, - SEQUENCE_VALUE = 10, - - CREATE_MACRO = 11, - DROP_MACRO = 12, - - CREATE_TYPE = 13, - DROP_TYPE = 14, - - ALTER_INFO = 20, - - CREATE_TABLE_MACRO = 21, - DROP_TABLE_MACRO = 22, - // ----------------------------- - // Data - // ----------------------------- - USE_TABLE = 25, - INSERT_TUPLE = 26, - DELETE_TUPLE = 27, - UPDATE_TUPLE = 28, - // ----------------------------- - // Flush - // ----------------------------- - CHECKPOINT = 99, - WAL_FLUSH = 100 -}; -} +namespace duckdb { +class DataTable; +class WriteAheadLog; +struct TableAppendState; +class OptimisticDataWriter { +public: + OptimisticDataWriter(DataTable *table); + OptimisticDataWriter(DataTable *table, OptimisticDataWriter &parent); + ~OptimisticDataWriter(); + void CheckFlushToDisk(RowGroupCollection &row_groups); + //! Flushes a specific row group to disk + void FlushToDisk(RowGroup *row_group); + //! Flushes the final row group to disk (if any) + void FlushToDisk(RowGroupCollection &row_groups, bool force = false); + //! Final flush: flush the partial block manager to disk + void FinalFlush(); + void Rollback(); +private: + //! Prepare a write to disk + bool PrepareWrite(); +private: + //! The table + DataTable *table; + //! The partial block manager (if we created one yet) + unique_ptr partial_manager; + //! The set of blocks that have been pre-emptively written to disk + unordered_set written_blocks; +}; + +class LocalTableStorage : public std::enable_shared_from_this { +public: + // Create a new LocalTableStorage + explicit LocalTableStorage(DataTable &table); + // Create a LocalTableStorage from an ALTER TYPE + LocalTableStorage(ClientContext &context, DataTable &table, LocalTableStorage &parent, idx_t changed_idx, + const LogicalType &target_type, const vector &bound_columns, Expression &cast_expr); + // Create a LocalTableStorage from a DROP COLUMN + LocalTableStorage(DataTable &table, LocalTableStorage &parent, idx_t drop_idx); + // Create a LocalTableStorage from an ADD COLUMN + LocalTableStorage(ClientContext &context, DataTable &table, LocalTableStorage &parent, ColumnDefinition &new_column, + Expression *default_value); + ~LocalTableStorage(); -namespace duckdb { + DataTable *table; -struct AlterInfo; + Allocator &allocator; + //! The main chunk collection holding the data + shared_ptr row_groups; + //! The set of unique indexes + TableIndexList indexes; + //! The number of deleted rows + idx_t deleted_rows; + //! The main optimistic data writer + OptimisticDataWriter optimistic_writer; + //! 
The set of all optimistic data writers associated with this table + vector> optimistic_writers; + +public: + void InitializeScan(CollectionScanState &state, TableFilterSet *table_filters = nullptr); + //! Check if we should flush the previously written row-group to disk + void CheckFlushToDisk(); + //! Flushes the final row group to disk (if any) + void FlushToDisk(); + void Rollback(); + idx_t EstimatedSize(); + + void AppendToIndexes(Transaction &transaction, TableAppendState &append_state, idx_t append_count, + bool append_to_table); + bool AppendToIndexes(Transaction &transaction, RowGroupCollection &source, TableIndexList &index_list, + const vector &table_types, row_t &start_row); + + //! Creates an optimistic writer for this table + OptimisticDataWriter *CreateOptimisticWriter(); +}; + +class LocalTableManager { +public: + shared_ptr MoveEntry(DataTable *table); + unordered_map> MoveEntries(); + LocalTableStorage *GetStorage(DataTable *table); + LocalTableStorage *GetOrCreateStorage(DataTable *table); + idx_t EstimatedSize(); + bool IsEmpty(); + void InsertEntry(DataTable *table, shared_ptr entry); -class BufferedSerializer; -class Catalog; -class DatabaseInstance; -class SchemaCatalogEntry; -class SequenceCatalogEntry; -class ScalarMacroCatalogEntry; -class ViewCatalogEntry; -class TypeCatalogEntry; -class TableCatalogEntry; -class Transaction; -class TransactionManager; +private: + mutex table_storage_lock; + unordered_map> table_storage; +}; -//! The WriteAheadLog (WAL) is a log that is used to provide durability. Prior -//! to committing a transaction it writes the changes the transaction made to -//! the database to the log, which can then be replayed upon startup in case the -//! server crashes or is shut down. -class WriteAheadLog { +//! The LocalStorage class holds appends that have not been committed yet +class LocalStorage { public: - explicit WriteAheadLog(DatabaseInstance &database); + // Threshold to merge row groups instead of appending + static constexpr const idx_t MERGE_THRESHOLD = RowGroup::ROW_GROUP_SIZE / 2; - //! Whether or not the WAL has been initialized - bool initialized; - //! Skip writing to the WAL - bool skip_writing; +public: + struct CommitState { + unordered_map> append_states; + }; public: - //! Replay the WAL - static bool Replay(DatabaseInstance &database, string &path); + explicit LocalStorage(ClientContext &context, Transaction &transaction); - //! Initialize the WAL in the specified directory - void Initialize(string &path); - //! Returns the current size of the WAL in bytes - int64_t GetWALSize(); - //! Gets the total bytes written to the WAL since startup - idx_t GetTotalWritten(); + static LocalStorage &Get(Transaction &transaction); + static LocalStorage &Get(ClientContext &context); - void WriteCreateTable(TableCatalogEntry *entry); - void WriteDropTable(TableCatalogEntry *entry); + //! Initialize a scan of the local storage + void InitializeScan(DataTable *table, CollectionScanState &state, TableFilterSet *table_filters); + //! Scan + void Scan(CollectionScanState &state, const vector &column_ids, DataChunk &result); - void WriteCreateSchema(SchemaCatalogEntry *entry); - void WriteDropSchema(SchemaCatalogEntry *entry); + void InitializeParallelScan(DataTable *table, ParallelCollectionScanState &state); + bool NextParallelScan(ClientContext &context, DataTable *table, ParallelCollectionScanState &state, + CollectionScanState &scan_state); - void WriteCreateView(ViewCatalogEntry *entry); - void WriteDropView(ViewCatalogEntry *entry); + //! 
Begin appending to the local storage + void InitializeAppend(LocalAppendState &state, DataTable *table); + //! Append a chunk to the local storage + static void Append(LocalAppendState &state, DataChunk &chunk); + //! Finish appending to the local storage + static void FinalizeAppend(LocalAppendState &state); + //! Merge a row group collection into the transaction-local storage + void LocalMerge(DataTable *table, RowGroupCollection &collection); + //! Create an optimistic writer for the specified table + OptimisticDataWriter *CreateOptimisticWriter(DataTable *table); - void WriteCreateSequence(SequenceCatalogEntry *entry); - void WriteDropSequence(SequenceCatalogEntry *entry); - void WriteSequenceValue(SequenceCatalogEntry *entry, SequenceValue val); + //! Delete a set of rows from the local storage + idx_t Delete(DataTable *table, Vector &row_ids, idx_t count); + //! Update a set of rows in the local storage + void Update(DataTable *table, Vector &row_ids, const vector &column_ids, DataChunk &data); - void WriteCreateMacro(ScalarMacroCatalogEntry *entry); - void WriteDropMacro(ScalarMacroCatalogEntry *entry); + //! Commits the local storage, writing it to the WAL and completing the commit + void Commit(LocalStorage::CommitState &commit_state, Transaction &transaction); + //! Rollback the local storage + void Rollback(); - void WriteCreateTableMacro(TableMacroCatalogEntry *entry); - void WriteDropTableMacro(TableMacroCatalogEntry *entry); + bool ChangesMade() noexcept; + idx_t EstimatedSize(); - void WriteCreateType(TypeCatalogEntry *entry); - void WriteDropType(TypeCatalogEntry *entry); - //! Sets the table used for subsequent insert/delete/update commands - void WriteSetTable(string &schema, string &table); + bool Find(DataTable *table); - void WriteAlter(AlterInfo &info); + idx_t AddedRows(DataTable *table); - void WriteInsert(DataChunk &chunk); - void WriteDelete(DataChunk &chunk); - //! Write a single (sub-) column update to the WAL. Chunk must be a pair of (COL, ROW_ID). - //! The column_path vector is a *path* towards a column within the table - //! i.e. if we have a table with a single column S STRUCT(A INT, B INT) - //! and we update the validity mask of "S.B" - //! the column path is: - //! 0 (first column of table) - //! -> 1 (second subcolumn of struct) - //! -> 0 (first subcolumn of INT) - void WriteUpdate(DataChunk &chunk, const vector &column_path); + void AddColumn(DataTable *old_dt, DataTable *new_dt, ColumnDefinition &new_column, Expression *default_value); + void DropColumn(DataTable *old_dt, DataTable *new_dt, idx_t removed_column); + void ChangeType(DataTable *old_dt, DataTable *new_dt, idx_t changed_idx, const LogicalType &target_type, + const vector &bound_columns, Expression &cast_expr); - //! Truncate the WAL to a previous size, and clear anything currently set in the writer - void Truncate(int64_t size); - //! Delete the WAL file on disk. The WAL should not be used after this point. 
- void Delete(); - void Flush(); + void MoveStorage(DataTable *old_dt, DataTable *new_dt); + void FetchChunk(DataTable *table, Vector &row_ids, idx_t count, DataChunk &chunk); + TableIndexList &GetIndexes(DataTable *table); - void WriteCheckpoint(block_id_t meta_block); + void VerifyNewConstraint(DataTable &parent, const BoundConstraint &constraint); private: - DatabaseInstance &database; - unique_ptr writer; - string wal_path; + ClientContext &context; + Transaction &transaction; + LocalTableManager table_manager; + + void Flush(DataTable &table, LocalTableStorage &storage); }; } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/table/data_table_info.hpp +// +// +//===----------------------------------------------------------------------===// -namespace duckdb { -class BlockManager; -class Catalog; -class DatabaseInstance; -class TransactionManager; -class TableCatalogEntry; -//! StorageManager is responsible for managing the physical storage of the -//! database on disk -class StorageManager { -public: - StorageManager(DatabaseInstance &db, string path, bool read_only); - ~StorageManager(); - //! The BlockManager to read/store meta information and data in blocks - unique_ptr block_manager; - //! The BufferManager of the database - unique_ptr buffer_manager; - //! The database this storagemanager belongs to - DatabaseInstance &db; -public: - static StorageManager &GetStorageManager(ClientContext &context); - static StorageManager &GetStorageManager(DatabaseInstance &db); - //! Initialize a database or load an existing database from the given path - void Initialize(); - //! Get the WAL of the StorageManager, returns nullptr if in-memory - WriteAheadLog *GetWriteAheadLog() { - return wal.initialized ? &wal : nullptr; - } - DatabaseInstance &GetDatabase() { - return db; - } - void CreateCheckpoint(bool delete_wal = false, bool force_checkpoint = false); +namespace duckdb { +class DatabaseInstance; +class TableIOManager; - string GetDBPath() { - return path; +struct DataTableInfo { + DataTableInfo(DatabaseInstance &db, shared_ptr table_io_manager_p, string schema, string table) + : db(db), table_io_manager(move(table_io_manager_p)), cardinality(0), schema(move(schema)), table(move(table)) { } - bool InMemory(); -private: - //! Load the database from a directory - void LoadDatabase(); + //! The database instance of the table + DatabaseInstance &db; + //! The table IO manager + shared_ptr table_io_manager; + //! The amount of elements in the table. Note that this number signifies the amount of COMMITTED entries in the + //! table. It can be inaccurate inside of transactions. More work is needed to properly support that. + atomic cardinality; + // schema of the table + string schema; + // name of the table + string table; - //! The path of the database - string path; - //! The WriteAheadLog of the storage manager - WriteAheadLog wal; + TableIndexList indexes; - //! 
Whether or not the database is opened in read-only mode - bool read_only; + bool IsTemporary() { + return schema == TEMP_SCHEMA; + } }; } // namespace duckdb -#include - namespace duckdb { +class ClientContext; +class ColumnDataCollection; +class ColumnDefinition; +class DataTable; +class OptimisticDataWriter; +class RowGroup; +class StorageManager; +class TableCatalogEntry; +class TableIOManager; +class Transaction; +class WriteAheadLog; +class TableDataWriter; -const string &TableCatalogEntry::GetColumnName(column_t index) { - return columns[index].Name(); -} +//! DataTable represents a physical table on disk +class DataTable { +public: + //! Constructs a new data table from an (optional) set of persistent segments + DataTable(DatabaseInstance &db, shared_ptr table_io_manager, const string &schema, + const string &table, vector column_definitions_p, + unique_ptr data = nullptr); + //! Constructs a DataTable as a delta on an existing data table with a newly added column + DataTable(ClientContext &context, DataTable &parent, ColumnDefinition &new_column, Expression *default_value); + //! Constructs a DataTable as a delta on an existing data table but with one column removed + DataTable(ClientContext &context, DataTable &parent, idx_t removed_column); + //! Constructs a DataTable as a delta on an existing data table but with one column changed type + DataTable(ClientContext &context, DataTable &parent, idx_t changed_idx, const LogicalType &target_type, + const vector &bound_columns, Expression &cast_expr); + //! Constructs a DataTable as a delta on an existing data table but with one column added new constraint + DataTable(ClientContext &context, DataTable &parent, unique_ptr constraint); -column_t TableCatalogEntry::GetColumnIndex(string &column_name, bool if_exists) { - auto entry = name_map.find(column_name); - if (entry == name_map.end()) { - // entry not found: try lower-casing the name - entry = name_map.find(StringUtil::Lower(column_name)); - if (entry == name_map.end()) { - if (if_exists) { - return DConstants::INVALID_INDEX; - } - throw BinderException("Table \"%s\" does not have a column with name \"%s\"", name, column_name); - } - } - if (entry->second == COLUMN_IDENTIFIER_ROW_ID) { - column_name = "rowid"; - return COLUMN_IDENTIFIER_ROW_ID; - } - column_name = GetColumnName(entry->second); - return entry->second; -} + //! The table info + shared_ptr info; + //! The set of physical columns stored by this DataTable + vector column_definitions; + //! A reference to the database instance + DatabaseInstance &db; -void AddDataTableIndex(DataTable *storage, vector &columns, vector &keys, - IndexConstraintType constraint_type, BlockPointer *index_block = nullptr) { - // fetch types and create expressions for the index from the columns - vector column_ids; - vector> unbound_expressions; - vector> bound_expressions; - idx_t key_nr = 0; - for (auto &key : keys) { - D_ASSERT(key < columns.size()); - auto &column = columns[key]; - if (column.Generated()) { - throw InvalidInputException("Creating index on generated column is not supported"); - } +public: + //! 
Returns a list of types of the table + vector GetTypes(); - unbound_expressions.push_back(make_unique(columns[key].Name(), columns[key].Type(), - ColumnBinding(0, column_ids.size()))); + void InitializeScan(TableScanState &state, const vector &column_ids, + TableFilterSet *table_filter = nullptr); + void InitializeScan(Transaction &transaction, TableScanState &state, const vector &column_ids, + TableFilterSet *table_filters = nullptr); - bound_expressions.push_back(make_unique(columns[key].Type(), key_nr++)); - column_ids.push_back(column.StorageOid()); - } - // create an adaptive radix tree around the expressions - if (index_block) { - auto art = make_unique(column_ids, move(unbound_expressions), constraint_type, storage->db, - index_block->block_id, index_block->offset); - storage->info->indexes.AddIndex(move(art)); - } else { - auto art = make_unique(column_ids, move(unbound_expressions), constraint_type, storage->db); - storage->AddIndex(move(art), bound_expressions); - } -} + //! Returns the maximum amount of threads that should be assigned to scan this data table + idx_t MaxThreads(ClientContext &context); + void InitializeParallelScan(ClientContext &context, ParallelTableScanState &state); + bool NextParallelScan(ClientContext &context, ParallelTableScanState &state, TableScanState &scan_state); -TableCatalogEntry::TableCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, BoundCreateTableInfo *info, - std::shared_ptr inherited_storage) - : StandardEntry(CatalogType::TABLE_ENTRY, schema, catalog, info->Base().table), storage(move(inherited_storage)), - columns(move(info->Base().columns)), constraints(move(info->Base().constraints)), - bound_constraints(move(info->bound_constraints)), - column_dependency_manager(move(info->column_dependency_manager)) { - this->temporary = info->Base().temporary; - // add lower case aliases - this->name_map = move(info->name_map); -#ifdef DEBUG - D_ASSERT(name_map.size() == columns.size()); - for (idx_t i = 0; i < columns.size(); i++) { - D_ASSERT(name_map[columns[i].Name()] == i); - } -#endif - // add the "rowid" alias, if there is no rowid column specified in the table - if (name_map.find("rowid") == name_map.end()) { - name_map["rowid"] = COLUMN_IDENTIFIER_ROW_ID; - } - if (!storage) { - // create the physical storage - vector storage_columns; - vector get_columns; - for (auto &col_def : columns) { - get_columns.push_back(col_def.Copy()); - if (col_def.Generated()) { - continue; - } - storage_columns.push_back(col_def.Copy()); - } - storage = make_shared(catalog->db, schema->name, name, move(storage_columns), move(info->data)); - // create the unique indexes for the UNIQUE and PRIMARY KEY and FOREIGN KEY constraints - idx_t indexes_idx = 0; - for (idx_t i = 0; i < bound_constraints.size(); i++) { - auto &constraint = bound_constraints[i]; - if (constraint->type == ConstraintType::UNIQUE) { - // unique constraint: create a unique index - auto &unique = (BoundUniqueConstraint &)*constraint; - IndexConstraintType constraint_type = IndexConstraintType::UNIQUE; - if (unique.is_primary_key) { - constraint_type = IndexConstraintType::PRIMARY; - } - if (info->indexes.empty()) { - AddDataTableIndex(storage.get(), get_columns, unique.keys, constraint_type); - } else { - AddDataTableIndex(storage.get(), get_columns, unique.keys, constraint_type, - &info->indexes[indexes_idx++]); - } - } else if (constraint->type == ConstraintType::FOREIGN_KEY) { - // foreign key constraint: create a foreign key index - auto &bfk = (BoundForeignKeyConstraint 
&)*constraint; - if (bfk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE || - bfk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { - if (info->indexes.empty()) { - AddDataTableIndex(storage.get(), get_columns, bfk.info.fk_keys, IndexConstraintType::FOREIGN); - } else { - AddDataTableIndex(storage.get(), get_columns, bfk.info.fk_keys, IndexConstraintType::FOREIGN, - &info->indexes[indexes_idx++]); - } - } - } - } - } -} + //! Scans up to STANDARD_VECTOR_SIZE elements from the table starting + //! from offset and store them in result. Offset is incremented with how many + //! elements were returned. + //! Returns true if all pushed down filters were executed during data fetching + void Scan(Transaction &transaction, DataChunk &result, TableScanState &state); -bool TableCatalogEntry::ColumnExists(const string &name) { - auto iterator = name_map.find(name); - if (iterator == name_map.end()) { - return false; - } - return true; -} + //! Fetch data from the specific row identifiers from the base table + void Fetch(Transaction &transaction, DataChunk &result, const vector &column_ids, Vector &row_ids, + idx_t fetch_count, ColumnFetchState &state); -idx_t TableCatalogEntry::StandardColumnCount() const { - idx_t count = 0; - for (auto &col : columns) { - if (col.Category() == TableColumnType::STANDARD) { - count++; - } - } - return count; -} + //! Initializes an append to transaction-local storage + void InitializeLocalAppend(LocalAppendState &state, ClientContext &context); + //! Append a DataChunk to the transaction-local storage of the table. + void LocalAppend(LocalAppendState &state, TableCatalogEntry &table, ClientContext &context, DataChunk &chunk); + //! Finalizes a transaction-local append + void FinalizeLocalAppend(LocalAppendState &state); + //! Append a chunk to the transaction-local storage of this table + void LocalAppend(TableCatalogEntry &table, ClientContext &context, DataChunk &chunk); + //! Append a column data collection to the transaction-local storage of this table + void LocalAppend(TableCatalogEntry &table, ClientContext &context, ColumnDataCollection &collection); + //! Merge a row group collection into the transaction-local storage + void LocalMerge(ClientContext &context, RowGroupCollection &collection); + //! Creates an optimistic writer for this table - used for optimistically writing parallel appends + OptimisticDataWriter *CreateOptimisticWriter(ClientContext &context); -unique_ptr TableCatalogEntry::GetStatistics(ClientContext &context, column_t column_id) { - if (column_id == COLUMN_IDENTIFIER_ROW_ID) { - return nullptr; - } - if (column_id >= columns.size()) { - throw InternalException("TableCatalogEntry::GetStatistics column_id out of range"); - } - if (columns[column_id].Generated()) { - return nullptr; - } - return storage->GetStatistics(context, columns[column_id].StorageOid()); -} + //! Delete the entries with the specified row identifier from the table + idx_t Delete(TableCatalogEntry &table, ClientContext &context, Vector &row_ids, idx_t count); + //! Update the entries with the specified row identifier from the table + void Update(TableCatalogEntry &table, ClientContext &context, Vector &row_ids, + const vector &column_ids, DataChunk &data); + //! Update a single (sub-)column along a column path + //! The column_path vector is a *path* towards a column within the table + //! i.e. if we have a table with a single column S STRUCT(A INT, B INT) + //! and we update the validity mask of "S.B" + //! the column path is: + //! 
0 (first column of table) + //! -> 1 (second subcolumn of struct) + //! -> 0 (first subcolumn of INT) + //! This method should only be used from the WAL replay. It does not verify update constraints. + void UpdateColumn(TableCatalogEntry &table, ClientContext &context, Vector &row_ids, + const vector &column_path, DataChunk &updates); -unique_ptr TableCatalogEntry::AlterEntry(ClientContext &context, AlterInfo *info) { - D_ASSERT(!internal); - if (info->type != AlterType::ALTER_TABLE) { - throw CatalogException("Can only modify table with ALTER TABLE statement"); - } - auto table_info = (AlterTableInfo *)info; - switch (table_info->alter_table_type) { - case AlterTableType::RENAME_COLUMN: { - auto rename_info = (RenameColumnInfo *)table_info; - return RenameColumn(context, *rename_info); - } - case AlterTableType::RENAME_TABLE: { - auto rename_info = (RenameTableInfo *)table_info; - auto copied_table = Copy(context); - copied_table->name = rename_info->new_table_name; - return copied_table; - } - case AlterTableType::ADD_COLUMN: { - auto add_info = (AddColumnInfo *)table_info; - return AddColumn(context, *add_info); - } - case AlterTableType::REMOVE_COLUMN: { - auto remove_info = (RemoveColumnInfo *)table_info; - return RemoveColumn(context, *remove_info); - } - case AlterTableType::SET_DEFAULT: { - auto set_default_info = (SetDefaultInfo *)table_info; - return SetDefault(context, *set_default_info); - } - case AlterTableType::ALTER_COLUMN_TYPE: { - auto change_type_info = (ChangeColumnTypeInfo *)table_info; - return ChangeColumnType(context, *change_type_info); - } - case AlterTableType::FOREIGN_KEY_CONSTRAINT: { - auto foreign_key_constraint_info = (AlterForeignKeyInfo *)table_info; - if (foreign_key_constraint_info->type == AlterForeignKeyType::AFT_ADD) { - return AddForeignKeyConstraint(context, *foreign_key_constraint_info); - } else { - return DropForeignKeyConstraint(context, *foreign_key_constraint_info); - } - } - case AlterTableType::SET_NOT_NULL: { - auto set_not_null_info = (SetNotNullInfo *)table_info; - return SetNotNull(context, *set_not_null_info); - } - case AlterTableType::DROP_NOT_NULL: { - auto drop_not_null_info = (DropNotNullInfo *)table_info; - return DropNotNull(context, *drop_not_null_info); - } - default: - throw InternalException("Unrecognized alter table type!"); - } -} + //! Fetches an append lock + void AppendLock(TableAppendState &state); + //! Begin appending structs to this table, obtaining necessary locks, etc + void InitializeAppend(Transaction &transaction, TableAppendState &state, idx_t append_count); + //! Append a chunk to the table using the AppendState obtained from InitializeAppend + void Append(DataChunk &chunk, TableAppendState &state); + //! Commit the append + void CommitAppend(transaction_t commit_id, idx_t row_start, idx_t count); + //! Write a segment of the table to the WAL + void WriteToLog(WriteAheadLog &log, idx_t row_start, idx_t count); + //! Revert a set of appends made by the given AppendState, used to revert appends in the event of an error during + //! commit (e.g. 
because of an I/O exception) + void RevertAppend(idx_t start_row, idx_t count); + void RevertAppendInternal(idx_t start_row, idx_t count); -static void RenameExpression(ParsedExpression &expr, RenameColumnInfo &info) { - if (expr.type == ExpressionType::COLUMN_REF) { - auto &colref = (ColumnRefExpression &)expr; - if (colref.column_names.back() == info.old_name) { - colref.column_names.back() = info.new_name; - } - } - ParsedExpressionIterator::EnumerateChildren( - expr, [&](const ParsedExpression &child) { RenameExpression((ParsedExpression &)child, info); }); -} + void ScanTableSegment(idx_t start_row, idx_t count, const std::function &function); -unique_ptr TableCatalogEntry::RenameColumn(ClientContext &context, RenameColumnInfo &info) { - auto rename_idx = GetColumnIndex(info.old_name); - if (rename_idx == COLUMN_IDENTIFIER_ROW_ID) { - throw CatalogException("Cannot rename rowid column"); - } - auto create_info = make_unique(schema->name, name); - create_info->temporary = temporary; - for (idx_t i = 0; i < columns.size(); i++) { - auto copy = columns[i].Copy(); + //! Merge a row group collection directly into this table - appending it to the end of the table without copying + void MergeStorage(RowGroupCollection &data, TableIndexList &indexes); - if (rename_idx == i) { - copy.SetName(info.new_name); - } - create_info->columns.push_back(move(copy)); - auto &col = create_info->columns[i]; - if (col.Generated() && column_dependency_manager.IsDependencyOf(i, rename_idx)) { - RenameExpression(col.GeneratedExpressionMutable(), info); - } + //! Append a chunk with the row ids [row_start, ..., row_start + chunk.size()] to all indexes of the table, returns + //! whether or not the append succeeded + bool AppendToIndexes(DataChunk &chunk, row_t row_start); + static bool AppendToIndexes(TableIndexList &indexes, DataChunk &chunk, row_t row_start); + //! Remove a chunk with the row ids [row_start, ..., row_start + chunk.size()] from all indexes of the table + void RemoveFromIndexes(TableAppendState &state, DataChunk &chunk, row_t row_start); + //! Remove the chunk with the specified set of row identifiers from all indexes of the table + void RemoveFromIndexes(TableAppendState &state, DataChunk &chunk, Vector &row_identifiers); + //! 
Remove the row identifiers from all the indexes of the table + void RemoveFromIndexes(Vector &row_identifiers, idx_t count); + + void SetAsRoot() { + this->is_root = true; } - for (idx_t c_idx = 0; c_idx < constraints.size(); c_idx++) { - auto copy = constraints[c_idx]->Copy(); - switch (copy->type) { - case ConstraintType::NOT_NULL: - // NOT NULL constraint: no adjustments necessary - break; - case ConstraintType::CHECK: { - // CHECK constraint: need to rename column references that refer to the renamed column - auto &check = (CheckConstraint &)*copy; - RenameExpression(*check.expression, info); - break; - } - case ConstraintType::UNIQUE: { - // UNIQUE constraint: possibly need to rename columns - auto &unique = (UniqueConstraint &)*copy; - for (idx_t i = 0; i < unique.columns.size(); i++) { - if (unique.columns[i] == info.old_name) { - unique.columns[i] = info.new_name; - } - } - break; - } - case ConstraintType::FOREIGN_KEY: { - // FOREIGN KEY constraint: possibly need to rename columns - auto &fk = (ForeignKeyConstraint &)*copy; - vector columns = fk.pk_columns; - if (fk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE) { - columns = fk.fk_columns; - } else if (fk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { - for (idx_t i = 0; i < fk.fk_columns.size(); i++) { - columns.push_back(fk.fk_columns[i]); - } - } - for (idx_t i = 0; i < columns.size(); i++) { - if (columns[i] == info.old_name) { - throw CatalogException( - "Cannot rename column \"%s\" because this is involved in the foreign key constraint", - info.old_name); - } - } - break; - } - default: - throw InternalException("Unsupported constraint for entry!"); - } - create_info->constraints.push_back(move(copy)); + bool IsRoot() { + return this->is_root; } - auto binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); -} -unique_ptr TableCatalogEntry::AddColumn(ClientContext &context, AddColumnInfo &info) { - auto col_name = info.new_column.GetName(); + //! Get statistics of a physical column within the table + unique_ptr GetStatistics(ClientContext &context, column_t column_id); + //! Sets statistics of a physical column within the table + void SetStatistics(column_t column_id, const std::function &set_fun); - // We're checking for the opposite condition (ADD COLUMN IF _NOT_ EXISTS ...). - if (info.if_column_not_exists && GetColumnIndex(col_name, true) != DConstants::INVALID_INDEX) { - return nullptr; - } + //! Checkpoint the table to the specified table data writer + void Checkpoint(TableDataWriter &writer); + void CommitDropTable(); + void CommitDropColumn(idx_t index); - auto create_info = make_unique(schema->name, name); - create_info->temporary = temporary; + idx_t GetTotalRows(); - for (idx_t i = 0; i < columns.size(); i++) { - create_info->columns.push_back(columns[i].Copy()); - } - for (auto &constraint : constraints) { - create_info->constraints.push_back(constraint->Copy()); - } - Binder::BindLogicalType(context, info.new_column.TypeMutable(), schema->name); - info.new_column.SetOid(columns.size()); - info.new_column.SetStorageOid(storage->column_definitions.size()); + vector> GetStorageInfo(); + static bool IsForeignKeyIndex(const vector &fk_keys, Index &index, ForeignKeyType fk_type); - auto col = info.new_column.Copy(); + //! 
Initializes a special scan that is used to create an index on the table, it keeps locks on the table + void InitializeCreateIndexScan(CreateIndexScanState &state, const vector &column_ids); + //! Scans the next chunk for the CREATE INDEX operator + bool CreateIndexScan(TableScanState &state, DataChunk &result, TableScanType type); - create_info->columns.push_back(move(col)); + //! Verify constraints with a chunk from the Append containing all columns of the table + void VerifyAppendConstraints(TableCatalogEntry &table, ClientContext &context, DataChunk &chunk); - auto binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); - auto new_storage = - make_shared(context, *storage, info.new_column, bound_create_info->bound_defaults.back().get()); - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), - new_storage); -} +private: + //! Verify the new added constraints against current persistent&local data + void VerifyNewConstraint(ClientContext &context, DataTable &parent, const BoundConstraint *constraint); + //! Verify constraints with a chunk from the Update containing only the specified column_ids + void VerifyUpdateConstraints(ClientContext &context, TableCatalogEntry &table, DataChunk &chunk, + const vector &column_ids); + //! Verify constraints with a chunk from the Delete containing all columns of the table + void VerifyDeleteConstraints(TableCatalogEntry &table, ClientContext &context, DataChunk &chunk); -unique_ptr TableCatalogEntry::RemoveColumn(ClientContext &context, RemoveColumnInfo &info) { - auto removed_index = GetColumnIndex(info.removed_column, info.if_column_exists); - if (removed_index == DConstants::INVALID_INDEX) { - if (!info.if_column_exists) { - throw CatalogException("Cannot drop column: rowid column cannot be dropped"); - } - return nullptr; - } + void InitializeScanWithOffset(TableScanState &state, const vector &column_ids, idx_t start_row, + idx_t end_row); - auto create_info = make_unique(schema->name, name); - create_info->temporary = temporary; +private: + //! Lock for appending entries to the table + mutex append_lock; + //! The row groups of the table + shared_ptr row_groups; + //! Whether or not the data table is the root DataTable for this table; the root DataTable is the newest version + //! 
that can be appended to + atomic is_root; +}; +} // namespace duckdb - unordered_set removed_columns; - if (column_dependency_manager.HasDependents(removed_index)) { - removed_columns = column_dependency_manager.GetDependents(removed_index); - } - if (!removed_columns.empty() && !info.cascade) { - throw CatalogException("Cannot drop column: column is a dependency of 1 or more generated column(s)"); - } - for (idx_t i = 0; i < columns.size(); i++) { - auto &col = columns[i]; - if (i == removed_index || removed_columns.count(i)) { - continue; - } - create_info->columns.push_back(col.Copy()); - } - if (create_info->columns.empty()) { - throw CatalogException("Cannot drop column: table only has one column remaining!"); - } - vector adjusted_indices = column_dependency_manager.RemoveColumn(removed_index, columns.size()); - // handle constraints for the new table - D_ASSERT(constraints.size() == bound_constraints.size()); - for (idx_t constr_idx = 0; constr_idx < constraints.size(); constr_idx++) { - auto &constraint = constraints[constr_idx]; - auto &bound_constraint = bound_constraints[constr_idx]; - switch (constraint->type) { - case ConstraintType::NOT_NULL: { - auto ¬_null_constraint = (BoundNotNullConstraint &)*bound_constraint; - if (not_null_constraint.index != removed_index) { - // the constraint is not about this column: we need to copy it - // we might need to shift the index back by one though, to account for the removed column - idx_t new_index = not_null_constraint.index; - new_index = adjusted_indices[new_index]; - create_info->constraints.push_back(make_unique(new_index)); - } - break; - } - case ConstraintType::CHECK: { - // CHECK constraint - auto &bound_check = (BoundCheckConstraint &)*bound_constraint; - // check if the removed column is part of the check constraint - if (bound_check.bound_columns.find(removed_index) != bound_check.bound_columns.end()) { - if (bound_check.bound_columns.size() > 1) { - // CHECK constraint that concerns mult - throw CatalogException( - "Cannot drop column \"%s\" because there is a CHECK constraint that depends on it", - info.removed_column); - } else { - // CHECK constraint that ONLY concerns this column, strip the constraint - } - } else { - // check constraint does not concern the removed column: simply re-add it - create_info->constraints.push_back(constraint->Copy()); - } - break; - } - case ConstraintType::UNIQUE: { - auto copy = constraint->Copy(); - auto &unique = (UniqueConstraint &)*copy; - if (unique.index != DConstants::INVALID_INDEX) { - if (unique.index == removed_index) { - throw CatalogException( - "Cannot drop column \"%s\" because there is a UNIQUE constraint that depends on it", - info.removed_column); - } - unique.index = adjusted_indices[unique.index]; - } - create_info->constraints.push_back(move(copy)); - break; - } - case ConstraintType::FOREIGN_KEY: { - auto copy = constraint->Copy(); - auto &fk = (ForeignKeyConstraint &)*copy; - vector columns = fk.pk_columns; - if (fk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE) { - columns = fk.fk_columns; - } else if (fk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { - for (idx_t i = 0; i < fk.fk_columns.size(); i++) { - columns.push_back(fk.fk_columns[i]); - } - } - for (idx_t i = 0; i < columns.size(); i++) { - if (columns[i] == info.removed_column) { - throw CatalogException( - "Cannot drop column \"%s\" because there is a FOREIGN KEY constraint that depends on it", - info.removed_column); - } - } - create_info->constraints.push_back(move(copy)); - break; 
- } - default: - throw InternalException("Unsupported constraint for entry!"); - } - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/art.hpp +// +// +//===----------------------------------------------------------------------===// - auto binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); - if (columns[removed_index].Generated()) { - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), - storage); - } - auto new_storage = make_shared(context, *storage, columns[removed_index].StorageOid()); - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), - new_storage); -} -unique_ptr TableCatalogEntry::SetDefault(ClientContext &context, SetDefaultInfo &info) { - auto create_info = make_unique(schema->name, name); - auto default_idx = GetColumnIndex(info.column_name); - if (default_idx == COLUMN_IDENTIFIER_ROW_ID) { - throw CatalogException("Cannot SET DEFAULT for rowid column"); - } - // Copy all the columns, changing the value of the one that was specified by 'column_name' - for (idx_t i = 0; i < columns.size(); i++) { - auto copy = columns[i].Copy(); - if (default_idx == i) { - // set the default value of this column - if (copy.Generated()) { - throw BinderException("Cannot SET DEFAULT for generated column \"%s\"", columns[i].Name()); - } - copy.SetDefaultValue(info.expression ? info.expression->Copy() : nullptr); - } - create_info->columns.push_back(move(copy)); - } - // Copy all the constraints - for (idx_t i = 0; i < constraints.size(); i++) { - auto constraint = constraints[i]->Copy(); - create_info->constraints.push_back(move(constraint)); - } - auto binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); -} -unique_ptr TableCatalogEntry::SetNotNull(ClientContext &context, SetNotNullInfo &info) { - auto create_info = make_unique(schema->name, name); - for (idx_t i = 0; i < columns.size(); i++) { - auto copy = columns[i].Copy(); - create_info->columns.push_back(move(copy)); - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/art_key.hpp +// +// +//===----------------------------------------------------------------------===// - idx_t not_null_idx = GetColumnIndex(info.column_name); - if (columns[not_null_idx].Generated()) { - throw BinderException("Unsupported constraint for generated column!"); - } - bool has_not_null = false; - for (idx_t i = 0; i < constraints.size(); i++) { - auto constraint = constraints[i]->Copy(); - if (constraint->type == ConstraintType::NOT_NULL) { - auto ¬_null = (NotNullConstraint &)*constraint; - if (not_null.index == not_null_idx) { - has_not_null = true; - } - } - create_info->constraints.push_back(move(constraint)); - } - if (!has_not_null) { - create_info->constraints.push_back(make_unique(not_null_idx)); - } - auto binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); - // Early return - if (has_not_null) { - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), - storage); - } - // Return with new storage info. Note that we need the bound column index here. 
- auto new_storage = make_shared(context, *storage, - make_unique(columns[not_null_idx].StorageOid())); - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), - new_storage); -} -unique_ptr TableCatalogEntry::DropNotNull(ClientContext &context, DropNotNullInfo &info) { - auto create_info = make_unique(schema->name, name); - for (idx_t i = 0; i < columns.size(); i++) { - auto copy = columns[i].Copy(); - create_info->columns.push_back(move(copy)); - } - idx_t not_null_idx = GetColumnIndex(info.column_name); - for (idx_t i = 0; i < constraints.size(); i++) { - auto constraint = constraints[i]->Copy(); - // Skip/drop not_null - if (constraint->type == ConstraintType::NOT_NULL) { - auto ¬_null = (NotNullConstraint &)*constraint; - if (not_null.index == not_null_idx) { - continue; - } - } - create_info->constraints.push_back(move(constraint)); - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/bit_operations.hpp +// +// +//===----------------------------------------------------------------------===// - auto binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); -} -unique_ptr TableCatalogEntry::ChangeColumnType(ClientContext &context, ChangeColumnTypeInfo &info) { - if (info.target_type.id() == LogicalTypeId::USER) { - auto &catalog = Catalog::GetCatalog(context); - info.target_type = catalog.GetType(context, schema->name, UserType::GetTypeName(info.target_type)); - } - auto change_idx = GetColumnIndex(info.column_name); - auto create_info = make_unique(schema->name, name); - create_info->temporary = temporary; - for (idx_t i = 0; i < columns.size(); i++) { - auto copy = columns[i].Copy(); - if (change_idx == i) { - // set the type of this column - if (copy.Generated()) { - throw NotImplementedException("Changing types of generated columns is not supported yet"); - // copy.ChangeGeneratedExpressionType(info.target_type); - } - copy.SetType(info.target_type); - } - // TODO: check if the generated_expression breaks, only delete it if it does - if (copy.Generated() && column_dependency_manager.IsDependencyOf(i, change_idx)) { - throw BinderException( - "This column is referenced by the generated column \"%s\", so its type can not be changed", - copy.Name()); - } - create_info->columns.push_back(move(copy)); - } - for (idx_t i = 0; i < constraints.size(); i++) { - auto constraint = constraints[i]->Copy(); - switch (constraint->type) { - case ConstraintType::CHECK: { - auto &bound_check = (BoundCheckConstraint &)*bound_constraints[i]; - if (bound_check.bound_columns.find(change_idx) != bound_check.bound_columns.end()) { - throw BinderException("Cannot change the type of a column that has a CHECK constraint specified"); - } - break; - } - case ConstraintType::NOT_NULL: - break; - case ConstraintType::UNIQUE: { - auto &bound_unique = (BoundUniqueConstraint &)*bound_constraints[i]; - if (bound_unique.key_set.find(change_idx) != bound_unique.key_set.end()) { - throw BinderException( - "Cannot change the type of a column that has a UNIQUE or PRIMARY KEY constraint specified"); - } - break; - } - case ConstraintType::FOREIGN_KEY: { - auto &bfk = (BoundForeignKeyConstraint &)*bound_constraints[i]; - unordered_set key_set = bfk.pk_key_set; - if (bfk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE) { - key_set = bfk.fk_key_set; - } else if 
(bfk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { - for (idx_t i = 0; i < bfk.info.fk_keys.size(); i++) { - key_set.insert(bfk.info.fk_keys[i]); - } - } - if (key_set.find(change_idx) != key_set.end()) { - throw BinderException("Cannot change the type of a column that has a FOREIGN KEY constraint specified"); - } - break; - } - default: - throw InternalException("Unsupported constraint for entry!"); - } - create_info->constraints.push_back(move(constraint)); - } - auto binder = Binder::CreateBinder(context); - // bind the specified expression - vector bound_columns; - AlterBinder expr_binder(*binder, context, *this, bound_columns, info.target_type); - auto expression = info.expression->Copy(); - auto bound_expression = expr_binder.Bind(expression); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); - vector storage_oids; - if (bound_columns.empty()) { - storage_oids.push_back(COLUMN_IDENTIFIER_ROW_ID); - } - // transform to storage_oid - else { - for (idx_t i = 0; i < bound_columns.size(); i++) { - storage_oids.push_back(columns[bound_columns[i]].StorageOid()); - } - } - auto new_storage = make_shared(context, *storage, columns[change_idx].StorageOid(), info.target_type, - move(storage_oids), *bound_expression); - auto result = - make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), new_storage); - return move(result); -} -unique_ptr TableCatalogEntry::AddForeignKeyConstraint(ClientContext &context, AlterForeignKeyInfo &info) { - D_ASSERT(info.type == AlterForeignKeyType::AFT_ADD); - auto create_info = make_unique(schema->name, name); - create_info->temporary = temporary; - for (idx_t i = 0; i < columns.size(); i++) { - create_info->columns.push_back(columns[i].Copy()); - } - for (idx_t i = 0; i < constraints.size(); i++) { - create_info->constraints.push_back(constraints[i]->Copy()); - } - ForeignKeyInfo fk_info; - fk_info.type = ForeignKeyType::FK_TYPE_PRIMARY_KEY_TABLE; - fk_info.schema = info.schema; - fk_info.table = info.fk_table; - fk_info.pk_keys = info.pk_keys; - fk_info.fk_keys = info.fk_keys; - create_info->constraints.push_back( - make_unique(info.pk_columns, info.fk_columns, move(fk_info))); - auto binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); +#include +#include // strlen() on Solaris +#include - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); -} +namespace duckdb { -unique_ptr TableCatalogEntry::DropForeignKeyConstraint(ClientContext &context, - AlterForeignKeyInfo &info) { - D_ASSERT(info.type == AlterForeignKeyType::AFT_DELETE); - auto create_info = make_unique(schema->name, name); - create_info->temporary = temporary; +#define BSWAP16(x) ((uint16_t)((((uint16_t)(x)&0xff00) >> 8) | (((uint16_t)(x)&0x00ff) << 8))) - for (idx_t i = 0; i < columns.size(); i++) { - create_info->columns.push_back(columns[i].Copy()); - } - for (idx_t i = 0; i < constraints.size(); i++) { - auto constraint = constraints[i]->Copy(); - if (constraint->type == ConstraintType::FOREIGN_KEY) { - ForeignKeyConstraint &fk = (ForeignKeyConstraint &)*constraint; - if (fk.info.type == ForeignKeyType::FK_TYPE_PRIMARY_KEY_TABLE && fk.info.table == info.fk_table) { - continue; - } - } - create_info->constraints.push_back(move(constraint)); - } +#define BSWAP32(x) \ + ((uint32_t)((((uint32_t)(x)&0xff000000) >> 24) | (((uint32_t)(x)&0x00ff0000) >> 8) | \ + (((uint32_t)(x)&0x0000ff00) << 8) | (((uint32_t)(x)&0x000000ff) << 24))) - auto 
binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); +#define BSWAP64(x) \ + ((uint64_t)((((uint64_t)(x)&0xff00000000000000ull) >> 56) | (((uint64_t)(x)&0x00ff000000000000ull) >> 40) | \ + (((uint64_t)(x)&0x0000ff0000000000ull) >> 24) | (((uint64_t)(x)&0x000000ff00000000ull) >> 8) | \ + (((uint64_t)(x)&0x00000000ff000000ull) << 8) | (((uint64_t)(x)&0x0000000000ff0000ull) << 24) | \ + (((uint64_t)(x)&0x000000000000ff00ull) << 40) | (((uint64_t)(x)&0x00000000000000ffull) << 56))) - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); -} +struct Radix { +public: + static inline bool IsLittleEndian() { + int n = 1; + if (*(char *)&n == 1) { + return true; + } else { + return false; + } + } -ColumnDefinition &TableCatalogEntry::GetColumn(const string &name) { - auto entry = name_map.find(name); - if (entry == name_map.end() || entry->second == COLUMN_IDENTIFIER_ROW_ID) { - throw CatalogException("Column with name %s does not exist!", name); + template + static inline void EncodeData(data_ptr_t dataptr, T value) { + throw NotImplementedException("Cannot create data from this type"); } - auto column_index = entry->second; - return columns[column_index]; -} -vector TableCatalogEntry::GetTypes() { - vector types; - for (auto &it : columns) { - if (it.Generated()) { - continue; + static inline void EncodeStringDataPrefix(data_ptr_t dataptr, string_t value, idx_t prefix_len) { + auto len = value.GetSize(); + memcpy(dataptr, value.GetDataUnsafe(), MinValue(len, prefix_len)); + if (len < prefix_len) { + memset(dataptr + len, '\0', prefix_len - len); } - types.push_back(it.Type()); } - return types; -} - -void TableCatalogEntry::Serialize(Serializer &serializer) { - D_ASSERT(!internal); - - FieldWriter writer(serializer); - writer.WriteString(schema->name); - writer.WriteString(name); - writer.WriteRegularSerializableList(columns); - writer.WriteSerializableList(constraints); - writer.Finalize(); -} - -unique_ptr TableCatalogEntry::Deserialize(Deserializer &source, ClientContext &context) { - auto info = make_unique(); - - FieldReader reader(source); - info->schema = reader.ReadRequired(); - info->table = reader.ReadRequired(); - info->columns = reader.ReadRequiredSerializableList(); - info->constraints = reader.ReadRequiredSerializableList(); - reader.Finalize(); - - return info; -} - -string TableCatalogEntry::ToSQL() { - std::stringstream ss; - ss << "CREATE TABLE "; - - if (schema->name != DEFAULT_SCHEMA) { - ss << KeywordHelper::WriteOptionallyQuoted(schema->name) << "."; + static inline uint8_t FlipSign(uint8_t key_byte) { + return key_byte ^ 128; } - ss << KeywordHelper::WriteOptionallyQuoted(name) << "("; + static inline uint32_t EncodeFloat(float x) { + uint64_t buff; - // find all columns that have NOT NULL specified, but are NOT primary key columns - unordered_set not_null_columns; - unordered_set unique_columns; - unordered_set pk_columns; - unordered_set multi_key_pks; - vector extra_constraints; - for (auto &constraint : constraints) { - if (constraint->type == ConstraintType::NOT_NULL) { - auto ¬_null = (NotNullConstraint &)*constraint; - not_null_columns.insert(not_null.index); - } else if (constraint->type == ConstraintType::UNIQUE) { - auto &pk = (UniqueConstraint &)*constraint; - vector constraint_columns = pk.columns; - if (pk.index != DConstants::INVALID_INDEX) { - // no columns specified: single column constraint - if (pk.is_primary_key) { - pk_columns.insert(pk.index); - } else 
{ - unique_columns.insert(pk.index); - } - } else { - // multi-column constraint, this constraint needs to go at the end after all columns - if (pk.is_primary_key) { - // multi key pk column: insert set of columns into multi_key_pks - for (auto &col : pk.columns) { - multi_key_pks.insert(col); - } - } - extra_constraints.push_back(constraint->ToString()); - } - } else if (constraint->type == ConstraintType::FOREIGN_KEY) { - auto &fk = (ForeignKeyConstraint &)*constraint; - if (fk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE || - fk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { - extra_constraints.push_back(constraint->ToString()); - } - } else { - extra_constraints.push_back(constraint->ToString()); + //! zero + if (x == 0) { + buff = 0; + buff |= (1u << 31); + return buff; + } + // nan + if (Value::IsNan(x)) { + return UINT_MAX; + } + //! infinity + if (x > FLT_MAX) { + return UINT_MAX - 1; } + //! -infinity + if (x < -FLT_MAX) { + return 0; + } + buff = Load((const_data_ptr_t)&x); + if ((buff & (1u << 31)) == 0) { //! +0 and positive numbers + buff |= (1u << 31); + } else { //! negative numbers + buff = ~buff; //! complement 1 + } + + return buff; } - for (idx_t i = 0; i < columns.size(); i++) { - if (i > 0) { - ss << ", "; - } - auto &column = columns[i]; - ss << KeywordHelper::WriteOptionallyQuoted(column.Name()) << " "; - ss << column.Type().ToString(); - bool not_null = not_null_columns.find(column.Oid()) != not_null_columns.end(); - bool is_single_key_pk = pk_columns.find(column.Oid()) != pk_columns.end(); - bool is_multi_key_pk = multi_key_pks.find(column.Name()) != multi_key_pks.end(); - bool is_unique = unique_columns.find(column.Oid()) != unique_columns.end(); - if (not_null && !is_single_key_pk && !is_multi_key_pk) { - // NOT NULL but not a primary key column - ss << " NOT NULL"; + static inline uint64_t EncodeDouble(double x) { + uint64_t buff; + //! zero + if (x == 0) { + buff = 0; + buff += (1ull << 63); + return buff; } - if (is_single_key_pk) { - // single column pk: insert constraint here - ss << " PRIMARY KEY"; + // nan + if (Value::IsNan(x)) { + return ULLONG_MAX; } - if (is_unique) { - // single column unique: insert constraint here - ss << " UNIQUE"; + //! infinity + if (x > DBL_MAX) { + return ULLONG_MAX - 1; } - if (column.DefaultValue()) { - ss << " DEFAULT(" << column.DefaultValue()->ToString() << ")"; + //! -infinity + if (x < -DBL_MAX) { + return 0; } - if (column.Generated()) { - ss << " GENERATED ALWAYS AS(" << column.GeneratedExpression().ToString() << ")"; + buff = Load((const_data_ptr_t)&x); + if (buff < (1ull << 63)) { //! +0 and positive numbers + buff += (1ull << 63); + } else { //! negative numbers + buff = ~buff; //! complement 1 } + return buff; } - // print any extra constraints that still need to be printed - for (auto &extra_constraint : extra_constraints) { - ss << ", "; - ss << extra_constraint; - } +}; - ss << ");"; - return ss.str(); +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, bool value) { + Store(value ? 
1 : 0, dataptr); } -unique_ptr TableCatalogEntry::Copy(ClientContext &context) { - auto create_info = make_unique(schema->name, name); - for (idx_t i = 0; i < columns.size(); i++) { - create_info->columns.push_back(columns[i].Copy()); - } - - for (idx_t i = 0; i < constraints.size(); i++) { - auto constraint = constraints[i]->Copy(); - create_info->constraints.push_back(move(constraint)); - } - - auto binder = Binder::CreateBinder(context); - auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); - return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, int8_t value) { + Store(value, dataptr); + dataptr[0] = FlipSign(dataptr[0]); } -void TableCatalogEntry::SetAsRoot() { - storage->SetAsRoot(); +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, int16_t value) { + Store(BSWAP16(value), dataptr); + dataptr[0] = FlipSign(dataptr[0]); } -void TableCatalogEntry::CommitAlter(AlterInfo &info) { - D_ASSERT(info.type == AlterType::ALTER_TABLE); - auto &alter_table = (AlterTableInfo &)info; - string column_name; - switch (alter_table.alter_table_type) { - case AlterTableType::REMOVE_COLUMN: { - auto &remove_info = (RemoveColumnInfo &)alter_table; - column_name = remove_info.removed_column; - break; - } - case AlterTableType::ALTER_COLUMN_TYPE: { - auto &change_info = (ChangeColumnTypeInfo &)alter_table; - column_name = change_info.column_name; - break; - } - default: - break; - } - if (column_name.empty()) { - return; - } - idx_t removed_index = DConstants::INVALID_INDEX; - for (idx_t i = 0; i < columns.size(); i++) { - auto &col = columns[i]; - if (col.Name() == column_name) { - // No need to alter storage, removed column is generated column - if (col.Generated()) { - return; - } - removed_index = i; - break; - } - } - D_ASSERT(removed_index != DConstants::INVALID_INDEX); - storage->CommitDropColumn(columns[removed_index].StorageOid()); +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, int32_t value) { + Store(BSWAP32(value), dataptr); + dataptr[0] = FlipSign(dataptr[0]); } -void TableCatalogEntry::CommitDrop() { - storage->CommitDropTable(); +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, int64_t value) { + Store(BSWAP64(value), dataptr); + dataptr[0] = FlipSign(dataptr[0]); } -} // namespace duckdb - - +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, uint8_t value) { + Store(value, dataptr); +} -namespace duckdb { +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, uint16_t value) { + Store(BSWAP16(value), dataptr); +} -TableFunctionCatalogEntry::TableFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, - CreateTableFunctionInfo *info) - : StandardEntry(CatalogType::TABLE_FUNCTION_ENTRY, schema, catalog, info->name), functions(move(info->functions)) { - D_ASSERT(this->functions.Size() > 0); +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, uint32_t value) { + Store(BSWAP32(value), dataptr); } -} // namespace duckdb +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, uint64_t value) { + Store(BSWAP64(value), dataptr); +} +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, hugeint_t value) { + EncodeData(dataptr, value.upper); + EncodeData(dataptr + sizeof(value.upper), value.lower); +} +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, float value) { + uint32_t converted_value = EncodeFloat(value); + Store(BSWAP32(converted_value), dataptr); +} 
+template <> +inline void Radix::EncodeData(data_ptr_t dataptr, double value) { + uint64_t converted_value = EncodeDouble(value); + Store(BSWAP64(converted_value), dataptr); +} +template <> +inline void Radix::EncodeData(data_ptr_t dataptr, interval_t value) { + EncodeData(dataptr, value.months); + dataptr += sizeof(value.months); + EncodeData(dataptr, value.days); + dataptr += sizeof(value.days); + EncodeData(dataptr, value.micros); +} +} // namespace duckdb -#include -#include namespace duckdb { -TypeCatalogEntry::TypeCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateTypeInfo *info) - : StandardEntry(CatalogType::TYPE_ENTRY, schema, catalog, info->name), user_type(info->type) { - this->temporary = info->temporary; - this->internal = info->internal; -} +class Key { +public: + Key(); + Key(data_ptr_t data, idx_t len); + Key(ArenaAllocator &allocator, idx_t len); -void TypeCatalogEntry::Serialize(Serializer &serializer) { - D_ASSERT(!internal); - FieldWriter writer(serializer); - writer.WriteString(schema->name); - writer.WriteString(name); - writer.WriteSerializable(user_type); - writer.Finalize(); -} + idx_t len; + data_ptr_t data; -unique_ptr TypeCatalogEntry::Deserialize(Deserializer &source) { - auto info = make_unique(); +public: + template + static inline Key CreateKey(ArenaAllocator &allocator, T element) { + auto data = Key::CreateData(allocator, element); + return Key(data, sizeof(element)); + } - FieldReader reader(source); - info->schema = reader.ReadRequired(); - info->name = reader.ReadRequired(); - info->type = reader.ReadRequiredSerializable(); - reader.Finalize(); + template + static inline Key CreateKey(ArenaAllocator &allocator, const Value &element) { + return CreateKey(allocator, element.GetValueUnsafe()); + } - return info; -} + template + static inline void CreateKey(ArenaAllocator &allocator, Key &key, T element) { + key.data = Key::CreateData(allocator, element); + key.len = sizeof(element); + } -string TypeCatalogEntry::ToSQL() { - std::stringstream ss; - switch (user_type.id()) { - case (LogicalTypeId::ENUM): { - Vector values_insert_order(EnumType::GetValuesInsertOrder(user_type)); - idx_t size = EnumType::GetSize(user_type); - ss << "CREATE TYPE "; - ss << KeywordHelper::WriteOptionallyQuoted(name); - ss << " AS ENUM ( "; + template + static inline void CreateKey(ArenaAllocator &allocator, Key &key, const Value element) { + key.data = Key::CreateData(allocator, element.GetValueUnsafe()); + key.len = sizeof(element); + } - for (idx_t i = 0; i < size; i++) { - ss << "'" << values_insert_order.GetValue(i).ToString() << "'"; - if (i != size - 1) { - ss << ", "; - } - } - ss << ");"; - break; +public: + data_t &operator[](size_t i) { + return data[i]; } - default: - throw InternalException("Logical Type can't be used as a User Defined Type"); + const data_t &operator[](size_t i) const { + return data[i]; } + bool operator>(const Key &k) const; + bool operator<(const Key &k) const; + bool operator>=(const Key &k) const; + bool operator==(const Key &k) const; - return ss.str(); -} - -} // namespace duckdb + bool ByteMatches(Key &other, idx_t &depth); + bool Empty(); + void ConcatenateKey(ArenaAllocator &allocator, Key &concat_key); +private: + template + static inline data_ptr_t CreateData(ArenaAllocator &allocator, T value) { + auto data = allocator.Allocate(sizeof(value)); + Radix::EncodeData(data, value); + return data; + } +}; +template <> +Key Key::CreateKey(ArenaAllocator &allocator, string_t value); +template <> +Key Key::CreateKey(ArenaAllocator 
&allocator, const char *value); +template <> +void Key::CreateKey(ArenaAllocator &allocator, Key &key, string_t value); +template <> +void Key::CreateKey(ArenaAllocator &allocator, Key &key, const char *value); +} // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/iterator.hpp +// +// +//===----------------------------------------------------------------------===// +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/leaf.hpp +// +// +//===----------------------------------------------------------------------===// -#include -namespace duckdb { +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/node.hpp +// +// +//===----------------------------------------------------------------------===// -void ViewCatalogEntry::Initialize(CreateViewInfo *info) { - query = move(info->query); - this->aliases = info->aliases; - this->types = info->types; - this->temporary = info->temporary; - this->sql = info->sql; - this->internal = info->internal; -} -ViewCatalogEntry::ViewCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateViewInfo *info) - : StandardEntry(CatalogType::VIEW_ENTRY, schema, catalog, info->view_name) { - Initialize(info); -} -unique_ptr ViewCatalogEntry::AlterEntry(ClientContext &context, AlterInfo *info) { - D_ASSERT(!internal); - if (info->type != AlterType::ALTER_VIEW) { - throw CatalogException("Can only modify view with ALTER VIEW statement"); - } - auto view_info = (AlterViewInfo *)info; - switch (view_info->alter_view_type) { - case AlterViewType::RENAME_VIEW: { - auto rename_info = (RenameViewInfo *)view_info; - auto copied_view = Copy(context); - copied_view->name = rename_info->new_view_name; - return copied_view; - } - default: - throw InternalException("Unrecognized alter view type!"); - } -} -void ViewCatalogEntry::Serialize(Serializer &serializer) { - D_ASSERT(!internal); - FieldWriter writer(serializer); - writer.WriteString(schema->name); - writer.WriteString(name); - writer.WriteString(sql); - writer.WriteSerializable(*query); - writer.WriteList(aliases); - writer.WriteRegularSerializableList(types); - writer.Finalize(); -} -unique_ptr ViewCatalogEntry::Deserialize(Deserializer &source, ClientContext &context) { - auto info = make_unique(); +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/prefix.hpp +// +// +//===----------------------------------------------------------------------===// - FieldReader reader(source); - info->schema = reader.ReadRequired(); - info->view_name = reader.ReadRequired(); - info->sql = reader.ReadRequired(); - info->query = reader.ReadRequiredSerializable(); - info->aliases = reader.ReadRequiredList(); - info->types = reader.ReadRequiredSerializableList(); - reader.Finalize(); - return info; -} -string ViewCatalogEntry::ToSQL() { - if (sql.empty()) { - //! 
Return empty sql with view name so pragma view_tables don't complain - return sql; - } - return sql + "\n;"; -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/meta_block_reader.hpp +// +// +//===----------------------------------------------------------------------===// + -unique_ptr ViewCatalogEntry::Copy(ClientContext &context) { - D_ASSERT(!internal); - auto create_info = make_unique(schema->name, name); - create_info->query = unique_ptr_cast(query->Copy()); - for (idx_t i = 0; i < aliases.size(); i++) { - create_info->aliases.push_back(aliases[i]); - } - for (idx_t i = 0; i < types.size(); i++) { - create_info->types.push_back(types[i]); - } - create_info->temporary = temporary; - create_info->sql = sql; - return make_unique(catalog, schema, create_info.get()); -} -} // namespace duckdb namespace duckdb { +class BlockHandle; +class BlockManager; +class BufferHandle; +class DatabaseInstance; -CatalogEntry::CatalogEntry(CatalogType type, Catalog *catalog_p, string name_p) - : oid(catalog_p->ModifyCatalog()), type(type), catalog(catalog_p), set(nullptr), name(move(name_p)), deleted(false), - temporary(false), internal(false), parent(nullptr) { -} +//! This struct is responsible for reading meta data from disk +class MetaBlockReader : public Deserializer { +public: + MetaBlockReader(BlockManager &block_manager, block_id_t block, bool free_blocks_on_read = true); + ~MetaBlockReader() override; -CatalogEntry::~CatalogEntry() { -} + BlockManager &block_manager; + shared_ptr block; + BufferHandle handle; + idx_t offset; + block_id_t next_block; + bool free_blocks_on_read; -void CatalogEntry::SetAsRoot() { -} +public: + //! Read content of size read_size into the buffer + void ReadData(data_ptr_t buffer, idx_t read_size) override; -// LCOV_EXCL_START -unique_ptr CatalogEntry::AlterEntry(ClientContext &context, AlterInfo *info) { - throw InternalException("Unsupported alter type for catalog entry!"); -} +private: + void ReadNewBlock(block_id_t id); +}; +} // namespace duckdb -unique_ptr CatalogEntry::Copy(ClientContext &context) { - throw InternalException("Unsupported copy type for catalog entry!"); -} -string CatalogEntry::ToSQL() { - throw InternalException("Unsupported catalog type for ToSQL()"); -} -// LCOV_EXCL_STOP -} // namespace duckdb +namespace duckdb { +class Prefix { + static constexpr idx_t PREFIX_INLINE_BYTES = 8; + +public: + Prefix(); + // Prefix created from key starting on `depth` + Prefix(Key &key, uint32_t depth, uint32_t size); + // Prefix created from other prefix up to size + Prefix(Prefix &other_prefix, uint32_t size); + ~Prefix(); + + // Returns the Prefix's size + uint32_t Size() const; + //! Return a pointer to the prefix data + uint8_t *GetPrefixData(); + const uint8_t *GetPrefixData() const; + + // Subscript operator + uint8_t &operator[](idx_t idx); + + // Assign operator + Prefix &operator=(const Prefix &src); + + // Move operator + Prefix &operator=(Prefix &&other) noexcept; + + // Concatenate Prefix with a key and another prefix + // Used when deleting a Node. 
+ // other.prefix + key + this->Prefix + void Concatenate(uint8_t key, Prefix &other); + // Reduces the prefix in n elements, and returns what would be the first one as a key + uint8_t Reduce(uint32_t n); + // Serializes Prefix + void Serialize(duckdb::MetaBlockWriter &writer); + // Deserializes Prefix + void Deserialize(duckdb::MetaBlockReader &reader); + + // Compare the key with the prefix of the node, return the position where it mismatches + uint32_t KeyMismatchPosition(Key &key, uint64_t depth); + //! Compare this prefix to another prefix, return the position where they mismatch, or size otherwise + uint32_t MismatchPosition(Prefix &other); + +private: + uint32_t size; + union { + uint8_t *ptr; + uint8_t inlined[8]; + } value; +private: + bool IsInlined() const; + uint8_t *AllocatePrefix(uint32_t size); + void Overwrite(uint32_t new_size, uint8_t *data); + void Destroy(); +}; +} // namespace duckdb @@ -7480,69 +8063,241 @@ string CatalogEntry::ToSQL() { namespace duckdb { +enum class NodeType : uint8_t { NLeaf = 0, N4 = 1, N16 = 2, N48 = 3, N256 = 4 }; +class ART; +class Node; -CatalogSearchPath::CatalogSearchPath(ClientContext &context_p) : context(context_p) { - SetPaths(ParsePaths("")); -} +// Note: SwizzleablePointer assumes top 33 bits of the block_id are 0. Use a different +// pointer implementation if that does not hold. +class SwizzleablePointer; +using ARTPointer = SwizzleablePointer; -void CatalogSearchPath::Set(vector &new_paths, bool is_set_schema) { - if (is_set_schema && new_paths.size() != 1) { - throw CatalogException("SET schema can set only 1 schema. This has %d", new_paths.size()); +struct InternalType { + explicit InternalType(Node *n); + + void Set(uint8_t *key_p, uint16_t key_size_p, ARTPointer *children_p, uint16_t children_size_p); + uint8_t *key; + uint16_t key_size; + ARTPointer *children; + uint16_t children_size; +}; + +struct MergeInfo { + MergeInfo(ART *l_art, ART *r_art, Node *&l_node, Node *&r_node) + : l_art(l_art), r_art(r_art), l_node(l_node), r_node(r_node) {}; + ART *l_art; + ART *r_art; + Node *&l_node; + Node *&r_node; +}; + +struct ParentsOfNodes { + ParentsOfNodes(Node *&l_parent, idx_t l_pos, Node *&r_parent, idx_t r_pos) + : l_parent(l_parent), l_pos(l_pos), r_parent(r_parent), r_pos(r_pos) {}; + Node *&l_parent; + idx_t l_pos; + Node *&r_parent; + idx_t r_pos; +}; + +class Node { +public: + static const uint8_t EMPTY_MARKER = 48; + +public: + explicit Node(NodeType type); + virtual ~Node() { } - auto &catalog = Catalog::GetCatalog(context); - for (const auto &path : new_paths) { - if (!catalog.GetSchema(context, StringUtil::Lower(path), true)) { - throw CatalogException("SET %s: No schema named %s found.", is_set_schema ? "schema" : "search_path", path); - } + + //! Number of non-null children + uint16_t count; + //! Node type + NodeType type; + //! Compressed path (prefix) + Prefix prefix; + + static void Delete(Node *node); + //! Get the position of a child corresponding exactly to the specific byte, returns DConstants::INVALID_INDEX if not + //! exists + virtual idx_t GetChildPos(uint8_t k) { + return DConstants::INVALID_INDEX; } - this->set_paths = move(new_paths); - SetPaths(set_paths); -} + //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX + //! if there are no children matching the criteria + virtual idx_t GetChildGreaterEqual(uint8_t k, bool &equal) { + throw InternalException("Unimplemented GetChildGreaterEqual for ART node"); + } + //! 
Get the position of the minimum element in the node + virtual idx_t GetMin(); + //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position. if pos == + //! DConstants::INVALID_INDEX, then the first valid position in the node is returned + virtual idx_t GetNextPos(idx_t pos) { + return DConstants::INVALID_INDEX; + } + //! Get the child at the specified position in the node. pos should be between [0, count). Throws an assertion if + //! the element is not found + virtual Node *GetChild(ART &art, idx_t pos); + //! Replaces the pointer to a child node + virtual void ReplaceChildPointer(idx_t pos, Node *node); -void CatalogSearchPath::Set(const string &new_value, bool is_set_schema) { - auto new_paths = ParsePaths(new_value); - Set(new_paths, is_set_schema); -} + //! Insert a new child node at key_byte into the node + static void InsertChild(Node *&node, uint8_t key_byte, Node *new_child); + //! Erase child node entry from node + static void EraseChild(Node *&node, idx_t pos, ART &art); + //! Get the corresponding node type for the provided size + static NodeType GetTypeBySize(idx_t size); + //! Create a new node of the specified type + static void New(NodeType &type, Node *&node); + + //! Returns the string representation of a node + string ToString(ART &art); + //! Serialize this node + BlockPointer Serialize(ART &art, duckdb::MetaBlockWriter &writer); -const vector &CatalogSearchPath::Get() { - return paths; -} + //! Deserialize this node + static Node *Deserialize(ART &art, idx_t block_id, idx_t offset); + //! Merge r_node into l_node at the specified byte + static bool MergeAtByte(MergeInfo &info, idx_t depth, idx_t &l_child_pos, idx_t &r_pos, uint8_t &key_byte, + Node *&l_parent, idx_t l_pos); + //! Merge two ART + static bool MergeARTs(ART *l_art, ART *r_art); -const string &CatalogSearchPath::GetOrDefault(const string &name) { - return name == INVALID_SCHEMA ? GetDefault() : name; // NOLINT -} +private: + //! Serialize internal nodes + BlockPointer SerializeInternal(ART &art, duckdb::MetaBlockWriter &writer, InternalType &internal_type); + //! Deserialize internal nodes + void DeserializeInternal(duckdb::MetaBlockReader &reader); +}; -const string &CatalogSearchPath::GetDefault() { - const auto &paths = Get(); - D_ASSERT(paths.size() >= 2); - D_ASSERT(paths[0] == TEMP_SCHEMA); - return paths[1]; -} +} // namespace duckdb -void CatalogSearchPath::SetPaths(vector new_paths) { - paths.clear(); - paths.reserve(new_paths.size() + 3); - paths.emplace_back(TEMP_SCHEMA); - for (auto &path : new_paths) { - paths.push_back(move(path)); + + +namespace duckdb { + +class Leaf : public Node { +public: + Leaf(Key &value, uint32_t depth, row_t row_id); + Leaf(Key &value, uint32_t depth, row_t *row_ids, idx_t num_elements); + Leaf(row_t *row_ids, idx_t num_elements, Prefix &prefix); + Leaf(row_t row_id, Prefix &prefix); + ~Leaf(); + + row_t GetRowId(idx_t index); + idx_t GetCapacity() const; + bool IsInlined() const; + row_t *GetRowIds(); + +public: + static Leaf *New(Key &value, uint32_t depth, row_t row_id); + static Leaf *New(Key &value, uint32_t depth, row_t *row_ids, idx_t num_elements); + static Leaf *New(row_t *row_ids, idx_t num_elements, Prefix &prefix); + static Leaf *New(row_t row_id, Prefix &prefix); + //! Insert a row_id into a leaf + void Insert(row_t row_id); + //! Remove a row_id from a leaf + void Remove(row_t row_id); + + //! Returns the string representation of a leaf + static string ToString(Node *node); + //! 
Merge two NLeaf nodes + static void Merge(Node *&l_node, Node *&r_node); + + //! Serialize a leaf + BlockPointer Serialize(duckdb::MetaBlockWriter &writer); + // Deserialize a leaf + static Leaf *Deserialize(duckdb::MetaBlockReader &reader); + +private: + union { + row_t inlined; + row_t *ptr; + } rowids; + +private: + row_t *Resize(row_t *current_row_ids, uint32_t current_count, idx_t new_capacity); +}; + +} // namespace duckdb + + + +namespace duckdb { + +struct IteratorEntry { + IteratorEntry() { + } + IteratorEntry(Node *node, idx_t pos) : node(node), pos(pos) { } - paths.emplace_back(DEFAULT_SCHEMA); - paths.emplace_back("pg_catalog"); -} -vector CatalogSearchPath::ParsePaths(const string &value) { - return StringUtil::SplitWithQuote(StringUtil::Lower(value)); -} + Node *node = nullptr; + idx_t pos = 0; +}; + +//! Keeps track of the current key in the iterator +class IteratorCurrentKey { +public: + //! Push Byte + void Push(uint8_t key); + //! Pops n elements + void Pop(idx_t n); + //! Subscript operator + uint8_t &operator[](idx_t idx); + bool operator>(const Key &k) const; + bool operator>=(const Key &k) const; + bool operator==(const Key &k) const; + +private: + //! The current key position + idx_t cur_key_pos = 0; + //! The current key of the Leaf Node + vector key; +}; + +class Iterator { +public: + //! Current Key + IteratorCurrentKey cur_key; + //! Pointer to the ART tree we are iterating + ART *art = nullptr; + + //! Scan the tree + bool Scan(Key &bound, idx_t max_count, vector &result_ids, bool is_inclusive); + //! Finds minimum value of the tree + void FindMinimum(Node &node); + //! Goes to lower bound + bool LowerBound(Node *node, Key &key, bool inclusive); +private: + //! Stack of iterator entries + stack nodes; + //! Last visited leaf + Leaf *last_leaf = nullptr; + //! Go to the next node + bool Next(); + //! Push part of the key to cur_key + void PushKey(Node *node, uint16_t pos); + //! Pop node + void PopNode(); +}; } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/node16.hpp +// +// +//===----------------------------------------------------------------------===// + + //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/transaction/transaction_manager.hpp +// duckdb/execution/index/art/swizzleable_pointer.hpp // // //===----------------------------------------------------------------------===// @@ -7550,95 +8305,136 @@ vector CatalogSearchPath::ParsePaths(const string &value) { +namespace duckdb { + +class ART; +class Node; + +// SwizzleablePointer assumes that the 64-bit blockId always has 0s in the top +// 33 bits. It thus uses 8 bytes of memory rather than 12. +class SwizzleablePointer { +public: + ~SwizzleablePointer(); + explicit SwizzleablePointer(duckdb::MetaBlockReader &reader); + SwizzleablePointer() : pointer(0) {}; + BlockPointer Serialize(ART &art, duckdb::MetaBlockWriter &writer); + //! Transforms from Node* to uint64_t + SwizzleablePointer &operator=(const Node *ptr); + //! Unswizzle the pointer (if possible) + Node *Unswizzle(ART &art); + operator bool() const { + return pointer; + } + //! 
Deletes the underlying object (if necessary) and set the pointer to null_ptr + void Reset(); -namespace duckdb { +private: + uint64_t pointer; -class ClientContext; -class Catalog; -struct ClientLockWrapper; -class DatabaseInstance; -class Transaction; + friend bool operator!=(const SwizzleablePointer &s_ptr, const uint64_t &ptr); -struct StoredCatalogSet { - //! Stored catalog set - unique_ptr stored_set; - //! The highest active query number when the catalog set was stored; used for cleaning up - transaction_t highest_active_query; + //! Extracts block info from swizzled pointer + BlockPointer GetSwizzledBlockInfo(); + //! Checks if pointer is swizzled + bool IsSwizzled(); }; -//! The Transaction Manager is responsible for creating and managing -//! transactions -class TransactionManager { - friend struct CheckpointLock; +} // namespace duckdb + +namespace duckdb { + +class Node16 : public Node { public: - explicit TransactionManager(DatabaseInstance &db); - ~TransactionManager(); + explicit Node16(); + uint8_t key[16]; + ARTPointer children[16]; - //! Start a new transaction - Transaction *StartTransaction(ClientContext &context); - //! Commit the given transaction - string CommitTransaction(ClientContext &context, Transaction *transaction); - //! Rollback the given transaction - void RollbackTransaction(Transaction *transaction); +public: + static Node16 *New(); + //! Get position of a specific byte, returns DConstants::INVALID_INDEX if not exists + idx_t GetChildPos(uint8_t k) override; + //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX + //! if there are no children matching the criteria + idx_t GetChildGreaterEqual(uint8_t k, bool &equal) override; + //! Get the position of the minimum element in the node + idx_t GetMin() override; + //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position + idx_t GetNextPos(idx_t pos) override; + //! Get Node16 child + Node *GetChild(ART &art, idx_t pos) override; + //! Replace child pointer + void ReplaceChildPointer(idx_t pos, Node *node) override; - transaction_t GetQueryNumber() { - return current_query_number++; - } - transaction_t LowestActiveId() { - return lowest_active_id; - } - transaction_t LowestActiveStart() { - return lowest_active_start; - } + //! Insert a new child node at key_byte into the Node16 + static void InsertChild(Node *&node, uint8_t key_byte, Node *new_child); + //! Erase the child at pos and (if necessary) shrink to Node4 + static void EraseChild(Node *&node, int pos, ART &art); + //! Merge Node16 into l_node + static bool Merge(MergeInfo &info, idx_t depth, Node *&l_parent, idx_t l_pos); + //! Returns the size (maximum capacity) of the Node16 + static idx_t GetSize(); +}; +} // namespace duckdb - void Checkpoint(ClientContext &context, bool force = false); +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/node256.hpp +// +// +//===----------------------------------------------------------------------===// - static TransactionManager &Get(ClientContext &context); - static TransactionManager &Get(DatabaseInstance &db); -private: - bool CanCheckpoint(Transaction *current = nullptr); - //! Remove the given transaction from the list of active transactions - void RemoveTransaction(Transaction *transaction) noexcept; - void LockClients(vector &client_locks, ClientContext &context); - //! The database instance - DatabaseInstance &db; - //! 
The current query number - atomic current_query_number; - //! The current start timestamp used by transactions - transaction_t current_start_timestamp; - //! The current transaction ID used by transactions - transaction_t current_transaction_id; - //! The lowest active transaction id - atomic lowest_active_id; - //! The lowest active transaction timestamp - atomic lowest_active_start; - //! Set of currently running transactions - vector> active_transactions; - //! Set of recently committed transactions - vector> recently_committed_transactions; - //! Transactions awaiting GC - vector> old_transactions; - //! Catalog sets - vector old_catalog_sets; - //! The lock used for transaction operations - mutex transaction_lock; - bool thread_is_checkpointing; -}; -} // namespace duckdb +namespace duckdb { +class Node256 : public Node { +public: + explicit Node256(); + ARTPointer children[256]; +public: + static Node256 *New(); + //! Get position of a specific byte, returns DConstants::INVALID_INDEX if not exists + idx_t GetChildPos(uint8_t k) override; + //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX + //! if there are no children matching the criteria + idx_t GetChildGreaterEqual(uint8_t k, bool &equal) override; + //! Get the position of the minimum element in the node + idx_t GetMin() override; + //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position + idx_t GetNextPos(idx_t pos) override; + //! Get Node256 child + Node *GetChild(ART &art, idx_t pos) override; + //! Replace child pointer + void ReplaceChildPointer(idx_t pos, Node *node) override; + //! Insert a new child node at key_byte into the Node256 + static void InsertChild(Node *&node, uint8_t key_byte, Node *new_child); + //! Erase the child at pos and (if necessary) shrink to Node48 + static void EraseChild(Node *&node, int pos, ART &art); + //! Merge Node256 into l_node + static bool Merge(MergeInfo &info, idx_t depth, Node *&l_parent, idx_t l_pos); + //! Returns the size (maximum capacity) of the Node256 + static idx_t GetSize(); +}; +} // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/node4.hpp +// +// +//===----------------------------------------------------------------------===// @@ -7646,636 +8442,364 @@ class TransactionManager { namespace duckdb { -//! Class responsible to keep track of state when removing entries from the catalog. -//! When deleting, many types of errors can be thrown, since we want to avoid try/catch blocks -//! this class makes sure that whatever elements were modified are returned to a correct state -//! when exceptions are thrown. -//! The idea here is to use RAII (Resource acquisition is initialization) to mimic a try/catch/finally block. -//! If any exception is raised when this object exists, then its destructor will be called -//! and the entry will return to its previous state during deconstruction. -class EntryDropper { +class Node4 : public Node { public: - //! 
Both constructor and destructor are privates because they should only be called by DropEntryDependencies - explicit EntryDropper(CatalogSet &catalog_set, idx_t entry_index) - : catalog_set(catalog_set), entry_index(entry_index) { - old_deleted = catalog_set.entries[entry_index].get()->deleted; - } + Node4(); - ~EntryDropper() { - catalog_set.entries[entry_index].get()->deleted = old_deleted; - } + uint8_t key[4]; + // Pointers to the child nodes + ARTPointer children[4]; -private: - //! The current catalog_set - CatalogSet &catalog_set; - //! Keeps track of the state of the entry before starting the delete - bool old_deleted; - //! Index of entry to be deleted - idx_t entry_index; +public: + static Node4 *New(); + //! Get position of a byte, returns DConstants::INVALID_INDEX if not exists + idx_t GetChildPos(uint8_t k) override; + //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX + //! if there are no children matching the criteria + idx_t GetChildGreaterEqual(uint8_t k, bool &equal) override; + //! Get the position of the minimum element in the node + idx_t GetMin() override; + //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position + idx_t GetNextPos(idx_t pos) override; + //! Get Node4 child + Node *GetChild(ART &art, idx_t pos) override; + //! Replace child pointer + void ReplaceChildPointer(idx_t pos, Node *node) override; + + //! Insert a new child node at key_byte into the Node4 + static void InsertChild(Node *&node, uint8_t key_byte, Node *new_child); + //! Erase the child at pos and (if necessary) merge with last child + static void EraseChild(Node *&node, int pos, ART &art); + //! Merge Node4 into l_node + static bool Merge(MergeInfo &info, idx_t depth, Node *&l_parent, idx_t l_pos); + //! 
Returns the size (maximum capacity) of the Node4 + static idx_t GetSize(); }; +} // namespace duckdb -CatalogSet::CatalogSet(Catalog &catalog, unique_ptr defaults) - : catalog(catalog), defaults(move(defaults)) { -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/execution/index/art/node48.hpp +// +// +//===----------------------------------------------------------------------===// -bool CatalogSet::CreateEntry(ClientContext &context, const string &name, unique_ptr value, - unordered_set &dependencies) { - auto &transaction = Transaction::GetTransaction(context); - // lock the catalog for writing - lock_guard write_lock(catalog.write_lock); - // lock this catalog set to disallow reading - unique_lock read_lock(catalog_lock); - // first check if the entry exists in the unordered set - idx_t entry_index; - auto mapping_value = GetMapping(context, name); - if (mapping_value == nullptr || mapping_value->deleted) { - // if it does not: entry has never been created - // check if there is a default entry - auto entry = CreateDefaultEntry(context, name, read_lock); - if (entry) { - return false; - } - // first create a dummy deleted entry for this entry - // so transactions started before the commit of this transaction don't - // see it yet - entry_index = current_entry++; - auto dummy_node = make_unique(CatalogType::INVALID, value->catalog, name); - dummy_node->timestamp = 0; - dummy_node->deleted = true; - dummy_node->set = this; - entries[entry_index] = move(dummy_node); - PutMapping(context, name, entry_index); - } else { - entry_index = mapping_value->index; - auto ¤t = *entries[entry_index]; - // if it does, we have to check version numbers - if (HasConflict(context, current.timestamp)) { - // current version has been written to by a currently active - // transaction - throw TransactionException("Catalog write-write conflict on create with \"%s\"", current.name); - } - // there is a current version that has been committed - // if it has not been deleted there is a conflict - if (!current.deleted) { - return false; - } - } - // create a new entry and replace the currently stored one - // set the timestamp to the timestamp of the current transaction - // and point it at the dummy node - value->timestamp = transaction.transaction_id; - value->set = this; +namespace duckdb { - // now add the dependency set of this object to the dependency manager - catalog.dependency_manager->AddObject(context, value.get(), dependencies); +class Node48 : public Node { +public: + explicit Node48(); + uint8_t child_index[256]; + ARTPointer children[48]; - value->child = move(entries[entry_index]); - value->child->parent = value.get(); - // push the old entry in the undo buffer for this transaction - transaction.PushCatalogEntry(value->child.get()); - entries[entry_index] = move(value); - return true; -} +public: + static Node48 *New(); + //! Get position of a specific byte, returns DConstants::INVALID_INDEX if not exists + idx_t GetChildPos(uint8_t k) override; + //! Get the position of the first child that is greater or equal to the specific byte, or DConstants::INVALID_INDEX + //! if there are no children matching the criteria + idx_t GetChildGreaterEqual(uint8_t k, bool &equal) override; + //! Get the position of the minimum element in the node + idx_t GetMin() override; + //! Get the next position in the node, or DConstants::INVALID_INDEX if there is no next position + idx_t GetNextPos(idx_t pos) override; + //! 
Get Node48 child + Node *GetChild(ART &art, idx_t pos) override; + //! Replace child pointer + void ReplaceChildPointer(idx_t pos, Node *node) override; -bool CatalogSet::GetEntryInternal(ClientContext &context, idx_t entry_index, CatalogEntry *&catalog_entry) { - catalog_entry = entries[entry_index].get(); - // if it does: we have to retrieve the entry and to check version numbers - if (HasConflict(context, catalog_entry->timestamp)) { - // current version has been written to by a currently active - // transaction - throw TransactionException("Catalog write-write conflict on alter with \"%s\"", catalog_entry->name); - } - // there is a current version that has been committed by this transaction - if (catalog_entry->deleted) { - // if the entry was already deleted, it now does not exist anymore - // so we return that we could not find it - return false; - } - return true; -} + //! Insert a new child node at key_byte into the Node48 + static void InsertChild(Node *&node, uint8_t key_byte, Node *new_child); + //! Erase the child at pos and (if necessary) shrink to Node16 + static void EraseChild(Node *&node, int pos, ART &art); + //! Merge Node48 into l_node + static bool Merge(MergeInfo &info, idx_t depth, Node *&l_parent, idx_t l_pos); + //! Returns the size (maximum capacity) of the Node48 + static idx_t GetSize(); +}; +} // namespace duckdb -bool CatalogSet::GetEntryInternal(ClientContext &context, const string &name, idx_t &entry_index, - CatalogEntry *&catalog_entry) { - auto mapping_value = GetMapping(context, name); - if (mapping_value == nullptr || mapping_value->deleted) { - // the entry does not exist, check if we can create a default entry - return false; - } - entry_index = mapping_value->index; - return GetEntryInternal(context, entry_index, catalog_entry); -} -bool CatalogSet::AlterOwnership(ClientContext &context, ChangeOwnershipInfo *info) { - idx_t entry_index; - CatalogEntry *entry; - if (!GetEntryInternal(context, info->name, entry_index, entry)) { - return false; - } - auto owner_entry = catalog.GetEntry(context, info->owner_schema, info->owner_name); - if (!owner_entry) { - return false; - } - catalog.dependency_manager->AddOwnership(context, owner_entry, entry); - return true; -} -bool CatalogSet::AlterEntry(ClientContext &context, const string &name, AlterInfo *alter_info) { - auto &transaction = Transaction::GetTransaction(context); - // lock the catalog for writing - lock_guard write_lock(catalog.write_lock); +namespace duckdb { - // first check if the entry exists in the unordered set - idx_t entry_index; - CatalogEntry *entry; - if (!GetEntryInternal(context, name, entry_index, entry)) { - return false; - } - if (entry->internal) { - throw CatalogException("Cannot alter entry \"%s\" because it is an internal system entry", entry->name); +struct ARTIndexScanState : public IndexScanState { + ARTIndexScanState() : checked(false), result_index(0) { } - // lock this catalog set to disallow reading - lock_guard read_lock(catalog_lock); + Value values[2]; + ExpressionType expressions[2]; + bool checked; + vector result_ids; + Iterator iterator; + //! Stores the current leaf + Leaf *cur_leaf = nullptr; + //! 
Offset to leaf + idx_t result_index = 0; +}; - // create a new entry and replace the currently stored one - // set the timestamp to the timestamp of the current transaction - // and point it to the updated table node - string original_name = entry->name; - auto value = entry->AlterEntry(context, alter_info); - if (!value) { - // alter failed, but did not result in an error - return true; - } +enum VerifyExistenceType : uint8_t { + APPEND = 0, // for purpose to append into table + APPEND_FK = 1, // for purpose to append into table has foreign key + DELETE_FK = 2 // for purpose to delete from table related to foreign key +}; - if (value->name != original_name) { - auto mapping_value = GetMapping(context, value->name); - if (mapping_value && !mapping_value->deleted) { - auto entry = GetEntryForTransaction(context, entries[mapping_value->index].get()); - if (!entry->deleted) { - string rename_err_msg = - "Could not rename \"%s\" to \"%s\": another entry with this name already exists!"; - throw CatalogException(rename_err_msg, original_name, value->name); - } - } - } +class ART : public Index { +public: + ART(const vector &column_ids, TableIOManager &table_io_manager, + const vector> &unbound_expressions, IndexConstraintType constraint_type, + DatabaseInstance &db, idx_t block_id = DConstants::INVALID_INDEX, + idx_t block_offset = DConstants::INVALID_INDEX); + ~ART() override; - if (value->name != original_name) { - // Do PutMapping and DeleteMapping after dependency check - PutMapping(context, value->name, entry_index); - DeleteMapping(context, original_name); - } + //! Root of the tree + Node *tree; - value->timestamp = transaction.transaction_id; - value->child = move(entries[entry_index]); - value->child->parent = value.get(); - value->set = this; + DatabaseInstance &db; - // serialize the AlterInfo into a temporary buffer - BufferedSerializer serializer; - alter_info->Serialize(serializer); - BinaryData serialized_alter = serializer.GetData(); +public: + //! Initialize a scan on the index with the given expression and column ids + //! to fetch from the base table for a single predicate + unique_ptr InitializeScanSinglePredicate(Transaction &transaction, Value value, + ExpressionType expression_type) override; - auto new_entry = value.get(); + //! Initialize a scan on the index with the given expression and column ids + //! to fetch from the base table for two predicates + unique_ptr InitializeScanTwoPredicates(Transaction &transaction, Value low_value, + ExpressionType low_expression_type, Value high_value, + ExpressionType high_expression_type) override; - // push the old entry in the undo buffer for this transaction - transaction.PushCatalogEntry(value->child.get(), serialized_alter.data.get(), serialized_alter.size); - entries[entry_index] = move(value); + //! Perform a lookup on the index + bool Scan(Transaction &transaction, DataTable &table, IndexScanState &state, idx_t max_count, + vector &result_ids) override; + //! Append entries to the index + bool Append(IndexLock &lock, DataChunk &entries, Vector &row_identifiers) override; + //! Verify that data can be appended to the index + void VerifyAppend(DataChunk &chunk) override; + //! Verify that data can be appended to the index for foreign key constraint + void VerifyAppendForeignKey(DataChunk &chunk, string *err_msg_ptr) override; + //! Verify that data can be delete from the index for foreign key constraint + void VerifyDeleteForeignKey(DataChunk &chunk, string *err_msg_ptr) override; + //! 
Delete entries in the index + void Delete(IndexLock &lock, DataChunk &entries, Vector &row_identifiers) override; + //! Insert data into the index. + bool Insert(IndexLock &lock, DataChunk &data, Vector &row_ids) override; - // Check the dependency manager to verify that there are no conflicting dependencies with this alter - // Note that we do this AFTER the new entry has been entirely set up in the catalog set - // that is because in case the alter fails because of a dependency conflict, we need to be able to cleanly roll back - // to the old entry. - catalog.dependency_manager->AlterObject(context, entry, new_entry); + //! Construct ARTs from sorted chunks and merge them. + void ConstructAndMerge(IndexLock &lock, PayloadScanner &scanner, Allocator &allocator) override; - return true; -} + //! Search Equal and fetches the row IDs + bool SearchEqual(Key &key, idx_t max_count, vector &result_ids); + //! Search Equal used for Joins that do not need to fetch data + void SearchEqualJoinNoFetch(Key &key, idx_t &result_size); + //! Serialized the ART + BlockPointer Serialize(duckdb::MetaBlockWriter &writer) override; -void CatalogSet::DropEntryDependencies(ClientContext &context, idx_t entry_index, CatalogEntry &entry, bool cascade) { + //! Merge two ARTs + bool MergeIndexes(IndexLock &state, Index *other_index) override; + //! Generate ART keys for an input chunk + static void GenerateKeys(ArenaAllocator &allocator, DataChunk &input, vector &keys); + //! Returns the string representation of an ART + string ToString() override; - // Stores the deleted value of the entry before starting the process - EntryDropper dropper(*this, entry_index); +private: + //! Insert a row id into a leaf node + bool InsertToLeaf(Leaf &leaf, row_t row_id); + //! Insert the leaf value into the tree + bool Insert(Node *&node, Key &key, idx_t depth, row_t row_id); - // To correctly delete the object and its dependencies, it temporarily is set to deleted. - entries[entry_index].get()->deleted = true; + //! Erase element from leaf (if leaf has more than one value) or eliminate the leaf itself + void Erase(Node *&node, Key &key, idx_t depth, row_t row_id); - // check any dependencies of this object - entry.catalog->dependency_manager->DropObject(context, &entry, cascade); + //! Find the node with a matching key, optimistic version + Leaf *Lookup(Node *node, Key &key, idx_t depth); - // dropper destructor is called here - // the destructor makes sure to return the value to the previous state - // dropper.~EntryDropper() -} + bool SearchGreater(ARTIndexScanState *state, Key &key, bool inclusive, idx_t max_count, vector &result_ids); + bool SearchLess(ARTIndexScanState *state, Key &upper_bound, bool inclusive, idx_t max_count, + vector &result_ids); + bool SearchCloseRange(ARTIndexScanState *state, Key &lower_bound, Key &upper_bound, bool left_inclusive, + bool right_inclusive, idx_t max_count, vector &result_ids); -void CatalogSet::DropEntryInternal(ClientContext &context, idx_t entry_index, CatalogEntry &entry, bool cascade) { - auto &transaction = Transaction::GetTransaction(context); + void VerifyExistence(DataChunk &chunk, VerifyExistenceType verify_type, string *err_msg_ptr = nullptr); - DropEntryDependencies(context, entry_index, entry, cascade); +private: + //! The estimated ART memory consumption + idx_t estimated_art_size; + //! 
The estimated memory consumption of a single key + idx_t estimated_key_size; +}; - // create a new entry and replace the currently stored one - // set the timestamp to the timestamp of the current transaction - // and point it at the dummy node - auto value = make_unique(CatalogType::DELETED_ENTRY, entry.catalog, entry.name); - value->timestamp = transaction.transaction_id; - value->child = move(entries[entry_index]); - value->child->parent = value.get(); - value->set = this; - value->deleted = true; +} // namespace duckdb - // push the old entry in the undo buffer for this transaction - transaction.PushCatalogEntry(value->child.get()); - entries[entry_index] = move(value); -} -bool CatalogSet::DropEntry(ClientContext &context, const string &name, bool cascade) { - // lock the catalog for writing - lock_guard write_lock(catalog.write_lock); - // we can only delete an entry that exists - idx_t entry_index; - CatalogEntry *entry; - if (!GetEntryInternal(context, name, entry_index, entry)) { - return false; - } - if (entry->internal) { - throw CatalogException("Cannot drop entry \"%s\" because it is an internal system entry", entry->name); - } +namespace duckdb { - DropEntryInternal(context, entry_index, *entry, cascade); - return true; +IndexCatalogEntry::IndexCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateIndexInfo *info) + : StandardEntry(CatalogType::INDEX_ENTRY, schema, catalog, info->index_name), index(nullptr), sql(info->sql) { } -void CatalogSet::CleanupEntry(CatalogEntry *catalog_entry) { - // destroy the backed up entry: it is no longer required - D_ASSERT(catalog_entry->parent); - if (catalog_entry->parent->type != CatalogType::UPDATED_ENTRY) { - lock_guard lock(catalog_lock); - if (!catalog_entry->deleted) { - // delete the entry from the dependency manager, if it is not deleted yet - catalog_entry->catalog->dependency_manager->EraseObject(catalog_entry); - } - auto parent = catalog_entry->parent; - parent->child = move(catalog_entry->child); - if (parent->deleted && !parent->child && !parent->parent) { - auto mapping_entry = mapping.find(parent->name); - D_ASSERT(mapping_entry != mapping.end()); - auto index = mapping_entry->second->index; - auto entry = entries.find(index); - D_ASSERT(entry != entries.end()); - if (entry->second.get() == parent) { - mapping.erase(mapping_entry); - entries.erase(entry); - } - } +IndexCatalogEntry::~IndexCatalogEntry() { + // remove the associated index from the info + if (!info || !index) { + return; } + info->indexes.RemoveIndex(index); } -bool CatalogSet::HasConflict(ClientContext &context, transaction_t timestamp) { - auto &transaction = Transaction::GetTransaction(context); - return (timestamp >= TRANSACTION_ID_START && timestamp != transaction.transaction_id) || - (timestamp < TRANSACTION_ID_START && timestamp > transaction.start_time); -} - -MappingValue *CatalogSet::GetMapping(ClientContext &context, const string &name, bool get_latest) { - MappingValue *mapping_value; - auto entry = mapping.find(name); - if (entry != mapping.end()) { - mapping_value = entry->second.get(); - } else { - - return nullptr; - } - if (get_latest) { - return mapping_value; +string IndexCatalogEntry::ToSQL() { + if (sql.empty()) { + throw InternalException("Cannot convert INDEX to SQL because it was not created with a SQL statement"); } - while (mapping_value->child) { - if (UseTimestamp(context, mapping_value->timestamp)) { - break; - } - mapping_value = mapping_value->child.get(); - D_ASSERT(mapping_value); + if (sql[sql.size() - 1] != ';') { 
+ sql += ";"; } - return mapping_value; + return sql; } -void CatalogSet::PutMapping(ClientContext &context, const string &name, idx_t entry_index) { - auto entry = mapping.find(name); - auto new_value = make_unique(entry_index); - new_value->timestamp = Transaction::GetTransaction(context).transaction_id; - if (entry != mapping.end()) { - if (HasConflict(context, entry->second->timestamp)) { - throw TransactionException("Catalog write-write conflict on name \"%s\"", name); - } - new_value->child = move(entry->second); - new_value->child->parent = new_value.get(); - } - mapping[name] = move(new_value); +void IndexCatalogEntry::Serialize(duckdb::MetaBlockWriter &serializer) { + // Here we serialize the index metadata in the following order: + // schema name, table name, index name, sql, index type, index constraint type, expression list. + // column_ids, unbound_expression + FieldWriter writer(serializer); + writer.WriteString(info->schema); + writer.WriteString(info->table); + writer.WriteString(name); + writer.WriteString(sql); + writer.WriteField(index->type); + writer.WriteField(index->constraint_type); + writer.WriteSerializableList(expressions); + writer.WriteSerializableList(parsed_expressions); + writer.WriteList(index->column_ids); + writer.Finalize(); } -void CatalogSet::DeleteMapping(ClientContext &context, const string &name) { - auto entry = mapping.find(name); - D_ASSERT(entry != mapping.end()); - auto delete_marker = make_unique(entry->second->index); - delete_marker->deleted = true; - delete_marker->timestamp = Transaction::GetTransaction(context).transaction_id; - delete_marker->child = move(entry->second); - delete_marker->child->parent = delete_marker.get(); - mapping[name] = move(delete_marker); -} +unique_ptr IndexCatalogEntry::Deserialize(Deserializer &source, ClientContext &context) { + // Here we deserialize the index metadata in the following order: + // root block, root offset, schema name, table name, index name, sql, index type, index constraint type, expression + // list. 
-bool CatalogSet::UseTimestamp(ClientContext &context, transaction_t timestamp) { - auto &transaction = Transaction::GetTransaction(context); - if (timestamp == transaction.transaction_id) { - // we created this version - return true; - } - if (timestamp < transaction.start_time) { - // this version was commited before we started the transaction - return true; - } - return false; -} + auto create_index_info = make_unique(); -CatalogEntry *CatalogSet::GetEntryForTransaction(ClientContext &context, CatalogEntry *current) { - while (current->child) { - if (UseTimestamp(context, current->timestamp)) { - break; - } - current = current->child.get(); - D_ASSERT(current); - } - return current; -} + FieldReader reader(source); -CatalogEntry *CatalogSet::GetCommittedEntry(CatalogEntry *current) { - while (current->child) { - if (current->timestamp < TRANSACTION_ID_START) { - // this entry is committed: use it - break; - } - current = current->child.get(); - D_ASSERT(current); - } - return current; + create_index_info->schema = reader.ReadRequired(); + create_index_info->table = make_unique(); + create_index_info->table->schema_name = create_index_info->schema; + create_index_info->table->table_name = reader.ReadRequired(); + create_index_info->index_name = reader.ReadRequired(); + create_index_info->sql = reader.ReadRequired(); + create_index_info->index_type = IndexType(reader.ReadRequired()); + create_index_info->constraint_type = IndexConstraintType(reader.ReadRequired()); + create_index_info->expressions = reader.ReadRequiredSerializableList(); + create_index_info->parsed_expressions = reader.ReadRequiredSerializableList(); + + create_index_info->column_ids = reader.ReadRequiredList(); + reader.Finalize(); + return create_index_info; } -pair CatalogSet::SimilarEntry(ClientContext &context, const string &name) { - unique_lock lock(catalog_lock); - CreateDefaultEntries(context, lock); +} // namespace duckdb - string result; - idx_t current_score = (idx_t)-1; - for (auto &kv : mapping) { - auto mapping_value = GetMapping(context, kv.first); - if (mapping_value && !mapping_value->deleted) { - auto ldist = StringUtil::LevenshteinDistance(kv.first, name); - if (ldist < current_score) { - current_score = ldist; - result = kv.first; - } - } - } - return {result, current_score}; -} -CatalogEntry *CatalogSet::CreateEntryInternal(ClientContext &context, unique_ptr entry) { - if (mapping.find(entry->name) != mapping.end()) { - return nullptr; - } - auto &name = entry->name; - auto entry_index = current_entry++; - auto catalog_entry = entry.get(); - entry->set = this; - entry->timestamp = 0; +namespace duckdb { - PutMapping(context, name, entry_index); - mapping[name]->timestamp = 0; - entries[entry_index] = move(entry); - return catalog_entry; +PragmaFunctionCatalogEntry::PragmaFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, + CreatePragmaFunctionInfo *info) + : StandardEntry(CatalogType::PRAGMA_FUNCTION_ENTRY, schema, catalog, info->name), functions(move(info->functions)) { } -CatalogEntry *CatalogSet::CreateDefaultEntry(ClientContext &context, const string &name, unique_lock &lock) { - // no entry found with this name, check for defaults - if (!defaults || defaults->created_all_entries) { - // no defaults either: return null - return nullptr; - } - // this catalog set has a default map defined - // check if there is a default entry that we can create with this name - lock.unlock(); - auto entry = defaults->CreateDefaultEntry(context, name); +} // namespace duckdb - lock.lock(); - if 
(!entry) { - // no default entry - return nullptr; - } - // there is a default entry! create it - auto result = CreateEntryInternal(context, move(entry)); - if (result) { - return result; - } - // we found a default entry, but failed - // this means somebody else created the entry first - // just retry? - lock.unlock(); - return GetEntry(context, name); -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/parser/parsed_data/alter_function_info.hpp +// +// +//===----------------------------------------------------------------------===// -CatalogEntry *CatalogSet::GetEntry(ClientContext &context, const string &name) { - unique_lock lock(catalog_lock); - auto mapping_value = GetMapping(context, name); - if (mapping_value != nullptr && !mapping_value->deleted) { - // we found an entry for this name - // check the version numbers - auto catalog_entry = entries[mapping_value->index].get(); - CatalogEntry *current = GetEntryForTransaction(context, catalog_entry); - if (current->deleted || (current->name != name && !UseTimestamp(context, mapping_value->timestamp))) { - return nullptr; - } - return current; - } - return CreateDefaultEntry(context, name, lock); -} -void CatalogSet::UpdateTimestamp(CatalogEntry *entry, transaction_t timestamp) { - entry->timestamp = timestamp; - mapping[entry->name]->timestamp = timestamp; -} -void CatalogSet::AdjustUserDependency(CatalogEntry *entry, ColumnDefinition &column, bool remove) { - CatalogEntry *user_type_catalog = (CatalogEntry *)LogicalType::GetCatalog(column.Type()); - if (user_type_catalog) { - if (remove) { - catalog.dependency_manager->dependents_map[user_type_catalog].erase(entry->parent); - catalog.dependency_manager->dependencies_map[entry->parent].erase(user_type_catalog); - } else { - catalog.dependency_manager->dependents_map[user_type_catalog].insert(entry); - catalog.dependency_manager->dependencies_map[entry].insert(user_type_catalog); - } - } -} -void CatalogSet::AdjustDependency(CatalogEntry *entry, TableCatalogEntry *table, ColumnDefinition &column, - bool remove) { - bool found = false; - if (column.Type().id() == LogicalTypeId::ENUM) { - for (auto &old_column : table->columns) { - if (old_column.Name() == column.Name() && old_column.Type().id() != LogicalTypeId::ENUM) { - AdjustUserDependency(entry, column, remove); - found = true; - } - } - if (!found) { - AdjustUserDependency(entry, column, remove); - } - } else if (!(column.Type().GetAlias().empty())) { - auto alias = column.Type().GetAlias(); - for (auto &old_column : table->columns) { - auto old_alias = old_column.Type().GetAlias(); - if (old_column.Name() == column.Name() && old_alias != alias) { - AdjustUserDependency(entry, column, remove); - found = true; - } - } - if (!found) { - AdjustUserDependency(entry, column, remove); - } - } -} -void CatalogSet::AdjustTableDependencies(CatalogEntry *entry) { - if (entry->type == CatalogType::TABLE_ENTRY && entry->parent->type == CatalogType::TABLE_ENTRY) { - // If it's a table entry we have to check for possibly removing or adding user type dependencies - auto old_table = (TableCatalogEntry *)entry->parent; - auto new_table = (TableCatalogEntry *)entry; - for (auto &new_column : new_table->columns) { - AdjustDependency(entry, old_table, new_column, false); - } - for (auto &old_column : old_table->columns) { - AdjustDependency(entry, new_table, old_column, true); - } - } -} +namespace duckdb { -void CatalogSet::Undo(CatalogEntry *entry) { - lock_guard 
write_lock(catalog.write_lock); +//===--------------------------------------------------------------------===// +// Alter Table +//===--------------------------------------------------------------------===// +enum class AlterFunctionType : uint8_t { INVALID = 0, ADD_FUNCTION_OVERLOADS = 1 }; - lock_guard lock(catalog_lock); +struct AlterFunctionInfo : public AlterInfo { + AlterFunctionInfo(AlterFunctionType type, string schema, string name, bool if_exists); + virtual ~AlterFunctionInfo() override; - // entry has to be restored - // and entry->parent has to be removed ("rolled back") + AlterFunctionType alter_function_type; - // i.e. we have to place (entry) as (entry->parent) again - auto &to_be_removed_node = entry->parent; +public: + CatalogType GetCatalogType() const override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(FieldReader &reader); +}; - AdjustTableDependencies(entry); +//===--------------------------------------------------------------------===// +// AddFunctionOverloadInfo +//===--------------------------------------------------------------------===// +struct AddFunctionOverloadInfo : public AlterFunctionInfo { + AddFunctionOverloadInfo(string schema, string name, bool if_exists, ScalarFunctionSet new_overloads); + ~AddFunctionOverloadInfo() override; - if (!to_be_removed_node->deleted) { - // delete the entry from the dependency manager as well - catalog.dependency_manager->EraseObject(to_be_removed_node); - } - if (entry->name != to_be_removed_node->name) { - // rename: clean up the new name when the rename is rolled back - auto removed_entry = mapping.find(to_be_removed_node->name); - if (removed_entry->second->child) { - removed_entry->second->child->parent = nullptr; - mapping[to_be_removed_node->name] = move(removed_entry->second->child); - } else { - mapping.erase(removed_entry); - } - } - if (to_be_removed_node->parent) { - // if the to be removed node has a parent, set the child pointer to the - // to be restored node - to_be_removed_node->parent->child = move(to_be_removed_node->child); - entry->parent = to_be_removed_node->parent; - } else { - // otherwise we need to update the base entry tables - auto &name = entry->name; - to_be_removed_node->child->SetAsRoot(); - entries[mapping[name]->index] = move(to_be_removed_node->child); - entry->parent = nullptr; - } + ScalarFunctionSet new_overloads; - // restore the name if it was deleted - auto restored_entry = mapping.find(entry->name); - if (restored_entry->second->deleted || entry->type == CatalogType::INVALID) { - if (restored_entry->second->child) { - restored_entry->second->child->parent = nullptr; - mapping[entry->name] = move(restored_entry->second->child); - } else { - mapping.erase(restored_entry); - } - } - // we mark the catalog as being modified, since this action can lead to e.g. tables being dropped - entry->catalog->ModifyCatalog(); -} +public: + unique_ptr Copy() const override; +}; -void CatalogSet::CreateDefaultEntries(ClientContext &context, unique_lock &lock) { - if (!defaults || defaults->created_all_entries) { - return; - } - // this catalog set has a default set defined: - auto default_entries = defaults->GetDefaultEntries(); - for (auto &default_entry : default_entries) { - auto map_entry = mapping.find(default_entry); - if (map_entry == mapping.end()) { - // we unlock during the CreateEntry, since it might reference other catalog sets... 
- // specifically for views this can happen since the view will be bound - lock.unlock(); - auto entry = defaults->CreateDefaultEntry(context, default_entry); - if (!entry) { - throw InternalException("Failed to create default entry for %s", default_entry); - } +} // namespace duckdb - lock.lock(); - CreateEntryInternal(context, move(entry)); - } - } - defaults->created_all_entries = true; -} -void CatalogSet::Scan(ClientContext &context, const std::function &callback) { - // lock the catalog set - unique_lock lock(catalog_lock); - CreateDefaultEntries(context, lock); +namespace duckdb { - for (auto &kv : entries) { - auto entry = kv.second.get(); - entry = GetEntryForTransaction(context, entry); - if (!entry->deleted) { - callback(entry); - } - } +ScalarFunctionCatalogEntry::ScalarFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, + CreateScalarFunctionInfo *info) + : StandardEntry(CatalogType::SCALAR_FUNCTION_ENTRY, schema, catalog, info->name), functions(info->functions) { } -void CatalogSet::Scan(const std::function &callback) { - // lock the catalog set - lock_guard lock(catalog_lock); - for (auto &kv : entries) { - auto entry = kv.second.get(); - entry = GetCommittedEntry(entry); - if (!entry->deleted) { - callback(entry); - } +unique_ptr ScalarFunctionCatalogEntry::AlterEntry(ClientContext &context, AlterInfo *info) { + if (info->type != AlterType::ALTER_FUNCTION) { + throw InternalException("Attempting to alter ScalarFunctionCatalogEntry with unsupported alter type"); + } + auto &function_info = (AlterFunctionInfo &)*info; + if (function_info.alter_function_type != AlterFunctionType::ADD_FUNCTION_OVERLOADS) { + throw InternalException("Attempting to alter ScalarFunctionCatalogEntry with unsupported alter function type"); } + auto &add_overloads = (AddFunctionOverloadInfo &)function_info; + + ScalarFunctionSet new_set = functions; + if (!new_set.MergeFunctionSet(add_overloads.new_overloads)) { + throw BinderException("Failed to add new function overloads to function \"%s\": function already exists", name); + } + CreateScalarFunctionInfo new_info(move(new_set)); + return make_unique(catalog, schema, &new_info); } + } // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/parser/parser.hpp +// duckdb/catalog/catalog_entry/macro_catalog_entry.hpp // // //===----------------------------------------------------------------------===// @@ -8286,478 +8810,304 @@ void CatalogSet::Scan(const std::function &callback) { +namespace duckdb { + +//! A macro function in the catalog +class TableMacroCatalogEntry : public MacroCatalogEntry { +public: + TableMacroCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateMacroInfo *info); + +public: + //! Serialize the meta information of the ScalarMacroCatalogEntry + void Serialize(Serializer &serializer) override; + //! Deserializes to a CreateMacroInfo + static unique_ptr Deserialize(Deserializer &source, ClientContext &context); +}; + +} // namespace duckdb + + //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/parser/simplified_token.hpp +// duckdb/function/scalar_macro_function.hpp // // //===----------------------------------------------------------------------===// +//! The SelectStatement of the view + + + + + namespace duckdb { -//! Simplified tokens are a simplified (dense) representation of the lexer -//! 
Used for simple syntax highlighting in the tests -enum class SimplifiedTokenType : uint8_t { - SIMPLIFIED_TOKEN_IDENTIFIER, - SIMPLIFIED_TOKEN_NUMERIC_CONSTANT, - SIMPLIFIED_TOKEN_STRING_CONSTANT, - SIMPLIFIED_TOKEN_OPERATOR, - SIMPLIFIED_TOKEN_KEYWORD, - SIMPLIFIED_TOKEN_COMMENT -}; +class ScalarMacroFunction : public MacroFunction { +public: + ScalarMacroFunction(unique_ptr expression); -struct SimplifiedToken { - SimplifiedTokenType type; - idx_t start; -}; + ScalarMacroFunction(void); + //! The macro expression + unique_ptr expression; -enum class KeywordCategory : uint8_t { KEYWORD_RESERVED, KEYWORD_UNRESERVED, KEYWORD_TYPE_FUNC, KEYWORD_COL_NAME }; +public: + unique_ptr Copy() override; -struct ParserKeyword { - string name; - KeywordCategory category; + string ToSQL(const string &schema, const string &name) override; }; } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/function/table_macro_function.hpp +// +// +//===----------------------------------------------------------------------===// -namespace duckdb_libpgquery { -struct PGNode; -struct PGList; -} // namespace duckdb_libpgquery -namespace duckdb { -class ParserExtension; -struct ParserOptions { - bool preserve_identifier_case = true; - idx_t max_expression_depth = 1000; - const vector *extensions = nullptr; -}; -//! The parser is responsible for parsing the query and converting it into a set -//! of parsed statements. The parsed statements can then be converted into a -//! plan and executed. -class Parser { -public: - Parser(ParserOptions options = ParserOptions()); - //! The parsed SQL statements from an invocation to ParseQuery. - vector> statements; -public: - //! Attempts to parse a query into a series of SQL statements. Returns - //! whether or not the parsing was successful. If the parsing was - //! successful, the parsed statements will be stored in the statements - //! variable. - void ParseQuery(const string &query); - //! Tokenize a query, returning the raw tokens together with their locations - static vector Tokenize(const string &query); - //! Returns true if the given text matches a keyword of the parser - static bool IsKeyword(const string &text); - //! Returns a list of all keywords in the parser - static vector KeywordList(); - //! Parses a list of expressions (i.e. the list found in a SELECT clause) - DUCKDB_API static vector> ParseExpressionList(const string &select_list, - ParserOptions options = ParserOptions()); - //! Parses a list as found in an ORDER BY expression (i.e. including optional ASCENDING/DESCENDING modifiers) - static vector ParseOrderList(const string &select_list, ParserOptions options = ParserOptions()); - //! Parses an update list (i.e. the list found in the SET clause of an UPDATE statement) - static void ParseUpdateList(const string &update_list, vector &update_columns, - vector> &expressions, - ParserOptions options = ParserOptions()); - //! Parses a VALUES list (i.e. the list of expressions after a VALUES clause) - static vector>> ParseValuesList(const string &value_list, - ParserOptions options = ParserOptions()); - //! Parses a column list (i.e. as found in a CREATE TABLE statement) - static vector ParseColumnList(const string &column_list, ParserOptions options = ParserOptions()); -private: - ParserOptions options; -}; -} // namespace duckdb +namespace duckdb { +class TableMacroFunction : public MacroFunction { +public: + TableMacroFunction(unique_ptr query_node); + TableMacroFunction(void); + //! 
The main query node + unique_ptr query_node; +public: + unique_ptr Copy() override; + string ToSQL(const string &schema, const string &name) override; +}; +} // namespace duckdb namespace duckdb { -static DefaultMacro internal_macros[] = { - {DEFAULT_SCHEMA, "current_user", {nullptr}, "'duckdb'"}, // user name of current execution context - {DEFAULT_SCHEMA, "current_catalog", {nullptr}, "'duckdb'"}, // name of current database (called "catalog" in the SQL standard) - {DEFAULT_SCHEMA, "current_database", {nullptr}, "'duckdb'"}, // name of current database - {DEFAULT_SCHEMA, "user", {nullptr}, "current_user"}, // equivalent to current_user - {DEFAULT_SCHEMA, "session_user", {nullptr}, "'duckdb'"}, // session user name - {"pg_catalog", "inet_client_addr", {nullptr}, "NULL"}, // address of the remote connection - {"pg_catalog", "inet_client_port", {nullptr}, "NULL"}, // port of the remote connection - {"pg_catalog", "inet_server_addr", {nullptr}, "NULL"}, // address of the local connection - {"pg_catalog", "inet_server_port", {nullptr}, "NULL"}, // port of the local connection - {"pg_catalog", "pg_my_temp_schema", {nullptr}, "0"}, // OID of session's temporary schema, or 0 if none - {"pg_catalog", "pg_is_other_temp_schema", {"schema_id", nullptr}, "false"}, // is schema another session's temporary schema? +MacroCatalogEntry::MacroCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateMacroInfo *info) + : StandardEntry( + (info->function->type == MacroType::SCALAR_MACRO ? CatalogType::MACRO_ENTRY : CatalogType::TABLE_MACRO_ENTRY), + schema, catalog, info->name), + function(move(info->function)) { + this->temporary = info->temporary; + this->internal = info->internal; +} - {"pg_catalog", "pg_conf_load_time", {nullptr}, "current_timestamp"}, // configuration load time - {"pg_catalog", "pg_postmaster_start_time", {nullptr}, "current_timestamp"}, // server start time +ScalarMacroCatalogEntry::ScalarMacroCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateMacroInfo *info) + : MacroCatalogEntry(catalog, schema, info) { +} - {"pg_catalog", "pg_typeof", {"expression", nullptr}, "lower(typeof(expression))"}, // get the data type of any value +void ScalarMacroCatalogEntry::Serialize(Serializer &main_serializer) { + D_ASSERT(!internal); + auto &scalar_function = (ScalarMacroFunction &)*function; + FieldWriter writer(main_serializer); + writer.WriteString(schema->name); + writer.WriteString(name); + writer.WriteSerializable(*scalar_function.expression); + // writer.WriteSerializableList(function->parameters); + writer.WriteSerializableList(function->parameters); + writer.WriteField((uint32_t)function->default_parameters.size()); + auto &serializer = writer.GetSerializer(); + for (auto &kv : function->default_parameters) { + serializer.WriteString(kv.first); + kv.second->Serialize(serializer); + } + writer.Finalize(); +} - // privilege functions - // {"has_any_column_privilege", {"user", "table", "privilege", nullptr}, "true"}, //boolean //does user have privilege for any column of table - {"pg_catalog", "has_any_column_privilege", {"table", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for any column of table - // {"has_column_privilege", {"user", "table", "column", "privilege", nullptr}, "true"}, //boolean //does user have privilege for column - {"pg_catalog", "has_column_privilege", {"table", "column", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for column - // {"has_database_privilege", {"user", "database", 
"privilege", nullptr}, "true"}, //boolean //does user have privilege for database - {"pg_catalog", "has_database_privilege", {"database", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for database - // {"has_foreign_data_wrapper_privilege", {"user", "fdw", "privilege", nullptr}, "true"}, //boolean //does user have privilege for foreign-data wrapper - {"pg_catalog", "has_foreign_data_wrapper_privilege", {"fdw", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for foreign-data wrapper - // {"has_function_privilege", {"user", "function", "privilege", nullptr}, "true"}, //boolean //does user have privilege for function - {"pg_catalog", "has_function_privilege", {"function", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for function - // {"has_language_privilege", {"user", "language", "privilege", nullptr}, "true"}, //boolean //does user have privilege for language - {"pg_catalog", "has_language_privilege", {"language", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for language - // {"has_schema_privilege", {"user", "schema, privilege", nullptr}, "true"}, //boolean //does user have privilege for schema - {"pg_catalog", "has_schema_privilege", {"schema", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for schema - // {"has_sequence_privilege", {"user", "sequence", "privilege", nullptr}, "true"}, //boolean //does user have privilege for sequence - {"pg_catalog", "has_sequence_privilege", {"sequence", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for sequence - // {"has_server_privilege", {"user", "server", "privilege", nullptr}, "true"}, //boolean //does user have privilege for foreign server - {"pg_catalog", "has_server_privilege", {"server", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for foreign server - // {"has_table_privilege", {"user", "table", "privilege", nullptr}, "true"}, //boolean //does user have privilege for table - {"pg_catalog", "has_table_privilege", {"table", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for table - // {"has_tablespace_privilege", {"user", "tablespace", "privilege", nullptr}, "true"}, //boolean //does user have privilege for tablespace - {"pg_catalog", "has_tablespace_privilege", {"tablespace", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for tablespace +unique_ptr ScalarMacroCatalogEntry::Deserialize(Deserializer &main_source, ClientContext &context) { + auto info = make_unique(CatalogType::MACRO_ENTRY); + FieldReader reader(main_source); + info->schema = reader.ReadRequired(); + info->name = reader.ReadRequired(); + auto expression = reader.ReadRequiredSerializable(); + auto func = make_unique(move(expression)); + info->function = move(func); + info->function->parameters = reader.ReadRequiredSerializableList(); + auto default_param_count = reader.ReadRequired(); + auto &source = reader.GetSource(); + for (idx_t i = 0; i < default_param_count; i++) { + auto name = source.Read(); + info->function->default_parameters[name] = ParsedExpression::Deserialize(source); + } + // dont like this + // info->type=CatalogType::MACRO_ENTRY; + reader.Finalize(); + return info; +} - // various postgres system functions - {"pg_catalog", "pg_get_viewdef", {"oid", nullptr}, "(select sql from duckdb_views() v where v.view_oid=oid)"}, - {"pg_catalog", "pg_get_constraintdef", {"constraint_oid", "pretty_bool", 
nullptr}, "(select constraint_text from duckdb_constraints() d_constraint where d_constraint.table_oid=constraint_oid/1000000 and d_constraint.constraint_index=constraint_oid%1000000)"}, - {"pg_catalog", "pg_get_expr", {"pg_node_tree", "relation_oid", nullptr}, "pg_node_tree"}, - {"pg_catalog", "format_pg_type", {"type_name", nullptr}, "case when logical_type='FLOAT' then 'real' when logical_type='DOUBLE' then 'double precision' when logical_type='DECIMAL' then 'numeric' when logical_type='VARCHAR' then 'character varying' when logical_type='BLOB' then 'bytea' when logical_type='TIMESTAMP' then 'timestamp without time zone' when logical_type='TIME' then 'time without time zone' else lower(logical_type) end"}, - {"pg_catalog", "format_type", {"type_oid", "typemod", nullptr}, "(select format_pg_type(type_name) from duckdb_types() t where t.type_oid=type_oid) || case when typemod>0 then concat('(', typemod/1000, ',', typemod%1000, ')') else '' end"}, +TableMacroCatalogEntry::TableMacroCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateMacroInfo *info) + : MacroCatalogEntry(catalog, schema, info) { +} - {"pg_catalog", "pg_has_role", {"user", "role", "privilege", nullptr}, "true"}, //boolean //does user have privilege for role - {"pg_catalog", "pg_has_role", {"role", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for role +void TableMacroCatalogEntry::Serialize(Serializer &main_serializer) { + D_ASSERT(!internal); + FieldWriter writer(main_serializer); - {"pg_catalog", "col_description", {"table_oid", "column_number", nullptr}, "NULL"}, // get comment for a table column - {"pg_catalog", "obj_description", {"object_oid", "catalog_name", nullptr}, "NULL"}, // get comment for a database object - {"pg_catalog", "shobj_description", {"object_oid", "catalog_name", nullptr}, "NULL"}, // get comment for a shared database object + auto &table_function = (TableMacroFunction &)*function; + writer.WriteString(schema->name); + writer.WriteString(name); + writer.WriteSerializable(*table_function.query_node); + writer.WriteSerializableList(function->parameters); + writer.WriteField((uint32_t)function->default_parameters.size()); + auto &serializer = writer.GetSerializer(); + for (auto &kv : function->default_parameters) { + serializer.WriteString(kv.first); + kv.second->Serialize(serializer); + } + writer.Finalize(); +} - // visibility functions - {"pg_catalog", "pg_collation_is_visible", {"collation_oid", nullptr}, "true"}, - {"pg_catalog", "pg_conversion_is_visible", {"conversion_oid", nullptr}, "true"}, - {"pg_catalog", "pg_function_is_visible", {"function_oid", nullptr}, "true"}, - {"pg_catalog", "pg_opclass_is_visible", {"opclass_oid", nullptr}, "true"}, - {"pg_catalog", "pg_operator_is_visible", {"operator_oid", nullptr}, "true"}, - {"pg_catalog", "pg_opfamily_is_visible", {"opclass_oid", nullptr}, "true"}, - {"pg_catalog", "pg_table_is_visible", {"table_oid", nullptr}, "true"}, - {"pg_catalog", "pg_ts_config_is_visible", {"config_oid", nullptr}, "true"}, - {"pg_catalog", "pg_ts_dict_is_visible", {"dict_oid", nullptr}, "true"}, - {"pg_catalog", "pg_ts_parser_is_visible", {"parser_oid", nullptr}, "true"}, - {"pg_catalog", "pg_ts_template_is_visible", {"template_oid", nullptr}, "true"}, - {"pg_catalog", "pg_type_is_visible", {"type_oid", nullptr}, "true"}, +unique_ptr TableMacroCatalogEntry::Deserialize(Deserializer &main_source, ClientContext &context) { + auto info = make_unique(CatalogType::TABLE_MACRO_ENTRY); + FieldReader reader(main_source); + info->schema 
= reader.ReadRequired(); + info->name = reader.ReadRequired(); + auto query_node = reader.ReadRequiredSerializable(); + auto table_function = make_unique(move(query_node)); + info->function = move(table_function); + info->function->parameters = reader.ReadRequiredSerializableList(); + auto default_param_count = reader.ReadRequired(); + auto &source = reader.GetSource(); + for (idx_t i = 0; i < default_param_count; i++) { + auto name = source.Read(); + info->function->default_parameters[name] = ParsedExpression::Deserialize(source); + } - {DEFAULT_SCHEMA, "round_even", {"x", "n", nullptr}, "CASE ((abs(x) * power(10, n+1)) % 10) WHEN 5 THEN round(x/2, n) * 2 ELSE round(x, n) END"}, - {DEFAULT_SCHEMA, "roundbankers", {"x", "n", nullptr}, "round_even(x, n)"}, - {DEFAULT_SCHEMA, "nullif", {"a", "b", nullptr}, "CASE WHEN a=b THEN NULL ELSE a END"}, - {DEFAULT_SCHEMA, "list_append", {"l", "e", nullptr}, "list_concat(l, list_value(e))"}, - {DEFAULT_SCHEMA, "array_append", {"arr", "el", nullptr}, "list_append(arr, el)"}, - {DEFAULT_SCHEMA, "list_prepend", {"e", "l", nullptr}, "list_concat(list_value(e), l)"}, - {DEFAULT_SCHEMA, "array_prepend", {"el", "arr", nullptr}, "list_prepend(el, arr)"}, - {DEFAULT_SCHEMA, "array_pop_back", {"arr", nullptr}, "arr[:LEN(arr)-1]"}, - {DEFAULT_SCHEMA, "array_pop_front", {"arr", nullptr}, "arr[2:]"}, - {DEFAULT_SCHEMA, "array_push_back", {"arr", "e", nullptr}, "list_concat(arr, list_value(e))"}, - {DEFAULT_SCHEMA, "array_push_front", {"arr", "e", nullptr}, "list_concat(list_value(e), arr)"}, - {DEFAULT_SCHEMA, "generate_subscripts", {"arr", "dim", nullptr}, "unnest(generate_series(1, array_length(arr, dim)))"}, - {DEFAULT_SCHEMA, "fdiv", {"x", "y", nullptr}, "floor(x/y)"}, - {DEFAULT_SCHEMA, "fmod", {"x", "y", nullptr}, "(x-y*floor(x/y))"}, + reader.Finalize(); - // algebraic list aggregates - {DEFAULT_SCHEMA, "list_avg", {"l", nullptr}, "list_aggr(l, 'avg')"}, - {DEFAULT_SCHEMA, "list_var_samp", {"l", nullptr}, "list_aggr(l, 'var_samp')"}, - {DEFAULT_SCHEMA, "list_var_pop", {"l", nullptr}, "list_aggr(l, 'var_pop')"}, - {DEFAULT_SCHEMA, "list_stddev_pop", {"l", nullptr}, "list_aggr(l, 'stddev_pop')"}, - {DEFAULT_SCHEMA, "list_stddev_samp", {"l", nullptr}, "list_aggr(l, 'stddev_samp')"}, - {DEFAULT_SCHEMA, "list_sem", {"l", nullptr}, "list_aggr(l, 'sem')"}, + return info; +} - // distributive list aggregates - {DEFAULT_SCHEMA, "list_approx_count_distinct", {"l", nullptr}, "list_aggr(l, 'approx_count_distinct')"}, - {DEFAULT_SCHEMA, "list_bit_xor", {"l", nullptr}, "list_aggr(l, 'bit_xor')"}, - {DEFAULT_SCHEMA, "list_bit_or", {"l", nullptr}, "list_aggr(l, 'bit_or')"}, - {DEFAULT_SCHEMA, "list_bit_and", {"l", nullptr}, "list_aggr(l, 'bit_and')"}, - {DEFAULT_SCHEMA, "list_bool_and", {"l", nullptr}, "list_aggr(l, 'bool_and')"}, - {DEFAULT_SCHEMA, "list_bool_or", {"l", nullptr}, "list_aggr(l, 'bool_or')"}, - {DEFAULT_SCHEMA, "list_count", {"l", nullptr}, "list_aggr(l, 'count')"}, - {DEFAULT_SCHEMA, "list_entropy", {"l", nullptr}, "list_aggr(l, 'entropy')"}, - {DEFAULT_SCHEMA, "list_last", {"l", nullptr}, "list_aggr(l, 'last')"}, - {DEFAULT_SCHEMA, "list_first", {"l", nullptr}, "list_aggr(l, 'first')"}, - {DEFAULT_SCHEMA, "list_any_value", {"l", nullptr}, "list_aggr(l, 'any_value')"}, - {DEFAULT_SCHEMA, "list_kurtosis", {"l", nullptr}, "list_aggr(l, 'kurtosis')"}, - {DEFAULT_SCHEMA, "list_min", {"l", nullptr}, "list_aggr(l, 'min')"}, - {DEFAULT_SCHEMA, "list_max", {"l", nullptr}, "list_aggr(l, 'max')"}, - {DEFAULT_SCHEMA, "list_product", {"l", nullptr}, "list_aggr(l, 
'product')"}, - {DEFAULT_SCHEMA, "list_skewness", {"l", nullptr}, "list_aggr(l, 'skewness')"}, - {DEFAULT_SCHEMA, "list_sum", {"l", nullptr}, "list_aggr(l, 'sum')"}, - {DEFAULT_SCHEMA, "list_string_agg", {"l", nullptr}, "list_aggr(l, 'string_agg')"}, +} // namespace duckdb - // holistic list aggregates - {DEFAULT_SCHEMA, "list_mode", {"l", nullptr}, "list_aggr(l, 'mode')"}, - {DEFAULT_SCHEMA, "list_median", {"l", nullptr}, "list_aggr(l, 'median')"}, - {DEFAULT_SCHEMA, "list_mad", {"l", nullptr}, "list_aggr(l, 'mad')"}, - // nested list aggregates - {DEFAULT_SCHEMA, "list_histogram", {"l", nullptr}, "list_aggr(l, 'histogram')"}, - {nullptr, nullptr, {nullptr}, nullptr} - }; -unique_ptr DefaultFunctionGenerator::CreateInternalTableMacroInfo(DefaultMacro &default_macro, unique_ptr function) { - for (idx_t param_idx = 0; default_macro.parameters[param_idx] != nullptr; param_idx++) { - function->parameters.push_back( - make_unique(default_macro.parameters[param_idx])); - } - auto bind_info = make_unique(); - bind_info->schema = default_macro.schema; - bind_info->name = default_macro.name; - bind_info->temporary = true; - bind_info->internal = true; - bind_info->type = function->type == MacroType::TABLE_MACRO ? CatalogType::TABLE_MACRO_ENTRY : CatalogType::MACRO_ENTRY; - bind_info->function = move(function); - return bind_info; -} -unique_ptr DefaultFunctionGenerator::CreateInternalMacroInfo(DefaultMacro &default_macro) { - // parse the expression - auto expressions = Parser::ParseExpressionList(default_macro.macro); - D_ASSERT(expressions.size() == 1); - auto result = make_unique(move(expressions[0])); - return CreateInternalTableMacroInfo(default_macro, move(result)); -} -unique_ptr DefaultFunctionGenerator::CreateInternalTableMacroInfo(DefaultMacro &default_macro) { - Parser parser; - parser.ParseQuery(default_macro.macro); - D_ASSERT(parser.statements.size() == 1); - D_ASSERT(parser.statements[0]->type == StatementType::SELECT_STATEMENT); - auto &select = (SelectStatement &) *parser.statements[0]; - auto result = make_unique(move(select.node)); - return CreateInternalTableMacroInfo(default_macro, move(result)); -} -static unique_ptr GetDefaultFunction(const string &input_schema, const string &input_name) { - auto schema = StringUtil::Lower(input_schema); - auto name = StringUtil::Lower(input_name); - for (idx_t index = 0; internal_macros[index].name != nullptr; index++) { - if (internal_macros[index].schema == schema && internal_macros[index].name == name) { - return DefaultFunctionGenerator::CreateInternalMacroInfo(internal_macros[index]); - } - } - return nullptr; -} -DefaultFunctionGenerator::DefaultFunctionGenerator(Catalog &catalog, SchemaCatalogEntry *schema) - : DefaultGenerator(catalog), schema(schema) { -} -unique_ptr DefaultFunctionGenerator::CreateDefaultEntry(ClientContext &context, - const string &entry_name) { - auto info = GetDefaultFunction(schema->name, entry_name); - if (info) { - return make_unique_base(&catalog, schema, (CreateMacroInfo *)info.get()); - } - return nullptr; -} -vector DefaultFunctionGenerator::GetDefaultEntries() { - vector result; - for (idx_t index = 0; internal_macros[index].name != nullptr; index++) { - if (internal_macros[index].schema == schema->name) { - result.emplace_back(internal_macros[index].name); - } - } - return result; -} -} // namespace duckdb + +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/catalog/default/default_functions.hpp +// +// 
+//===----------------------------------------------------------------------===// + + namespace duckdb { +class SchemaCatalogEntry; -struct DefaultSchema { +struct DefaultMacro { + const char *schema; const char *name; + const char *parameters[8]; + const char *macro; }; -static DefaultSchema internal_schemas[] = {{"information_schema"}, {"pg_catalog"}, {nullptr}}; +class DefaultFunctionGenerator : public DefaultGenerator { +public: + DefaultFunctionGenerator(Catalog &catalog, SchemaCatalogEntry *schema); -static bool GetDefaultSchema(const string &input_schema) { - auto schema = StringUtil::Lower(input_schema); - for (idx_t index = 0; internal_schemas[index].name != nullptr; index++) { - if (internal_schemas[index].name == schema) { - return true; - } - } - return false; -} + SchemaCatalogEntry *schema; -DefaultSchemaGenerator::DefaultSchemaGenerator(Catalog &catalog) : DefaultGenerator(catalog) { -} + DUCKDB_API static unique_ptr CreateInternalMacroInfo(DefaultMacro &default_macro); + DUCKDB_API static unique_ptr CreateInternalTableMacroInfo(DefaultMacro &default_macro); -unique_ptr DefaultSchemaGenerator::CreateDefaultEntry(ClientContext &context, const string &entry_name) { - if (GetDefaultSchema(entry_name)) { - return make_unique_base(&catalog, StringUtil::Lower(entry_name), true); - } - return nullptr; -} +public: + unique_ptr CreateDefaultEntry(ClientContext &context, const string &entry_name) override; + vector GetDefaultEntries() override; -vector DefaultSchemaGenerator::GetDefaultEntries() { - vector result; - for (idx_t index = 0; internal_schemas[index].name != nullptr; index++) { - result.emplace_back(internal_schemas[index].name); - } - return result; -} +private: + static unique_ptr CreateInternalTableMacroInfo(DefaultMacro &default_macro, + unique_ptr function); +}; } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/catalog/default/default_views.hpp +// +// +//===----------------------------------------------------------------------===// + namespace duckdb { +class SchemaCatalogEntry; -struct DefaultType { - const char *name; - LogicalTypeId type; +class DefaultViewGenerator : public DefaultGenerator { +public: + DefaultViewGenerator(Catalog &catalog, SchemaCatalogEntry *schema); + + SchemaCatalogEntry *schema; + +public: + unique_ptr CreateDefaultEntry(ClientContext &context, const string &entry_name) override; + vector GetDefaultEntries() override; }; -static DefaultType internal_types[] = {{"int", LogicalTypeId::INTEGER}, - {"int4", LogicalTypeId::INTEGER}, - {"signed", LogicalTypeId::INTEGER}, - {"integer", LogicalTypeId::INTEGER}, - {"integral", LogicalTypeId::INTEGER}, - {"int32", LogicalTypeId::INTEGER}, - {"varchar", LogicalTypeId::VARCHAR}, - {"bpchar", LogicalTypeId::VARCHAR}, - {"text", LogicalTypeId::VARCHAR}, - {"string", LogicalTypeId::VARCHAR}, - {"char", LogicalTypeId::VARCHAR}, - {"nvarchar", LogicalTypeId::VARCHAR}, - {"bytea", LogicalTypeId::BLOB}, - {"blob", LogicalTypeId::BLOB}, - {"varbinary", LogicalTypeId::BLOB}, - {"binary", LogicalTypeId::BLOB}, - {"int8", LogicalTypeId::BIGINT}, - {"bigint", LogicalTypeId::BIGINT}, - {"int64", LogicalTypeId::BIGINT}, - {"long", LogicalTypeId::BIGINT}, - {"oid", LogicalTypeId::BIGINT}, - {"int2", LogicalTypeId::SMALLINT}, - {"smallint", LogicalTypeId::SMALLINT}, - {"short", LogicalTypeId::SMALLINT}, - {"int16", LogicalTypeId::SMALLINT}, - {"timestamp", LogicalTypeId::TIMESTAMP}, - {"datetime", LogicalTypeId::TIMESTAMP}, - 
{"timestamp_us", LogicalTypeId::TIMESTAMP}, - {"timestamp_ms", LogicalTypeId::TIMESTAMP_MS}, - {"timestamp_ns", LogicalTypeId::TIMESTAMP_NS}, - {"timestamp_s", LogicalTypeId::TIMESTAMP_SEC}, - {"bool", LogicalTypeId::BOOLEAN}, - {"boolean", LogicalTypeId::BOOLEAN}, - {"logical", LogicalTypeId::BOOLEAN}, - {"decimal", LogicalTypeId::DECIMAL}, - {"dec", LogicalTypeId::DECIMAL}, - {"numeric", LogicalTypeId::DECIMAL}, - {"real", LogicalTypeId::FLOAT}, - {"float4", LogicalTypeId::FLOAT}, - {"float", LogicalTypeId::FLOAT}, - {"double", LogicalTypeId::DOUBLE}, - {"float8", LogicalTypeId::DOUBLE}, - {"tinyint", LogicalTypeId::TINYINT}, - {"int1", LogicalTypeId::TINYINT}, - {"date", LogicalTypeId::DATE}, - {"time", LogicalTypeId::TIME}, - {"interval", LogicalTypeId::INTERVAL}, - {"hugeint", LogicalTypeId::HUGEINT}, - {"int128", LogicalTypeId::HUGEINT}, - {"uuid", LogicalTypeId::UUID}, - {"guid", LogicalTypeId::UUID}, - {"struct", LogicalTypeId::STRUCT}, - {"row", LogicalTypeId::STRUCT}, - {"list", LogicalTypeId::LIST}, - {"map", LogicalTypeId::MAP}, - {"utinyint", LogicalTypeId::UTINYINT}, - {"uint8", LogicalTypeId::UTINYINT}, - {"usmallint", LogicalTypeId::USMALLINT}, - {"uint16", LogicalTypeId::USMALLINT}, - {"uinteger", LogicalTypeId::UINTEGER}, - {"uint32", LogicalTypeId::UINTEGER}, - {"ubigint", LogicalTypeId::UBIGINT}, - {"uint64", LogicalTypeId::UBIGINT}, - {"timestamptz", LogicalTypeId::TIMESTAMP_TZ}, - {"timetz", LogicalTypeId::TIME_TZ}, - {"json", LogicalTypeId::JSON}, - {"null", LogicalTypeId::SQLNULL}, - {nullptr, LogicalTypeId::INVALID}}; +} // namespace duckdb -LogicalTypeId DefaultTypeGenerator::GetDefaultType(const string &name) { - auto lower_str = StringUtil::Lower(name); - for (idx_t index = 0; internal_types[index].name != nullptr; index++) { - if (internal_types[index].name == lower_str) { - return internal_types[index].type; - } - } - return LogicalTypeId::INVALID; -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/algorithm.hpp +// +// +//===----------------------------------------------------------------------===// -DefaultTypeGenerator::DefaultTypeGenerator(Catalog &catalog, SchemaCatalogEntry *schema) - : DefaultGenerator(catalog), schema(schema) { -} -unique_ptr DefaultTypeGenerator::CreateDefaultEntry(ClientContext &context, const string &entry_name) { - if (schema->name != DEFAULT_SCHEMA) { - return nullptr; - } - auto type_id = GetDefaultType(entry_name); - if (type_id == LogicalTypeId::INVALID) { - return nullptr; - } - CreateTypeInfo info; - info.name = entry_name; - info.type = LogicalType(type_id); - info.internal = true; - info.temporary = true; - return make_unique_base(&catalog, schema, &info); -} -vector DefaultTypeGenerator::GetDefaultEntries() { - vector result; - if (schema->name != DEFAULT_SCHEMA) { - return result; - } - for (idx_t index = 0; internal_types[index].name != nullptr; index++) { - result.emplace_back(internal_types[index].name); - } - return result; -} +#include -} // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/parser/constraints/foreign_key_constraint.hpp +// +// +//===----------------------------------------------------------------------===// @@ -8766,98 +9116,49 @@ vector DefaultTypeGenerator::GetDefaultEntries() { namespace duckdb { -struct DefaultView { - const char *schema; - const char *name; - const char *sql; +class ForeignKeyConstraint : public Constraint { +public: + DUCKDB_API 
ForeignKeyConstraint(vector pk_columns, vector fk_columns, ForeignKeyInfo info); + + //! The set of main key table's columns + vector pk_columns; + //! The set of foreign key table's columns + vector fk_columns; + ForeignKeyInfo info; + +public: + DUCKDB_API string ToString() const override; + + DUCKDB_API unique_ptr Copy() const override; + + //! Serialize to a stand-alone binary blob + DUCKDB_API void Serialize(FieldWriter &writer) const override; + //! Deserializes a ParsedConstraint + DUCKDB_API static unique_ptr Deserialize(FieldReader &source); }; -static DefaultView internal_views[] = { - {DEFAULT_SCHEMA, "pragma_database_list", "SELECT * FROM pragma_database_list()"}, - {DEFAULT_SCHEMA, "sqlite_master", "select 'table' \"type\", table_name \"name\", table_name \"tbl_name\", 0 rootpage, sql from duckdb_tables union all select 'view' \"type\", view_name \"name\", view_name \"tbl_name\", 0 rootpage, sql from duckdb_views union all select 'index' \"type\", index_name \"name\", table_name \"tbl_name\", 0 rootpage, sql from duckdb_indexes;"}, - {DEFAULT_SCHEMA, "sqlite_schema", "SELECT * FROM sqlite_master"}, - {DEFAULT_SCHEMA, "sqlite_temp_master", "SELECT * FROM sqlite_master"}, - {DEFAULT_SCHEMA, "sqlite_temp_schema", "SELECT * FROM sqlite_master"}, - {DEFAULT_SCHEMA, "duckdb_constraints", "SELECT * FROM duckdb_constraints()"}, - {DEFAULT_SCHEMA, "duckdb_columns", "SELECT * FROM duckdb_columns() WHERE NOT internal"}, - {DEFAULT_SCHEMA, "duckdb_indexes", "SELECT * FROM duckdb_indexes()"}, - {DEFAULT_SCHEMA, "duckdb_schemas", "SELECT * FROM duckdb_schemas() WHERE NOT internal"}, - {DEFAULT_SCHEMA, "duckdb_tables", "SELECT * FROM duckdb_tables() WHERE NOT internal"}, - {DEFAULT_SCHEMA, "duckdb_types", "SELECT * FROM duckdb_types()"}, - {DEFAULT_SCHEMA, "duckdb_views", "SELECT * FROM duckdb_views() WHERE NOT internal"}, - {"pg_catalog", "pg_am", "SELECT 0 oid, 'art' amname, NULL amhandler, 'i' amtype"}, - {"pg_catalog", "pg_attribute", "SELECT table_oid attrelid, column_name attname, data_type_id atttypid, 0 attstattarget, NULL attlen, column_index attnum, 0 attndims, -1 attcacheoff, case when data_type ilike '%decimal%' then numeric_precision*1000+numeric_scale else -1 end atttypmod, false attbyval, NULL attstorage, NULL attalign, NOT is_nullable attnotnull, column_default IS NOT NULL atthasdef, false atthasmissing, '' attidentity, '' attgenerated, false attisdropped, true attislocal, 0 attinhcount, 0 attcollation, NULL attcompression, NULL attacl, NULL attoptions, NULL attfdwoptions, NULL attmissingval FROM duckdb_columns()"}, - {"pg_catalog", "pg_attrdef", "SELECT column_index oid, table_oid adrelid, column_index adnum, column_default adbin from duckdb_columns() where column_default is not null;"}, - {"pg_catalog", "pg_class", "SELECT table_oid oid, table_name relname, schema_oid relnamespace, 0 reltype, 0 reloftype, 0 relowner, 0 relam, 0 relfilenode, 0 reltablespace, 0 relpages, estimated_size::real reltuples, 0 relallvisible, 0 reltoastrelid, 0 reltoastidxid, index_count > 0 relhasindex, false relisshared, case when temporary then 't' else 'p' end relpersistence, 'r' relkind, column_count relnatts, check_constraint_count relchecks, false relhasoids, has_primary_key relhaspkey, false relhasrules, false relhastriggers, false relhassubclass, false relrowsecurity, true relispopulated, NULL relreplident, false relispartition, 0 relrewrite, 0 relfrozenxid, NULL relminmxid, NULL relacl, NULL reloptions, NULL relpartbound FROM duckdb_tables() UNION ALL SELECT view_oid oid, view_name 
relname, schema_oid relnamespace, 0 reltype, 0 reloftype, 0 relowner, 0 relam, 0 relfilenode, 0 reltablespace, 0 relpages, 0 reltuples, 0 relallvisible, 0 reltoastrelid, 0 reltoastidxid, false relhasindex, false relisshared, case when temporary then 't' else 'p' end relpersistence, 'v' relkind, column_count relnatts, 0 relchecks, false relhasoids, false relhaspkey, false relhasrules, false relhastriggers, false relhassubclass, false relrowsecurity, true relispopulated, NULL relreplident, false relispartition, 0 relrewrite, 0 relfrozenxid, NULL relminmxid, NULL relacl, NULL reloptions, NULL relpartbound FROM duckdb_views() UNION ALL SELECT sequence_oid oid, sequence_name relname, schema_oid relnamespace, 0 reltype, 0 reloftype, 0 relowner, 0 relam, 0 relfilenode, 0 reltablespace, 0 relpages, 0 reltuples, 0 relallvisible, 0 reltoastrelid, 0 reltoastidxid, false relhasindex, false relisshared, case when temporary then 't' else 'p' end relpersistence, 'S' relkind, 0 relnatts, 0 relchecks, false relhasoids, false relhaspkey, false relhasrules, false relhastriggers, false relhassubclass, false relrowsecurity, true relispopulated, NULL relreplident, false relispartition, 0 relrewrite, 0 relfrozenxid, NULL relminmxid, NULL relacl, NULL reloptions, NULL relpartbound FROM duckdb_sequences() UNION ALL SELECT index_oid oid, index_name relname, schema_oid relnamespace, 0 reltype, 0 reloftype, 0 relowner, 0 relam, 0 relfilenode, 0 reltablespace, 0 relpages, 0 reltuples, 0 relallvisible, 0 reltoastrelid, 0 reltoastidxid, false relhasindex, false relisshared, 't' relpersistence, 'i' relkind, NULL relnatts, 0 relchecks, false relhasoids, false relhaspkey, false relhasrules, false relhastriggers, false relhassubclass, false relrowsecurity, true relispopulated, NULL relreplident, false relispartition, 0 relrewrite, 0 relfrozenxid, NULL relminmxid, NULL relacl, NULL reloptions, NULL relpartbound FROM duckdb_indexes()"}, - {"pg_catalog", "pg_constraint", "SELECT table_oid*1000000+constraint_index oid, constraint_text conname, schema_oid connamespace, CASE WHEN constraint_type='CHECK' then 'c' WHEN constraint_type='UNIQUE' then 'u' WHEN constraint_type='PRIMARY KEY' THEN 'p' ELSE 'x' END contype, false condeferrable, false condeferred, true convalidated, table_oid conrelid, 0 contypid, 0 conindid, 0 conparentid, 0 confrelid, NULL confupdtype, NULL confdeltype, NULL confmatchtype, true conislocal, 0 coninhcount, false connoinherit, constraint_column_indexes conkey, NULL confkey, NULL conpfeqop, NULL conppeqop, NULL conffeqop, NULL conexclop, expression conbin FROM duckdb_constraints()"}, - {"pg_catalog", "pg_depend", "SELECT * FROM duckdb_dependencies()"}, - {"pg_catalog", "pg_description", "SELECT NULL objoid, NULL classoid, NULL objsubid, NULL description WHERE 1=0"}, - {"pg_catalog", "pg_enum", "SELECT NULL oid, NULL enumtypid, NULL enumsortorder, NULL enumlabel WHERE 1=0"}, - {"pg_catalog", "pg_index", "SELECT index_oid indexrelid, table_oid indrelid, 0 indnatts, 0 indnkeyatts, is_unique indisunique, is_primary indisprimary, false indisexclusion, true indimmediate, false indisclustered, true indisvalid, false indcheckxmin, true indisready, true indislive, false indisreplident, NULL::INT[] indkey, NULL::OID[] indcollation, NULL::OID[] indclass, NULL::INT[] indoption, expressions indexprs, NULL indpred FROM duckdb_indexes()"}, - {"pg_catalog", "pg_indexes", "SELECT schema_name schemaname, table_name tablename, index_name indexname, NULL \"tablespace\", sql indexdef FROM duckdb_indexes()"}, - {"pg_catalog", 
"pg_namespace", "SELECT oid, schema_name nspname, 0 nspowner, NULL nspacl FROM duckdb_schemas()"}, - {"pg_catalog", "pg_sequence", "SELECT sequence_oid seqrelid, 0 seqtypid, start_value seqstart, increment_by seqincrement, max_value seqmax, min_value seqmin, 0 seqcache, cycle seqcycle FROM duckdb_sequences()"}, - {"pg_catalog", "pg_sequences", "SELECT schema_name schemaname, sequence_name sequencename, 'duckdb' sequenceowner, 0 data_type, start_value, min_value, max_value, increment_by, cycle, 0 cache_size, last_value FROM duckdb_sequences()"}, - {"pg_catalog", "pg_tables", "SELECT schema_name schemaname, table_name tablename, 'duckdb' tableowner, NULL \"tablespace\", index_count > 0 hasindexes, false hasrules, false hastriggers FROM duckdb_tables()"}, - {"pg_catalog", "pg_tablespace", "SELECT 0 oid, 'pg_default' spcname, 0 spcowner, NULL spcacl, NULL spcoptions"}, - {"pg_catalog", "pg_type", "SELECT type_oid oid, format_pg_type(type_name) typname, schema_oid typnamespace, 0 typowner, type_size typlen, false typbyval, 'b' typtype, CASE WHEN type_category='NUMERIC' THEN 'N' WHEN type_category='STRING' THEN 'S' WHEN type_category='DATETIME' THEN 'D' WHEN type_category='BOOLEAN' THEN 'B' WHEN type_category='COMPOSITE' THEN 'C' WHEN type_category='USER' THEN 'U' ELSE 'X' END typcategory, false typispreferred, true typisdefined, NULL typdelim, NULL typrelid, NULL typsubscript, NULL typelem, NULL typarray, NULL typinput, NULL typoutput, NULL typreceive, NULL typsend, NULL typmodin, NULL typmodout, NULL typanalyze, 'd' typalign, 'p' typstorage, NULL typnotnull, NULL typbasetype, NULL typtypmod, NULL typndims, NULL typcollation, NULL typdefaultbin, NULL typdefault, NULL typacl FROM duckdb_types();"}, - {"pg_catalog", "pg_views", "SELECT schema_name schemaname, view_name viewname, 'duckdb' viewowner, sql definition FROM duckdb_views()"}, - {"information_schema", "columns", "SELECT NULL table_catalog, schema_name table_schema, table_name, column_name, column_index ordinal_position, column_default, CASE WHEN is_nullable THEN 'YES' ELSE 'NO' END is_nullable, data_type, character_maximum_length, NULL character_octet_length, numeric_precision, numeric_precision_radix, numeric_scale, NULL datetime_precision, NULL interval_type, NULL interval_precision, NULL character_set_catalog, NULL character_set_schema, NULL character_set_name, NULL collation_catalog, NULL collation_schema, NULL collation_name, NULL domain_catalog, NULL domain_schema, NULL domain_name, NULL udt_catalog, NULL udt_schema, NULL udt_name, NULL scope_catalog, NULL scope_schema, NULL scope_name, NULL maximum_cardinality, NULL dtd_identifier, NULL is_self_referencing, NULL is_identity, NULL identity_generation, NULL identity_start, NULL identity_increment, NULL identity_maximum, NULL identity_minimum, NULL identity_cycle, NULL is_generated, NULL generation_expression, NULL is_updatable FROM duckdb_columns;"}, - {"information_schema", "schemata", "SELECT NULL catalog_name, schema_name, 'duckdb' schema_owner, NULL default_character_set_catalog, NULL default_character_set_schema, NULL default_character_set_name, sql sql_path FROM duckdb_schemas()"}, - {"information_schema", "tables", "SELECT NULL table_catalog, schema_name table_schema, table_name, CASE WHEN temporary THEN 'LOCAL TEMPORARY' ELSE 'BASE TABLE' END table_type, NULL self_referencing_column_name, NULL reference_generation, NULL user_defined_type_catalog, NULL user_defined_type_schema, NULL user_defined_type_name, 'YES' is_insertable_into, 'NO' is_typed, CASE WHEN temporary THEN 
'PRESERVE' ELSE NULL END commit_action FROM duckdb_tables() UNION ALL SELECT NULL table_catalog, schema_name table_schema, view_name table_name, 'VIEW' table_type, NULL self_referencing_column_name, NULL reference_generation, NULL user_defined_type_catalog, NULL user_defined_type_schema, NULL user_defined_type_name, 'NO' is_insertable_into, 'NO' is_typed, NULL commit_action FROM duckdb_views;"}, - {nullptr, nullptr, nullptr}}; +} // namespace duckdb + + + -static unique_ptr GetDefaultView(const string &input_schema, const string &input_name) { - auto schema = StringUtil::Lower(input_schema); - auto name = StringUtil::Lower(input_name); - for (idx_t index = 0; internal_views[index].name != nullptr; index++) { - if (internal_views[index].schema == schema && internal_views[index].name == name) { - auto result = make_unique(); - result->schema = schema; - result->sql = internal_views[index].sql; - Parser parser; - parser.ParseQuery(internal_views[index].sql); - D_ASSERT(parser.statements.size() == 1 && parser.statements[0]->type == StatementType::SELECT_STATEMENT); - result->query = unique_ptr_cast(move(parser.statements[0])); - result->temporary = true; - result->internal = true; - result->view_name = name; - return result; - } - } - return nullptr; -} -DefaultViewGenerator::DefaultViewGenerator(Catalog &catalog, SchemaCatalogEntry *schema) - : DefaultGenerator(catalog), schema(schema) { -} -unique_ptr DefaultViewGenerator::CreateDefaultEntry(ClientContext &context, const string &entry_name) { - auto info = GetDefaultView(schema->name, entry_name); - if (info) { - auto binder = Binder::CreateBinder(context); - binder->BindCreateViewInfo(*info); - return make_unique_base(&catalog, schema, info.get()); - } - return nullptr; -} -vector DefaultViewGenerator::GetDefaultEntries() { - vector result; - for (idx_t index = 0; internal_views[index].name != nullptr; index++) { - if (internal_views[index].schema == schema->name) { - result.emplace_back(internal_views[index].name); - } - } - return result; -} -} // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/constraints/bound_foreign_key_constraint.hpp +// +// +//===----------------------------------------------------------------------===// + @@ -8866,438 +9167,437 @@ vector DefaultViewGenerator::GetDefaultEntries() { namespace duckdb { -DependencyManager::DependencyManager(Catalog &catalog) : catalog(catalog) { -} - -void DependencyManager::AddObject(ClientContext &context, CatalogEntry *object, - unordered_set &dependencies) { - // check for each object in the sources if they were not deleted yet - for (auto &dependency : dependencies) { - idx_t entry_index; - CatalogEntry *catalog_entry; - if (!dependency->set) { - throw InternalException("Dependency has no set"); +class BoundForeignKeyConstraint : public BoundConstraint { +public: + BoundForeignKeyConstraint(ForeignKeyInfo info_p, physical_index_set_t pk_key_set_p, + physical_index_set_t fk_key_set_p) + : BoundConstraint(ConstraintType::FOREIGN_KEY), info(move(info_p)), pk_key_set(move(pk_key_set_p)), + fk_key_set(move(fk_key_set_p)) { +#ifdef DEBUG + D_ASSERT(info.pk_keys.size() == pk_key_set.size()); + for (auto &key : info.pk_keys) { + D_ASSERT(pk_key_set.find(key) != pk_key_set.end()); } - if (!dependency->set->GetEntryInternal(context, dependency->name, entry_index, catalog_entry)) { - throw InternalException("Dependency has already been deleted?"); + D_ASSERT(info.fk_keys.size() == fk_key_set.size()); + for 
(auto &key : info.fk_keys) { + D_ASSERT(fk_key_set.find(key) != fk_key_set.end()); } +#endif } - // indexes do not require CASCADE to be dropped, they are simply always dropped along with the table - auto dependency_type = object->type == CatalogType::INDEX_ENTRY ? DependencyType::DEPENDENCY_AUTOMATIC - : DependencyType::DEPENDENCY_REGULAR; - // add the object to the dependents_map of each object that it depends on - for (auto &dependency : dependencies) { - dependents_map[dependency].insert(Dependency(object, dependency_type)); - } - // create the dependents map for this object: it starts out empty - dependents_map[object] = dependency_set_t(); - dependencies_map[object] = dependencies; -} -void DependencyManager::DropObject(ClientContext &context, CatalogEntry *object, bool cascade) { - D_ASSERT(dependents_map.find(object) != dependents_map.end()); + ForeignKeyInfo info; + //! The same keys but stored as an unordered set + physical_index_set_t pk_key_set; + //! The same keys but stored as an unordered set + physical_index_set_t fk_key_set; +}; - // first check the objects that depend on this object - auto &dependent_objects = dependents_map[object]; - for (auto &dep : dependent_objects) { - // look up the entry in the catalog set - auto &catalog_set = *dep.entry->set; - auto mapping_value = catalog_set.GetMapping(context, dep.entry->name, true /* get_latest */); - if (mapping_value == nullptr) { - continue; - } - idx_t entry_index = mapping_value->index; - CatalogEntry *dependency_entry; +} // namespace duckdb - if (!catalog_set.GetEntryInternal(context, entry_index, dependency_entry)) { - // the dependent object was already deleted, no conflict + + + +#include + +namespace duckdb { + +void FindForeignKeyInformation(CatalogEntry *entry, AlterForeignKeyType alter_fk_type, + vector> &fk_arrays) { + if (entry->type != CatalogType::TABLE_ENTRY) { + return; + } + auto *table_entry = (TableCatalogEntry *)entry; + for (idx_t i = 0; i < table_entry->constraints.size(); i++) { + auto &cond = table_entry->constraints[i]; + if (cond->type != ConstraintType::FOREIGN_KEY) { continue; } - // conflict: attempting to delete this object but the dependent object still exists - if (cascade || dep.dependency_type == DependencyType::DEPENDENCY_AUTOMATIC || - dep.dependency_type == DependencyType::DEPENDENCY_OWNS) { - // cascade: drop the dependent object - catalog_set.DropEntryInternal(context, entry_index, *dependency_entry, cascade); - } else { - // no cascade and there are objects that depend on this object: throw error - throw CatalogException("Cannot drop entry \"%s\" because there are entries that " - "depend on it. 
Use DROP...CASCADE to drop all dependents.", - object->name); + auto &fk = (ForeignKeyConstraint &)*cond; + if (fk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE) { + fk_arrays.push_back(make_unique(fk.info.schema, fk.info.table, false, entry->name, + fk.pk_columns, fk.fk_columns, fk.info.pk_keys, + fk.info.fk_keys, alter_fk_type)); + } else if (fk.info.type == ForeignKeyType::FK_TYPE_PRIMARY_KEY_TABLE && + alter_fk_type == AlterForeignKeyType::AFT_DELETE) { + throw CatalogException("Could not drop the table because this table is main key table of the table \"%s\"", + fk.info.table); } } } -void DependencyManager::AlterObject(ClientContext &context, CatalogEntry *old_obj, CatalogEntry *new_obj) { - D_ASSERT(dependents_map.find(old_obj) != dependents_map.end()); - D_ASSERT(dependencies_map.find(old_obj) != dependencies_map.end()); +SchemaCatalogEntry::SchemaCatalogEntry(Catalog *catalog, string name_p, bool internal) + : CatalogEntry(CatalogType::SCHEMA_ENTRY, catalog, move(name_p)), + tables(*catalog, make_unique(*catalog, this)), indexes(*catalog), table_functions(*catalog), + copy_functions(*catalog), pragma_functions(*catalog), + functions(*catalog, make_unique(*catalog, this)), sequences(*catalog), + collations(*catalog), types(*catalog, make_unique(*catalog, this)) { + this->internal = internal; +} - // first check the objects that depend on this object - vector owned_objects_to_add; - auto &dependent_objects = dependents_map[old_obj]; - for (auto &dep : dependent_objects) { - // look up the entry in the catalog set - auto &catalog_set = *dep.entry->set; - idx_t entry_index; - CatalogEntry *dependency_entry; - if (!catalog_set.GetEntryInternal(context, dep.entry->name, entry_index, dependency_entry)) { - // the dependent object was already deleted, no conflict - continue; - } - if (dep.dependency_type == DependencyType::DEPENDENCY_OWNS) { - // the dependent object is owned by the current object - owned_objects_to_add.push_back(dep.entry); - continue; - } - // conflict: attempting to alter this object but the dependent object still exists - // no cascade and there are objects that depend on this object: throw error - throw CatalogException("Cannot alter entry \"%s\" because there are entries that " - "depend on it.", - old_obj->name); +CatalogEntry *SchemaCatalogEntry::AddEntry(ClientContext &context, unique_ptr entry, + OnCreateConflict on_conflict, unordered_set dependencies) { + auto entry_name = entry->name; + auto entry_type = entry->type; + auto result = entry.get(); + + // first find the set for this entry + auto &set = GetCatalogSet(entry_type); + + if (name != TEMP_SCHEMA) { + dependencies.insert(this); + } else { + entry->temporary = true; } - // add the new object to the dependents_map of each object that it depends on - auto &old_dependencies = dependencies_map[old_obj]; - vector to_delete; - for (auto &dependency : old_dependencies) { - if (dependency->type == CatalogType::TYPE_ENTRY) { - auto user_type = (TypeCatalogEntry *)dependency; - auto table = (TableCatalogEntry *)new_obj; - bool deleted_dependency = true; - for (auto &column : table->columns) { - if (column.Type() == user_type->user_type) { - deleted_dependency = false; - break; - } - } - if (deleted_dependency) { - to_delete.push_back(dependency); - continue; + if (on_conflict == OnCreateConflict::REPLACE_ON_CONFLICT) { + // CREATE OR REPLACE: first try to drop the entry + auto old_entry = set.GetEntry(context, entry_name); + if (old_entry) { + if (old_entry->type != entry_type) { + throw 
CatalogException("Existing object %s is of type %s, trying to replace with type %s", entry_name, + CatalogTypeToString(old_entry->type), CatalogTypeToString(entry_type)); } + (void)set.DropEntry(context, entry_name, false); } - dependents_map[dependency].insert(new_obj); } - for (auto &dependency : to_delete) { - old_dependencies.erase(dependency); - dependents_map[dependency].erase(old_obj); - } - - // We might have to add a type dependency - vector to_add; - if (new_obj->type == CatalogType::TABLE_ENTRY) { - auto table = (TableCatalogEntry *)new_obj; - for (auto &column : table->columns) { - auto user_type_catalog = LogicalType::GetCatalog(column.Type()); - if (user_type_catalog) { - to_add.push_back(user_type_catalog); - } + // now try to add the entry + if (!set.CreateEntry(context, entry_name, move(entry), dependencies)) { + // entry already exists! + if (on_conflict == OnCreateConflict::ERROR_ON_CONFLICT) { + throw CatalogException("%s with name \"%s\" already exists!", CatalogTypeToString(entry_type), entry_name); + } else { + return nullptr; } } - // add the new object to the dependency manager - dependents_map[new_obj] = dependency_set_t(); - dependencies_map[new_obj] = old_dependencies; + return result; +} - for (auto &dependency : to_add) { - dependencies_map[new_obj].insert(dependency); - dependents_map[dependency].insert(new_obj); - } +CatalogEntry *SchemaCatalogEntry::AddEntry(ClientContext &context, unique_ptr entry, + OnCreateConflict on_conflict) { + unordered_set dependencies; + return AddEntry(context, move(entry), on_conflict, dependencies); +} - for (auto &dependency : owned_objects_to_add) { - dependents_map[new_obj].insert(Dependency(dependency, DependencyType::DEPENDENCY_OWNS)); - dependents_map[dependency].insert(Dependency(new_obj, DependencyType::DEPENDENCY_OWNED_BY)); - dependencies_map[new_obj].insert(dependency); - } +CatalogEntry *SchemaCatalogEntry::CreateSequence(ClientContext &context, CreateSequenceInfo *info) { + auto sequence = make_unique(catalog, this, info); + return AddEntry(context, move(sequence), info->on_conflict); } -void DependencyManager::EraseObject(CatalogEntry *object) { - // obtain the writing lock - EraseObjectInternal(object); +CatalogEntry *SchemaCatalogEntry::CreateType(ClientContext &context, CreateTypeInfo *info) { + auto type_entry = make_unique(catalog, this, info); + return AddEntry(context, move(type_entry), info->on_conflict); } -void DependencyManager::EraseObjectInternal(CatalogEntry *object) { - if (dependents_map.find(object) == dependents_map.end()) { - // dependencies already removed - return; +CatalogEntry *SchemaCatalogEntry::CreateTable(ClientContext &context, BoundCreateTableInfo *info) { + auto table = make_unique(catalog, this, info); + table->storage->info->cardinality = table->storage->GetTotalRows(); + + CatalogEntry *entry = AddEntry(context, move(table), info->Base().on_conflict, info->dependencies); + if (!entry) { + return nullptr; } - D_ASSERT(dependents_map.find(object) != dependents_map.end()); - D_ASSERT(dependencies_map.find(object) != dependencies_map.end()); - // now for each of the dependencies, erase the entries from the dependents_map - for (auto &dependency : dependencies_map[object]) { - auto entry = dependents_map.find(dependency); - if (entry != dependents_map.end()) { - D_ASSERT(entry->second.find(object) != entry->second.end()); - entry->second.erase(object); - } + + // add a foreign key constraint in main key table if there is a foreign key constraint + vector> fk_arrays; + 
FindForeignKeyInformation(entry, AlterForeignKeyType::AFT_ADD, fk_arrays); + for (idx_t i = 0; i < fk_arrays.size(); i++) { + // alter primary key table + AlterForeignKeyInfo *fk_info = fk_arrays[i].get(); + catalog->Alter(context, fk_info); + + // make a dependency between this table and referenced table + auto &set = GetCatalogSet(CatalogType::TABLE_ENTRY); + info->dependencies.insert(set.GetEntry(context, fk_info->name)); } - // erase the dependents and dependencies for this object - dependents_map.erase(object); - dependencies_map.erase(object); + return entry; } -void DependencyManager::Scan(const std::function &callback) { - lock_guard write_lock(catalog.write_lock); - for (auto &entry : dependents_map) { - for (auto &dependent : entry.second) { - callback(entry.first, dependent.entry, dependent.dependency_type); - } - } +CatalogEntry *SchemaCatalogEntry::CreateView(ClientContext &context, CreateViewInfo *info) { + auto view = make_unique(catalog, this, info); + return AddEntry(context, move(view), info->on_conflict); } -void DependencyManager::AddOwnership(ClientContext &context, CatalogEntry *owner, CatalogEntry *entry) { - // lock the catalog for writing - lock_guard write_lock(catalog.write_lock); +CatalogEntry *SchemaCatalogEntry::CreateIndex(ClientContext &context, CreateIndexInfo *info, TableCatalogEntry *table) { + unordered_set dependencies; + dependencies.insert(table); + auto index = make_unique(catalog, this, info); + return AddEntry(context, move(index), info->on_conflict, dependencies); +} - // If the owner is already owned by something else, throw an error - for (auto &dep : dependents_map[owner]) { - if (dep.dependency_type == DependencyType::DEPENDENCY_OWNED_BY) { - throw CatalogException(owner->name + " already owned by " + dep.entry->name); - } - } +CatalogEntry *SchemaCatalogEntry::CreateCollation(ClientContext &context, CreateCollationInfo *info) { + auto collation = make_unique(catalog, this, info); + return AddEntry(context, move(collation), info->on_conflict); +} - // If the entry is already owned, throw an error - for (auto &dep : dependents_map[entry]) { - // if the entry is already owned, throw error - if (dep.entry != owner) { - throw CatalogException(entry->name + " already depends on " + dep.entry->name); - } - // if the entry owns the owner, throw error - if (dep.entry == owner && dep.dependency_type == DependencyType::DEPENDENCY_OWNS) { - throw CatalogException(entry->name + " already owns " + owner->name + - ". 
Cannot have circular dependencies"); +CatalogEntry *SchemaCatalogEntry::CreateTableFunction(ClientContext &context, CreateTableFunctionInfo *info) { + auto table_function = make_unique(catalog, this, info); + return AddEntry(context, move(table_function), info->on_conflict); +} + +CatalogEntry *SchemaCatalogEntry::CreateCopyFunction(ClientContext &context, CreateCopyFunctionInfo *info) { + auto copy_function = make_unique(catalog, this, info); + return AddEntry(context, move(copy_function), info->on_conflict); +} + +CatalogEntry *SchemaCatalogEntry::CreatePragmaFunction(ClientContext &context, CreatePragmaFunctionInfo *info) { + auto pragma_function = make_unique(catalog, this, info); + return AddEntry(context, move(pragma_function), info->on_conflict); +} + +CatalogEntry *SchemaCatalogEntry::CreateFunction(ClientContext &context, CreateFunctionInfo *info) { + if (info->on_conflict == OnCreateConflict::ALTER_ON_CONFLICT) { + // check if the original entry exists + auto &catalog_set = GetCatalogSet(info->type); + auto current_entry = catalog_set.GetEntry(context, info->name); + if (current_entry) { + // the current entry exists - alter it instead + auto alter_info = info->GetAlterInfo(); + Alter(context, alter_info.get()); + return nullptr; } } + unique_ptr function; + switch (info->type) { + case CatalogType::SCALAR_FUNCTION_ENTRY: + function = make_unique_base(catalog, this, + (CreateScalarFunctionInfo *)info); + break; + case CatalogType::MACRO_ENTRY: + // create a macro function + function = make_unique_base(catalog, this, (CreateMacroInfo *)info); + break; - // Emplace guarantees that the same object cannot be inserted twice in the unordered_set - // In the case AddOwnership is called twice, because of emplace, the object will not be repeated in the set. 
- // We use an automatic dependency because if the Owner gets deleted, then the owned objects are also deleted - dependents_map[owner].emplace(Dependency(entry, DependencyType::DEPENDENCY_OWNS)); - dependents_map[entry].emplace(Dependency(owner, DependencyType::DEPENDENCY_OWNED_BY)); - dependencies_map[owner].emplace(entry); + case CatalogType::TABLE_MACRO_ENTRY: + // create a macro table function + function = make_unique_base(catalog, this, (CreateMacroInfo *)info); + break; + case CatalogType::AGGREGATE_FUNCTION_ENTRY: + D_ASSERT(info->type == CatalogType::AGGREGATE_FUNCTION_ENTRY); + // create an aggregate function + function = make_unique_base(catalog, this, + (CreateAggregateFunctionInfo *)info); + break; + default: + throw InternalException("Unknown function type \"%s\"", CatalogTypeToString(info->type)); + } + return AddEntry(context, move(function), info->on_conflict); } -} // namespace duckdb - +CatalogEntry *SchemaCatalogEntry::AddFunction(ClientContext &context, CreateFunctionInfo *info) { + auto entry = GetCatalogSet(info->type).GetEntry(context, info->name); + if (!entry) { + return CreateFunction(context, info); + } + info->on_conflict = OnCreateConflict::REPLACE_ON_CONFLICT; + switch (info->type) { + case CatalogType::SCALAR_FUNCTION_ENTRY: { + auto scalar_info = (CreateScalarFunctionInfo *)info; + auto &scalars = *(ScalarFunctionCatalogEntry *)entry; + for (const auto &scalar : scalars.functions.functions) { + scalar_info->functions.AddFunction(scalar); + } + break; + } + case CatalogType::AGGREGATE_FUNCTION_ENTRY: { + auto agg_info = (CreateAggregateFunctionInfo *)info; + auto &aggs = *(AggregateFunctionCatalogEntry *)entry; + for (const auto &agg : aggs.functions.functions) { + agg_info->functions.AddFunction(agg); + } + break; + } + default: + // Macros can only be replaced because there is only one of each name. 
+ throw InternalException("Unsupported function type \"%s\" for adding", CatalogTypeToString(info->type)); + } + return CreateFunction(context, info); +} +void SchemaCatalogEntry::DropEntry(ClientContext &context, DropInfo *info) { + auto &set = GetCatalogSet(info->type); -#ifdef DUCKDB_DEBUG_ALLOCATION + // first find the entry + auto existing_entry = set.GetEntry(context, info->name); + if (!existing_entry) { + if (!info->if_exists) { + throw CatalogException("%s with name \"%s\" does not exist!", CatalogTypeToString(info->type), info->name); + } + return; + } + if (existing_entry->type != info->type) { + throw CatalogException("Existing object %s is of type %s, trying to replace with type %s", info->name, + CatalogTypeToString(existing_entry->type), CatalogTypeToString(info->type)); + } + // if there is a foreign key constraint, get that information + vector> fk_arrays; + FindForeignKeyInformation(existing_entry, AlterForeignKeyType::AFT_DELETE, fk_arrays); + if (!set.DropEntry(context, info->name, info->cascade)) { + throw InternalException("Could not drop element because of an internal error"); + } -#include -#endif + // remove the foreign key constraint in main key table if main key table's name is valid + for (idx_t i = 0; i < fk_arrays.size(); i++) { + // alter primary key tablee + Catalog::GetCatalog(context).Alter(context, fk_arrays[i].get()); + } +} -namespace duckdb { +void SchemaCatalogEntry::Alter(ClientContext &context, AlterInfo *info) { + CatalogType type = info->GetCatalogType(); + auto &set = GetCatalogSet(type); + if (info->type == AlterType::CHANGE_OWNERSHIP) { + if (!set.AlterOwnership(context, (ChangeOwnershipInfo *)info)) { + throw CatalogException("Couldn't change ownership!"); + } + } else { + string name = info->name; + if (!set.AlterEntry(context, name, info)) { + throw CatalogException("Entry with name \"%s\" does not exist!", name); + } + } +} -AllocatedData::AllocatedData() : allocator(nullptr), pointer(nullptr), allocated_size(0) { +void SchemaCatalogEntry::Scan(ClientContext &context, CatalogType type, + const std::function &callback) { + auto &set = GetCatalogSet(type); + set.Scan(context, callback); } -AllocatedData::AllocatedData(Allocator &allocator, data_ptr_t pointer, idx_t allocated_size) - : allocator(&allocator), pointer(pointer), allocated_size(allocated_size) { +void SchemaCatalogEntry::Scan(CatalogType type, const std::function &callback) { + auto &set = GetCatalogSet(type); + set.Scan(callback); } -AllocatedData::~AllocatedData() { - Reset(); + +void SchemaCatalogEntry::Serialize(Serializer &serializer) { + FieldWriter writer(serializer); + writer.WriteString(name); + writer.Finalize(); } -AllocatedData::AllocatedData(AllocatedData &&other) noexcept - : allocator(other.allocator), pointer(nullptr), allocated_size(0) { - std::swap(pointer, other.pointer); - std::swap(allocated_size, other.allocated_size); +unique_ptr SchemaCatalogEntry::Deserialize(Deserializer &source) { + auto info = make_unique(); + + FieldReader reader(source); + info->schema = reader.ReadRequired(); + reader.Finalize(); + + return info; } -AllocatedData &AllocatedData::operator=(AllocatedData &&other) noexcept { - std::swap(allocator, other.allocator); - std::swap(pointer, other.pointer); - std::swap(allocated_size, other.allocated_size); - return *this; +string SchemaCatalogEntry::ToSQL() { + std::stringstream ss; + ss << "CREATE SCHEMA " << name << ";"; + return ss.str(); } -void AllocatedData::Reset() { - if (!pointer) { - return; +CatalogSet 
&SchemaCatalogEntry::GetCatalogSet(CatalogType type) { + switch (type) { + case CatalogType::VIEW_ENTRY: + case CatalogType::TABLE_ENTRY: + return tables; + case CatalogType::INDEX_ENTRY: + return indexes; + case CatalogType::TABLE_FUNCTION_ENTRY: + case CatalogType::TABLE_MACRO_ENTRY: + return table_functions; + case CatalogType::COPY_FUNCTION_ENTRY: + return copy_functions; + case CatalogType::PRAGMA_FUNCTION_ENTRY: + return pragma_functions; + case CatalogType::AGGREGATE_FUNCTION_ENTRY: + case CatalogType::SCALAR_FUNCTION_ENTRY: + case CatalogType::MACRO_ENTRY: + return functions; + case CatalogType::SEQUENCE_ENTRY: + return sequences; + case CatalogType::COLLATION_ENTRY: + return collations; + case CatalogType::TYPE_ENTRY: + return types; + default: + throw InternalException("Unsupported catalog type in schema"); } - D_ASSERT(allocator); - allocator->FreeData(pointer, allocated_size); - pointer = nullptr; } -//===--------------------------------------------------------------------===// -// Debug Info -//===--------------------------------------------------------------------===// -struct AllocatorDebugInfo { -#ifdef DEBUG - AllocatorDebugInfo(); - ~AllocatorDebugInfo(); +} // namespace duckdb - static string GetStackTrace(int max_depth = 128); - void AllocateData(data_ptr_t pointer, idx_t size); - void FreeData(data_ptr_t pointer, idx_t size); - void ReallocateData(data_ptr_t pointer, data_ptr_t new_pointer, idx_t old_size, idx_t new_size); -private: - //! The number of bytes that are outstanding (i.e. that have been allocated - but not freed) - //! Used for debug purposes - atomic allocation_count; -#ifdef DUCKDB_DEBUG_ALLOCATION - mutex pointer_lock; - //! Set of active outstanding pointers together with stack traces - unordered_map> pointers; -#endif -#endif -}; -PrivateAllocatorData::PrivateAllocatorData() { -} -PrivateAllocatorData::~PrivateAllocatorData() { -} -//===--------------------------------------------------------------------===// -// Allocator -//===--------------------------------------------------------------------===// -Allocator::Allocator() - : Allocator(Allocator::DefaultAllocate, Allocator::DefaultFree, Allocator::DefaultReallocate, nullptr) { -} -Allocator::Allocator(allocate_function_ptr_t allocate_function_p, free_function_ptr_t free_function_p, - reallocate_function_ptr_t reallocate_function_p, unique_ptr private_data_p) - : allocate_function(allocate_function_p), free_function(free_function_p), - reallocate_function(reallocate_function_p), private_data(move(private_data_p)) { - D_ASSERT(allocate_function); - D_ASSERT(free_function); - D_ASSERT(reallocate_function); -#ifdef DEBUG - if (!private_data) { - private_data = make_unique(); - } - private_data->debug_info = make_unique(); -#endif -} -Allocator::~Allocator() { -} +#include +#include -data_ptr_t Allocator::AllocateData(idx_t size) { - auto result = allocate_function(private_data.get(), size); -#ifdef DEBUG - D_ASSERT(private_data); - private_data->debug_info->AllocateData(result, size); -#endif - return result; -} +namespace duckdb { -void Allocator::FreeData(data_ptr_t pointer, idx_t size) { - if (!pointer) { - return; - } -#ifdef DEBUG - D_ASSERT(private_data); - private_data->debug_info->FreeData(pointer, size); -#endif - free_function(private_data.get(), pointer, size); +SequenceCatalogEntry::SequenceCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateSequenceInfo *info) + : StandardEntry(CatalogType::SEQUENCE_ENTRY, schema, catalog, info->name), usage_count(info->usage_count), + 
counter(info->start_value), increment(info->increment), start_value(info->start_value), + min_value(info->min_value), max_value(info->max_value), cycle(info->cycle) { + this->temporary = info->temporary; } -data_ptr_t Allocator::ReallocateData(data_ptr_t pointer, idx_t old_size, idx_t size) { - if (!pointer) { - return nullptr; - } - auto new_pointer = reallocate_function(private_data.get(), pointer, old_size, size); -#ifdef DEBUG - D_ASSERT(private_data); - private_data->debug_info->ReallocateData(pointer, new_pointer, old_size, size); -#endif - return new_pointer; +void SequenceCatalogEntry::Serialize(Serializer &serializer) { + FieldWriter writer(serializer); + writer.WriteString(schema->name); + writer.WriteString(name); + writer.WriteField(usage_count); + writer.WriteField(increment); + writer.WriteField(min_value); + writer.WriteField(max_value); + writer.WriteField(counter); + writer.WriteField(cycle); + writer.Finalize(); } -Allocator &Allocator::DefaultAllocator() { - static Allocator DEFAULT_ALLOCATOR; - return DEFAULT_ALLOCATOR; -} +unique_ptr SequenceCatalogEntry::Deserialize(Deserializer &source) { + auto info = make_unique(); -//===--------------------------------------------------------------------===// -// Debug Info (extended) -//===--------------------------------------------------------------------===// -#ifdef DEBUG -AllocatorDebugInfo::AllocatorDebugInfo() { - allocation_count = 0; -} -AllocatorDebugInfo::~AllocatorDebugInfo() { -#ifdef DUCKDB_DEBUG_ALLOCATION - if (allocation_count != 0) { - printf("Outstanding allocations found for Allocator\n"); - for (auto &entry : pointers) { - printf("Allocation of size %ld at address %p\n", entry.second.first, (void *)entry.first); - printf("Stack trace:\n%s\n", entry.second.second.c_str()); - printf("\n"); - } - } -#endif - //! Verify that there is no outstanding memory still associated with the batched allocator - //! Only works for access to the batched allocator through the batched allocator interface - //! If this assertion triggers, enable DUCKDB_DEBUG_ALLOCATION for more information about the allocations - D_ASSERT(allocation_count == 0); -} + FieldReader reader(source); + info->schema = reader.ReadRequired(); + info->name = reader.ReadRequired(); + info->usage_count = reader.ReadRequired(); + info->increment = reader.ReadRequired(); + info->min_value = reader.ReadRequired(); + info->max_value = reader.ReadRequired(); + info->start_value = reader.ReadRequired(); + info->cycle = reader.ReadRequired(); + reader.Finalize(); -string AllocatorDebugInfo::GetStackTrace(int max_depth) { -#ifdef DUCKDB_DEBUG_ALLOCATION - string result; - auto callstack = unique_ptr(new void *[max_depth]); - int frames = backtrace(callstack.get(), max_depth); - char **strs = backtrace_symbols(callstack.get(), frames); - for (int i = 0; i < frames; i++) { - result += strs[i]; - result += "\n"; - } - free(strs); - return result; -#else - throw InternalException("GetStackTrace not supported without DUCKDB_DEBUG_ALLOCATION"); -#endif + return info; } -void AllocatorDebugInfo::AllocateData(data_ptr_t pointer, idx_t size) { - allocation_count += size; -#ifdef DUCKDB_DEBUG_ALLOCATION - lock_guard l(pointer_lock); - pointers[pointer] = make_pair(size, GetStackTrace()); -#endif +string SequenceCatalogEntry::ToSQL() { + std::stringstream ss; + ss << "CREATE SEQUENCE "; + ss << name; + ss << " INCREMENT BY " << increment; + ss << " MINVALUE " << min_value; + ss << " MAXVALUE " << max_value; + ss << " START " << counter; + ss << " " << (cycle ? 
"CYCLE" : "NO CYCLE") << ";"; + return ss.str(); } +} // namespace duckdb + + + -void AllocatorDebugInfo::FreeData(data_ptr_t pointer, idx_t size) { - D_ASSERT(allocation_count >= size); - allocation_count -= size; -#ifdef DUCKDB_DEBUG_ALLOCATION - lock_guard l(pointer_lock); - // verify that the pointer exists - D_ASSERT(pointers.find(pointer) != pointers.end()); - // verify that the stored size matches the passed in size - D_ASSERT(pointers[pointer].first == size); - // erase the pointer - pointers.erase(pointer); -#endif -} -void AllocatorDebugInfo::ReallocateData(data_ptr_t pointer, data_ptr_t new_pointer, idx_t old_size, idx_t new_size) { - FreeData(pointer, old_size); - AllocateData(new_pointer, new_size); -} -#endif -} // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/arrow/arrow_appender.hpp -// -// -//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/common/arrow/arrow_converter.hpp +// duckdb/parser/constraints/check_constraint.hpp // // //===----------------------------------------------------------------------===// @@ -9307,43 +9607,59 @@ void AllocatorDebugInfo::ReallocateData(data_ptr_t pointer, data_ptr_t new_point -struct ArrowSchema; namespace duckdb { -struct ArrowConverter { - DUCKDB_API static void ToArrowSchema(ArrowSchema *out_schema, vector &types, vector &names, - string &config_timezone); - DUCKDB_API static void ToArrowArray(DataChunk &input, ArrowArray *out_array); +//! The CheckConstraint contains an expression that must evaluate to TRUE for +//! every row in a table +class CheckConstraint : public Constraint { +public: + DUCKDB_API explicit CheckConstraint(unique_ptr expression); + + unique_ptr expression; + +public: + DUCKDB_API string ToString() const override; + + DUCKDB_API unique_ptr Copy() const override; + + DUCKDB_API void Serialize(FieldWriter &writer) const override; + DUCKDB_API static unique_ptr Deserialize(FieldReader &source); }; } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/parser/constraints/not_null_constraint.hpp +// +// +//===----------------------------------------------------------------------===// + + + -struct ArrowSchema; namespace duckdb { -struct ArrowAppendData; +class NotNullConstraint : public Constraint { +public: + DUCKDB_API explicit NotNullConstraint(LogicalIndex index); + DUCKDB_API ~NotNullConstraint() override; + + //! Column index this constraint pertains to + LogicalIndex index; -//! The ArrowAppender class can be used to incrementally construct an arrow array by appending data chunks into it -class ArrowAppender { public: - DUCKDB_API ArrowAppender(vector types, idx_t initial_capacity); - DUCKDB_API ~ArrowAppender(); + DUCKDB_API string ToString() const override; - //! Append a data chunk to the underlying arrow array - DUCKDB_API void Append(DataChunk &input); - //! Returns the underlying arrow array - DUCKDB_API ArrowArray Finalize(); + DUCKDB_API unique_ptr Copy() const override; -private: - //! The types of the chunks that will be appended in - vector types; - //! The root arrow append data - vector> root_data; - //! The total row count that has been appended - idx_t row_count = 0; + //! Serialize to a stand-alone binary blob + DUCKDB_API void Serialize(FieldWriter &writer) const override; + //! 
Deserializes a NotNullConstraint + DUCKDB_API static unique_ptr Deserialize(FieldReader &source); }; } // namespace duckdb @@ -9351,7 +9667,7 @@ class ArrowAppender { //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/common/arrow/arrow_buffer.hpp +// duckdb/parser/constraints/unique_constraint.hpp // // //===----------------------------------------------------------------------===// @@ -9360,849 +9676,367 @@ class ArrowAppender { -struct ArrowSchema; namespace duckdb { -struct ArrowBuffer { - static constexpr const idx_t MINIMUM_SHRINK_SIZE = 4096; - - ArrowBuffer() : dataptr(nullptr), count(0), capacity(0) { - } - ~ArrowBuffer() { - if (!dataptr) { - return; - } - free(dataptr); - dataptr = nullptr; - count = 0; - capacity = 0; - } - // disable copy constructors - ArrowBuffer(const ArrowBuffer &other) = delete; - ArrowBuffer &operator=(const ArrowBuffer &) = delete; - //! enable move constructors - ArrowBuffer(ArrowBuffer &&other) noexcept { - std::swap(dataptr, other.dataptr); - std::swap(count, other.count); - std::swap(capacity, other.capacity); - } - ArrowBuffer &operator=(ArrowBuffer &&other) noexcept { - std::swap(dataptr, other.dataptr); - std::swap(count, other.count); - std::swap(capacity, other.capacity); - return *this; - } +class UniqueConstraint : public Constraint { +public: + DUCKDB_API UniqueConstraint(LogicalIndex index, bool is_primary_key); + DUCKDB_API UniqueConstraint(vector columns, bool is_primary_key); - void reserve(idx_t bytes) { // NOLINT - auto new_capacity = NextPowerOfTwo(bytes); - if (new_capacity <= capacity) { - return; - } - ReserveInternal(new_capacity); - } + //! The index of the column for which this constraint holds. Only used when the constraint relates to a single + //! column, equal to DConstants::INVALID_INDEX if not used + LogicalIndex index; + //! The set of columns for which this constraint holds by name. Only used when the index field is not used. + vector columns; + //! Whether or not this is a PRIMARY KEY constraint, or a UNIQUE constraint. + bool is_primary_key; - void resize(idx_t bytes) { // NOLINT - reserve(bytes); - count = bytes; - } +public: + DUCKDB_API string ToString() const override; - void resize(idx_t bytes, data_t value) { // NOLINT - reserve(bytes); - for (idx_t i = count; i < bytes; i++) { - dataptr[i] = value; - } - count = bytes; - } + DUCKDB_API unique_ptr Copy() const override; - idx_t size() { // NOLINT - return count; - } + //! Serialize to a stand-alone binary blob + DUCKDB_API void Serialize(FieldWriter &writer) const override; + //! 
Deserializes a ParsedConstraint + DUCKDB_API static unique_ptr Deserialize(FieldReader &source); +}; - data_ptr_t data() { // NOLINT - return dataptr; - } +} // namespace duckdb -private: - void ReserveInternal(idx_t bytes) { - if (dataptr) { - dataptr = (data_ptr_t)realloc(dataptr, bytes); - } else { - dataptr = (data_ptr_t)malloc(bytes); - } - capacity = bytes; - } -private: - data_ptr_t dataptr = nullptr; - idx_t count = 0; - idx_t capacity = 0; -}; -} // namespace duckdb //===----------------------------------------------------------------------===// // DuckDB // -// duckdb/common/array.hpp +// duckdb/parser/parsed_expression_iterator.hpp // // //===----------------------------------------------------------------------===// -#include - -namespace duckdb { -using std::array; -} +#include namespace duckdb { -//===--------------------------------------------------------------------===// -// Arrow append data -//===--------------------------------------------------------------------===// -typedef void (*initialize_t)(ArrowAppendData &result, const LogicalType &type, idx_t capacity); -typedef void (*append_vector_t)(ArrowAppendData &append_data, Vector &input, idx_t size); -typedef void (*finalize_t)(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result); - -struct ArrowAppendData { - // the buffers of the arrow vector - ArrowBuffer validity; - ArrowBuffer main_buffer; - ArrowBuffer aux_buffer; - - idx_t row_count = 0; - idx_t null_count = 0; - - // function pointers for construction - initialize_t initialize = nullptr; - append_vector_t append_vector = nullptr; - finalize_t finalize = nullptr; +class ParsedExpressionIterator { +public: + static void EnumerateChildren(const ParsedExpression &expression, + const std::function &callback); + static void EnumerateChildren(ParsedExpression &expr, const std::function &callback); + static void EnumerateChildren(ParsedExpression &expr, + const std::function &child)> &callback); - // child data (if any) - vector> child_data; + static void EnumerateTableRefChildren(TableRef &ref, + const std::function &child)> &callback); + static void EnumerateQueryNodeChildren(QueryNode &node, + const std::function &child)> &callback); - //! 
the arrow array C API data, only set after Finalize - unique_ptr array; - duckdb::array buffers = {{nullptr, nullptr, nullptr}}; - vector child_pointers; + static void EnumerateQueryNodeModifiers(QueryNode &node, + const std::function &child)> &callback); }; -//===--------------------------------------------------------------------===// -// ArrowAppender -//===--------------------------------------------------------------------===// -static unique_ptr InitializeArrowChild(const LogicalType &type, idx_t capacity); -static ArrowArray *FinalizeArrowChild(const LogicalType &type, ArrowAppendData &append_data); +} // namespace duckdb -ArrowAppender::ArrowAppender(vector types_p, idx_t initial_capacity) : types(move(types_p)) { - for (auto &type : types) { - auto entry = InitializeArrowChild(type, initial_capacity); - root_data.push_back(move(entry)); - } -} -ArrowAppender::~ArrowAppender() { -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/constraints/bound_check_constraint.hpp +// +// +//===----------------------------------------------------------------------===// -//===--------------------------------------------------------------------===// -// Append Helper Functions -//===--------------------------------------------------------------------===// -static void GetBitPosition(idx_t row_idx, idx_t ¤t_byte, uint8_t ¤t_bit) { - current_byte = row_idx / 8; - current_bit = row_idx % 8; -} -static void UnsetBit(uint8_t *data, idx_t current_byte, uint8_t current_bit) { - data[current_byte] &= ~((uint64_t)1 << current_bit); -} -static void NextBit(idx_t ¤t_byte, uint8_t ¤t_bit) { - current_bit++; - if (current_bit == 8) { - current_byte++; - current_bit = 0; - } -} -static void ResizeValidity(ArrowBuffer &buffer, idx_t row_count) { - auto byte_count = (row_count + 7) / 8; - buffer.resize(byte_count, 0xFF); -} -static void SetNull(ArrowAppendData &append_data, uint8_t *validity_data, idx_t current_byte, uint8_t current_bit) { - UnsetBit(validity_data, current_byte, current_bit); - append_data.null_count++; -} -static void AppendValidity(ArrowAppendData &append_data, UnifiedVectorFormat &format, idx_t size) { - // resize the buffer, filling the validity buffer with all valid values - ResizeValidity(append_data.validity, append_data.row_count + size); - if (format.validity.AllValid()) { - // if all values are valid we don't need to do anything else - return; - } - // otherwise we iterate through the validity mask - auto validity_data = (uint8_t *)append_data.validity.data(); - uint8_t current_bit; - idx_t current_byte; - GetBitPosition(append_data.row_count, current_byte, current_bit); - for (idx_t i = 0; i < size; i++) { - auto source_idx = format.sel->get_index(i); - // append the validity mask - if (!format.validity.RowIsValid(source_idx)) { - SetNull(append_data, validity_data, current_byte, current_bit); - } - NextBit(current_byte, current_bit); - } -} -//===--------------------------------------------------------------------===// -// Scalar Types -//===--------------------------------------------------------------------===// -struct ArrowScalarConverter { - template - static TGT Operation(SRC input) { - return input; - } +namespace duckdb { - static bool SkipNulls() { - return false; +//! The CheckConstraint contains an expression that must evaluate to TRUE for +//! 
every row in a table +class BoundCheckConstraint : public BoundConstraint { +public: + BoundCheckConstraint() : BoundConstraint(ConstraintType::CHECK) { } - template - static void SetNull(TGT &value) { - } + //! The expression + unique_ptr expression; + //! The columns used by the CHECK constraint + physical_index_set_t bound_columns; }; -struct ArrowIntervalConverter { - template - static TGT Operation(SRC input) { - return Interval::GetMilli(input); - } +} // namespace duckdb - static bool SkipNulls() { - return true; - } - template - static void SetNull(TGT &value) { - value = 0; - } -}; +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/constraints/bound_not_null_constraint.hpp +// +// +//===----------------------------------------------------------------------===// -template -struct ArrowScalarBaseData { - static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { - UnifiedVectorFormat format; - input.ToUnifiedFormat(size, format); - // append the validity mask - AppendValidity(append_data, format, size); - // append the main data - append_data.main_buffer.resize(append_data.main_buffer.size() + sizeof(TGT) * size); - auto data = (SRC *)format.data; - auto result_data = (TGT *)append_data.main_buffer.data(); - for (idx_t i = 0; i < size; i++) { - auto source_idx = format.sel->get_index(i); - auto result_idx = append_data.row_count + i; - if (OP::SkipNulls() && !format.validity.RowIsValid(source_idx)) { - OP::template SetNull(result_data[result_idx]); - continue; - } - result_data[result_idx] = OP::template Operation(data[source_idx]); - } - append_data.row_count += size; - } -}; +namespace duckdb { -template -struct ArrowScalarData : public ArrowScalarBaseData { - static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { - result.main_buffer.reserve(capacity * sizeof(TGT)); +class BoundNotNullConstraint : public BoundConstraint { +public: + explicit BoundNotNullConstraint(PhysicalIndex index) : BoundConstraint(ConstraintType::NOT_NULL), index(index) { } - static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { - result->n_buffers = 2; - result->buffers[1] = append_data.main_buffer.data(); - } + //! 
Column index this constraint pertains to + PhysicalIndex index; }; -//===--------------------------------------------------------------------===// -// Enums -//===--------------------------------------------------------------------===// -template -struct ArrowEnumData : public ArrowScalarBaseData { - static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { - result.main_buffer.reserve(capacity * sizeof(TGT)); - // construct the enum child data - auto enum_data = InitializeArrowChild(LogicalType::VARCHAR, EnumType::GetSize(type)); - enum_data->append_vector(*enum_data, EnumType::GetValuesInsertOrder(type), EnumType::GetSize(type)); - result.child_data.push_back(move(enum_data)); - } +} // namespace duckdb - static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { - result->n_buffers = 2; - result->buffers[1] = append_data.main_buffer.data(); - // finalize the enum child data, and assign it to the dictionary - result->dictionary = FinalizeArrowChild(LogicalType::VARCHAR, *append_data.child_data[0]); - } -}; +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/constraints/bound_unique_constraint.hpp +// +// +//===----------------------------------------------------------------------===// -//===--------------------------------------------------------------------===// -// Boolean -//===--------------------------------------------------------------------===// -struct ArrowBoolData { - static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { - auto byte_count = (capacity + 7) / 8; - result.main_buffer.reserve(byte_count); - } - static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { - UnifiedVectorFormat format; - input.ToUnifiedFormat(size, format); - // we initialize both the validity and the bit set to 1's - ResizeValidity(append_data.validity, append_data.row_count + size); - ResizeValidity(append_data.main_buffer, append_data.row_count + size); - auto data = (bool *)format.data; - auto result_data = (uint8_t *)append_data.main_buffer.data(); - auto validity_data = (uint8_t *)append_data.validity.data(); - uint8_t current_bit; - idx_t current_byte; - GetBitPosition(append_data.row_count, current_byte, current_bit); - for (idx_t i = 0; i < size; i++) { - auto source_idx = format.sel->get_index(i); - // append the validity mask - if (!format.validity.RowIsValid(source_idx)) { - SetNull(append_data, validity_data, current_byte, current_bit); - } else if (!data[source_idx]) { - UnsetBit(result_data, current_byte, current_bit); - } - NextBit(current_byte, current_bit); + + + +namespace duckdb { + +class BoundUniqueConstraint : public BoundConstraint { +public: + BoundUniqueConstraint(vector keys, logical_index_set_t key_set, bool is_primary_key) + : BoundConstraint(ConstraintType::UNIQUE), keys(move(keys)), key_set(move(key_set)), + is_primary_key(is_primary_key) { +#ifdef DEBUG + D_ASSERT(keys.size() == key_set.size()); + for (auto &key : keys) { + D_ASSERT(key_set.find(key) != key_set.end()); } - append_data.row_count += size; +#endif } - static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { - result->n_buffers = 2; - result->buffers[1] = append_data.main_buffer.data(); - } + //! The keys that define the unique constraint + vector keys; + //! The same keys but stored as an unordered set + logical_index_set_t key_set; + //! 
Whether or not the unique constraint is a primary key + bool is_primary_key; }; -//===--------------------------------------------------------------------===// -// Varchar -//===--------------------------------------------------------------------===// -struct ArrowVarcharConverter { - template - static idx_t GetLength(SRC input) { - return input.GetSize(); - } +} // namespace duckdb - template - static void WriteData(data_ptr_t target, SRC input) { - memcpy(target, input.GetDataUnsafe(), input.GetSize()); - } -}; -struct ArrowUUIDConverter { - template - static idx_t GetLength(SRC input) { - return UUID::STRING_SIZE; - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/expression_binder/alter_binder.hpp +// +// +//===----------------------------------------------------------------------===// - template - static void WriteData(data_ptr_t target, SRC input) { - UUID::ToString(input, (char *)target); - } -}; -template -struct ArrowVarcharData { - static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { - result.main_buffer.reserve((capacity + 1) * sizeof(uint32_t)); - result.aux_buffer.reserve(capacity); - } - static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { - UnifiedVectorFormat format; - input.ToUnifiedFormat(size, format); - // resize the validity mask and set up the validity buffer for iteration - ResizeValidity(append_data.validity, append_data.row_count + size); - auto validity_data = (uint8_t *)append_data.validity.data(); - // resize the offset buffer - the offset buffer holds the offsets into the child array - append_data.main_buffer.resize(append_data.main_buffer.size() + sizeof(uint32_t) * (size + 1)); - auto data = (SRC *)format.data; - auto offset_data = (uint32_t *)append_data.main_buffer.data(); - if (append_data.row_count == 0) { - // first entry - offset_data[0] = 0; - } - // now append the string data to the auxiliary buffer - // the auxiliary buffer's length depends on the string lengths, so we resize as required - auto last_offset = offset_data[append_data.row_count]; - for (idx_t i = 0; i < size; i++) { - auto source_idx = format.sel->get_index(i); - auto offset_idx = append_data.row_count + i + 1; - if (!format.validity.RowIsValid(source_idx)) { - uint8_t current_bit; - idx_t current_byte; - GetBitPosition(append_data.row_count + i, current_byte, current_bit); - SetNull(append_data, validity_data, current_byte, current_bit); - offset_data[offset_idx] = last_offset; - continue; - } +namespace duckdb { +class TableCatalogEntry; - auto string_length = OP::GetLength(data[source_idx]); +//! 
The ALTER binder is responsible for binding an expression within alter statements +class AlterBinder : public ExpressionBinder { +public: + AlterBinder(Binder &binder, ClientContext &context, TableCatalogEntry &table, vector &bound_columns, + LogicalType target_type); - // append the offset data - auto current_offset = last_offset + string_length; - offset_data[offset_idx] = current_offset; + TableCatalogEntry &table; + vector &bound_columns; - // resize the string buffer if required, and write the string data - append_data.aux_buffer.resize(current_offset); - OP::WriteData(append_data.aux_buffer.data() + last_offset, data[source_idx]); +protected: + BindResult BindExpression(unique_ptr *expr_ptr, idx_t depth, + bool root_expression = false) override; - last_offset = current_offset; - } - append_data.row_count += size; - } + BindResult BindColumn(ColumnRefExpression &expr); - static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { - result->n_buffers = 3; - result->buffers[1] = append_data.main_buffer.data(); - result->buffers[2] = append_data.aux_buffer.data(); - } + string UnsupportedAggregateMessage() override; }; -//===--------------------------------------------------------------------===// -// Structs -//===--------------------------------------------------------------------===// -struct ArrowStructData { - static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { - auto &children = StructType::GetChildTypes(type); - for (auto &child : children) { - auto child_buffer = InitializeArrowChild(child.second, capacity); - result.child_data.push_back(move(child_buffer)); - } - } - - static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { - UnifiedVectorFormat format; - input.ToUnifiedFormat(size, format); +} // namespace duckdb - AppendValidity(append_data, format, size); - // append the children of the struct - auto &children = StructVector::GetEntries(input); - for (idx_t child_idx = 0; child_idx < children.size(); child_idx++) { - auto &child = children[child_idx]; - auto &child_data = *append_data.child_data[child_idx]; - child_data.append_vector(child_data, *child, size); - } - append_data.row_count += size; - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/planner/filter/null_filter.hpp +// +// +//===----------------------------------------------------------------------===// - static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { - result->n_buffers = 1; - auto &child_types = StructType::GetChildTypes(type); - append_data.child_pointers.resize(child_types.size()); - result->children = append_data.child_pointers.data(); - result->n_children = child_types.size(); - for (idx_t i = 0; i < child_types.size(); i++) { - auto &child_type = child_types[i].second; - append_data.child_pointers[i] = FinalizeArrowChild(child_type, *append_data.child_data[i]); - } - } -}; -//===--------------------------------------------------------------------===// -// Lists -//===--------------------------------------------------------------------===// -void AppendListOffsets(ArrowAppendData &append_data, UnifiedVectorFormat &format, idx_t size, - vector &child_sel) { - // resize the offset buffer - the offset buffer holds the offsets into the child array - append_data.main_buffer.resize(append_data.main_buffer.size() + sizeof(uint32_t) * (size + 1)); - auto data = (list_entry_t *)format.data; - auto offset_data 
= (uint32_t *)append_data.main_buffer.data(); - if (append_data.row_count == 0) { - // first entry - offset_data[0] = 0; - } - // set up the offsets using the list entries - auto last_offset = offset_data[append_data.row_count]; - for (idx_t i = 0; i < size; i++) { - auto source_idx = format.sel->get_index(i); - auto offset_idx = append_data.row_count + i + 1; - if (!format.validity.RowIsValid(source_idx)) { - offset_data[offset_idx] = last_offset; - continue; - } - // append the offset data - auto list_length = data[source_idx].length; - last_offset += list_length; - offset_data[offset_idx] = last_offset; +namespace duckdb { - for (idx_t k = 0; k < list_length; k++) { - child_sel.push_back(data[source_idx].offset + k); - } - } -} +class IsNullFilter : public TableFilter { +public: + IsNullFilter(); -struct ArrowListData { - static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { - auto &child_type = ListType::GetChildType(type); - result.main_buffer.reserve((capacity + 1) * sizeof(uint32_t)); - auto child_buffer = InitializeArrowChild(child_type, capacity); - result.child_data.push_back(move(child_buffer)); - } +public: + FilterPropagateResult CheckStatistics(BaseStatistics &stats) override; + string ToString(const string &column_name) override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(FieldReader &source); +}; - static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { - UnifiedVectorFormat format; - input.ToUnifiedFormat(size, format); +class IsNotNullFilter : public TableFilter { +public: + IsNotNullFilter(); - vector child_indices; - AppendValidity(append_data, format, size); - AppendListOffsets(append_data, format, size, child_indices); +public: + FilterPropagateResult CheckStatistics(BaseStatistics &stats) override; + string ToString(const string &column_name) override; + void Serialize(FieldWriter &writer) const override; + static unique_ptr Deserialize(FieldReader &source); +}; - // append the child vector of the list - SelectionVector child_sel(child_indices.data()); - auto &child = ListVector::GetEntry(input); - auto child_size = child_indices.size(); - child.Slice(child_sel, child_size); +} // namespace duckdb - append_data.child_data[0]->append_vector(*append_data.child_data[0], child, child_size); - append_data.row_count += size; - } - static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { - result->n_buffers = 2; - result->buffers[1] = append_data.main_buffer.data(); - auto &child_type = ListType::GetChildType(type); - append_data.child_pointers.resize(1); - result->children = append_data.child_pointers.data(); - result->n_children = 1; - append_data.child_pointers[0] = FinalizeArrowChild(child_type, *append_data.child_data[0]); - } -}; +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/storage_manager.hpp +// +// +//===----------------------------------------------------------------------===// -//===--------------------------------------------------------------------===// -// Maps -//===--------------------------------------------------------------------===// -struct ArrowMapData { - static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { - // map types are stored in a (too) clever way - // the main buffer holds the null values and the offsets - // then we have a single child, which is a struct of the map_type, and the key_type - 
result.main_buffer.reserve((capacity + 1) * sizeof(uint32_t)); - auto &key_type = MapType::KeyType(type); - auto &value_type = MapType::ValueType(type); - auto internal_struct = make_unique(); - internal_struct->child_data.push_back(InitializeArrowChild(key_type, capacity)); - internal_struct->child_data.push_back(InitializeArrowChild(value_type, capacity)); - result.child_data.push_back(move(internal_struct)); - } - static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { - UnifiedVectorFormat format; - input.ToUnifiedFormat(size, format); - AppendValidity(append_data, format, size); - // maps exist as a struct of two lists, e.g. STRUCT(key VARCHAR[], value VARCHAR[]) - // since both lists are the same, arrow tries to be smart by storing the offsets only once - // we can append the offsets from any of the two children - auto &children = StructVector::GetEntries(input); - UnifiedVectorFormat child_format; - children[0]->ToUnifiedFormat(size, child_format); - vector child_indices; - AppendListOffsets(append_data, child_format, size, child_indices); +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/table_io_manager.hpp +// +// +//===----------------------------------------------------------------------===// - // now we can append the children to the lists - auto &struct_entries = StructVector::GetEntries(input); - D_ASSERT(struct_entries.size() == 2); - SelectionVector child_sel(child_indices.data()); - auto &key_vector = ListVector::GetEntry(*struct_entries[0]); - auto &value_vector = ListVector::GetEntry(*struct_entries[1]); - auto list_size = child_indices.size(); - key_vector.Slice(child_sel, list_size); - value_vector.Slice(child_sel, list_size); - // perform the append - auto &struct_data = *append_data.child_data[0]; - auto &key_data = *struct_data.child_data[0]; - auto &value_data = *struct_data.child_data[1]; - key_data.append_vector(key_data, key_vector, list_size); - value_data.append_vector(value_data, value_vector, list_size); - append_data.row_count += size; - struct_data.row_count += size; - } - static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { - // set up the main map buffer - result->n_buffers = 2; - result->buffers[1] = append_data.main_buffer.data(); - // the main map buffer has a single child: a struct - append_data.child_pointers.resize(1); - result->children = append_data.child_pointers.data(); - result->n_children = 1; - append_data.child_pointers[0] = FinalizeArrowChild(type, *append_data.child_data[0]); +namespace duckdb { +class BlockManager; +class DataTable; - // now that struct has two children: the key and the value type - auto &struct_data = *append_data.child_data[0]; - auto &struct_result = append_data.child_pointers[0]; - struct_data.child_pointers.resize(2); - struct_result->n_buffers = 1; - struct_result->n_children = 2; - struct_result->length = struct_data.child_data[0]->row_count; - struct_result->children = struct_data.child_pointers.data(); +class TableIOManager { +public: + virtual ~TableIOManager() { + } - D_ASSERT(struct_data.child_data[0]->row_count == struct_data.child_data[1]->row_count); + //! 
Obtains a reference to the TableIOManager of a specific table + static TableIOManager &Get(DataTable &table); - auto &key_type = MapType::KeyType(type); - auto &value_type = MapType::ValueType(type); - struct_data.child_pointers[0] = FinalizeArrowChild(key_type, *struct_data.child_data[0]); - struct_data.child_pointers[1] = FinalizeArrowChild(value_type, *struct_data.child_data[1]); + //! The block manager used for managing index data + virtual BlockManager &GetIndexBlockManager() = 0; - // keys cannot have null values - if (struct_data.child_pointers[0]->null_count > 0) { - throw std::runtime_error("Arrow doesn't accept NULL keys on Maps"); - } - } + //! The block manager used for storing row group data + virtual BlockManager &GetBlockManagerForRowData() = 0; }; -//! Append a data chunk to the underlying arrow array -void ArrowAppender::Append(DataChunk &input) { - D_ASSERT(types == input.GetTypes()); - for (idx_t i = 0; i < input.ColumnCount(); i++) { - root_data[i]->append_vector(*root_data[i], input.data[i], input.size()); - } - row_count += input.size(); -} -//===--------------------------------------------------------------------===// -// Initialize Arrow Child -//===--------------------------------------------------------------------===// -template -static void InitializeFunctionPointers(ArrowAppendData &append_data) { - append_data.initialize = OP::Initialize; - append_data.append_vector = OP::Append; - append_data.finalize = OP::Finalize; -} +} // namespace duckdb -static void InitializeFunctionPointers(ArrowAppendData &append_data, const LogicalType &type) { - // handle special logical types - switch (type.id()) { - case LogicalTypeId::BOOLEAN: - InitializeFunctionPointers(append_data); - break; - case LogicalTypeId::TINYINT: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::SMALLINT: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::DATE: - case LogicalTypeId::INTEGER: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::TIME: - case LogicalTypeId::TIMESTAMP_SEC: - case LogicalTypeId::TIMESTAMP_MS: - case LogicalTypeId::TIMESTAMP: - case LogicalTypeId::TIMESTAMP_NS: - case LogicalTypeId::TIMESTAMP_TZ: - case LogicalTypeId::TIME_TZ: - case LogicalTypeId::BIGINT: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::HUGEINT: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::UTINYINT: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::USMALLINT: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::UINTEGER: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::UBIGINT: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::FLOAT: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::DOUBLE: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::DECIMAL: - switch (type.InternalType()) { - case PhysicalType::INT16: - InitializeFunctionPointers>(append_data); - break; - case PhysicalType::INT32: - InitializeFunctionPointers>(append_data); - break; - case PhysicalType::INT64: - InitializeFunctionPointers>(append_data); - break; - case PhysicalType::INT128: - InitializeFunctionPointers>(append_data); - break; - default: - throw InternalException("Unsupported internal decimal type"); - } - break; - case LogicalTypeId::VARCHAR: - case LogicalTypeId::BLOB: - case LogicalTypeId::JSON: - InitializeFunctionPointers>(append_data); - break; - case 
LogicalTypeId::UUID: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::ENUM: - switch (type.InternalType()) { - case PhysicalType::UINT8: - InitializeFunctionPointers>(append_data); - break; - case PhysicalType::UINT16: - InitializeFunctionPointers>(append_data); - break; - case PhysicalType::UINT32: - InitializeFunctionPointers>(append_data); - break; - default: - throw InternalException("Unsupported internal enum type"); - } - break; - case LogicalTypeId::INTERVAL: - InitializeFunctionPointers>(append_data); - break; - case LogicalTypeId::STRUCT: - InitializeFunctionPointers(append_data); - break; - case LogicalTypeId::LIST: - InitializeFunctionPointers(append_data); - break; - case LogicalTypeId::MAP: - InitializeFunctionPointers(append_data); - break; - default: - throw InternalException("Unsupported type in DuckDB -> Arrow Conversion: %s\n", type.ToString()); - } -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/storage/write_ahead_log.hpp +// +// +//===----------------------------------------------------------------------===// -unique_ptr InitializeArrowChild(const LogicalType &type, idx_t capacity) { - auto result = make_unique(); - InitializeFunctionPointers(*result, type); - auto byte_count = (capacity + 7) / 8; - result->validity.reserve(byte_count); - result->initialize(*result, type, capacity); - return result; -} -static void ReleaseDuckDBArrowAppendArray(ArrowArray *array) { - if (!array || !array->release) { - return; - } - array->release = nullptr; - auto holder = static_cast(array->private_data); - delete holder; -} -//===--------------------------------------------------------------------===// -// Finalize Arrow Child -//===--------------------------------------------------------------------===// -ArrowArray *FinalizeArrowChild(const LogicalType &type, ArrowAppendData &append_data) { - auto result = make_unique(); - result->private_data = nullptr; - result->release = ReleaseDuckDBArrowAppendArray; - result->n_children = 0; - result->null_count = 0; - result->offset = 0; - result->dictionary = nullptr; - result->buffers = append_data.buffers.data(); - result->null_count = append_data.null_count; - result->length = append_data.row_count; - result->buffers[0] = append_data.validity.data(); +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/enums/wal_type.hpp +// +// +//===----------------------------------------------------------------------===// - if (append_data.finalize) { - append_data.finalize(append_data, type, result.get()); - } - append_data.array = move(result); - return append_data.array.get(); -} -//! 
Returns the underlying arrow array -ArrowArray ArrowAppender::Finalize() { - D_ASSERT(root_data.size() == types.size()); - auto root_holder = make_unique(); - ArrowArray result; - root_holder->child_pointers.resize(types.size()); - result.children = root_holder->child_pointers.data(); - result.n_children = types.size(); - // Configure root array - result.length = row_count; - result.n_children = types.size(); - result.n_buffers = 1; - result.buffers = root_holder->buffers.data(); // there is no actual buffer there since we don't have NULLs - result.offset = 0; - result.null_count = 0; // needs to be 0 - result.dictionary = nullptr; - root_holder->child_data = move(root_data); +namespace duckdb { - for (idx_t i = 0; i < root_holder->child_data.size(); i++) { - root_holder->child_pointers[i] = FinalizeArrowChild(types[i], *root_holder->child_data[i]); - } +enum class WALType : uint8_t { + INVALID = 0, + // ----------------------------- + // Catalog + // ----------------------------- + CREATE_TABLE = 1, + DROP_TABLE = 2, - // Release ownership to caller - result.private_data = root_holder.release(); - result.release = ReleaseDuckDBArrowAppendArray; - return result; -} + CREATE_SCHEMA = 3, + DROP_SCHEMA = 4, -} // namespace duckdb + CREATE_VIEW = 5, + DROP_VIEW = 6, + CREATE_SEQUENCE = 8, + DROP_SEQUENCE = 9, + SEQUENCE_VALUE = 10, + CREATE_MACRO = 11, + DROP_MACRO = 12, + CREATE_TYPE = 13, + DROP_TYPE = 14, + ALTER_INFO = 20, + CREATE_TABLE_MACRO = 21, + DROP_TABLE_MACRO = 22, + // ----------------------------- + // Data + // ----------------------------- + USE_TABLE = 25, + INSERT_TUPLE = 26, + DELETE_TUPLE = 27, + UPDATE_TUPLE = 28, + // ----------------------------- + // Flush + // ----------------------------- + CHECKPOINT = 99, + WAL_FLUSH = 100 +}; +} -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/types/sel_cache.hpp -// -// -//===----------------------------------------------------------------------===// @@ -10211,845 +10045,1088 @@ ArrowArray ArrowAppender::Finalize() { namespace duckdb { -//! Selection vector cache used for caching vector slices -struct SelCache { - unordered_map> cache; -}; - -} // namespace duckdb +struct AlterInfo; -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/types/vector_cache.hpp -// -// -//===----------------------------------------------------------------------===// +class BufferedSerializer; +class Catalog; +class DatabaseInstance; +class SchemaCatalogEntry; +class SequenceCatalogEntry; +class ScalarMacroCatalogEntry; +class ViewCatalogEntry; +class TypeCatalogEntry; +class TableCatalogEntry; +class Transaction; +class TransactionManager; +class ReplayState { +public: + ReplayState(DatabaseInstance &db, ClientContext &context, Deserializer &source) + : db(db), context(context), source(source), current_table(nullptr), deserialize_only(false), + checkpoint_id(INVALID_BLOCK) { + } + DatabaseInstance &db; + ClientContext &context; + Deserializer &source; + TableCatalogEntry *current_table; + bool deserialize_only; + block_id_t checkpoint_id; +public: + void ReplayEntry(WALType entry_type); +protected: + virtual void ReplayCreateTable(); + void ReplayDropTable(); + void ReplayAlter(); + void ReplayCreateView(); + void ReplayDropView(); + void ReplayCreateSchema(); + void ReplayDropSchema(); -namespace duckdb { -class Allocator; -class Vector; + void ReplayCreateType(); + void ReplayDropType(); -//! 
The VectorCache holds cached data that allows for re-use of the same memory by vectors -class VectorCache { -public: - //! Instantiate a vector cache with the given type - DUCKDB_API explicit VectorCache(Allocator &allocator, const LogicalType &type); + void ReplayCreateSequence(); + void ReplayDropSequence(); + void ReplaySequenceValue(); - buffer_ptr buffer; + void ReplayCreateMacro(); + void ReplayDropMacro(); -public: - void ResetFromCache(Vector &result) const; + void ReplayCreateTableMacro(); + void ReplayDropTableMacro(); - const LogicalType &GetType() const; + void ReplayUseTable(); + void ReplayInsert(); + void ReplayDelete(); + void ReplayUpdate(); + void ReplayCheckpoint(); }; -} // namespace duckdb +//! The WriteAheadLog (WAL) is a log that is used to provide durability. Prior +//! to committing a transaction it writes the changes the transaction made to +//! the database to the log, which can then be replayed upon startup in case the +//! server crashes or is shut down. +class WriteAheadLog { +public: + //! Initialize the WAL in the specified directory + explicit WriteAheadLog(DatabaseInstance &database, const string &path); + virtual ~WriteAheadLog(); + //! Skip writing to the WAL + bool skip_writing; +public: + //! Replay the WAL + static bool Replay(DatabaseInstance &database, string &path); -#include + //! Returns the current size of the WAL in bytes + int64_t GetWALSize(); + //! Gets the total bytes written to the WAL since startup + idx_t GetTotalWritten(); + virtual void WriteCreateTable(TableCatalogEntry *entry); + void WriteDropTable(TableCatalogEntry *entry); -namespace duckdb { + void WriteCreateSchema(SchemaCatalogEntry *entry); + void WriteDropSchema(SchemaCatalogEntry *entry); -void ArrowConverter::ToArrowArray(DataChunk &input, ArrowArray *out_array) { - ArrowAppender appender(input.GetTypes(), input.size()); - appender.Append(input); - *out_array = appender.Finalize(); -} + void WriteCreateView(ViewCatalogEntry *entry); + void WriteDropView(ViewCatalogEntry *entry); -//===--------------------------------------------------------------------===// -// Arrow Schema -//===--------------------------------------------------------------------===// -struct DuckDBArrowSchemaHolder { - // unused in children - vector children; - // unused in children - vector children_ptrs; - //! used for nested structures - std::list> nested_children; - std::list> nested_children_ptr; - //! This holds strings created to represent decimal types - vector> owned_type_names; -}; + void WriteCreateSequence(SequenceCatalogEntry *entry); + void WriteDropSequence(SequenceCatalogEntry *entry); + void WriteSequenceValue(SequenceCatalogEntry *entry, SequenceValue val); -static void ReleaseDuckDBArrowSchema(ArrowSchema *schema) { - if (!schema || !schema->release) { - return; - } - schema->release = nullptr; - auto holder = static_cast(schema->private_data); - delete holder; -} + void WriteCreateMacro(ScalarMacroCatalogEntry *entry); + void WriteDropMacro(ScalarMacroCatalogEntry *entry); -void InitializeChild(ArrowSchema &child, const string &name = "") { - //! Child is cleaned up by parent - child.private_data = nullptr; - child.release = ReleaseDuckDBArrowSchema; + void WriteCreateTableMacro(TableMacroCatalogEntry *entry); + void WriteDropTableMacro(TableMacroCatalogEntry *entry); - //! 
Store the child schema - child.flags = ARROW_FLAG_NULLABLE; - child.name = name.c_str(); - child.n_children = 0; - child.children = nullptr; - child.metadata = nullptr; - child.dictionary = nullptr; -} -void SetArrowFormat(DuckDBArrowSchemaHolder &root_holder, ArrowSchema &child, const LogicalType &type, - string &config_timezone); + void WriteCreateType(TypeCatalogEntry *entry); + void WriteDropType(TypeCatalogEntry *entry); + //! Sets the table used for subsequent insert/delete/update commands + void WriteSetTable(string &schema, string &table); -void SetArrowMapFormat(DuckDBArrowSchemaHolder &root_holder, ArrowSchema &child, const LogicalType &type, - string &config_timezone) { - child.format = "+m"; - //! Map has one child which is a struct - child.n_children = 1; - root_holder.nested_children.emplace_back(); - root_holder.nested_children.back().resize(1); - root_holder.nested_children_ptr.emplace_back(); - root_holder.nested_children_ptr.back().push_back(&root_holder.nested_children.back()[0]); - InitializeChild(root_holder.nested_children.back()[0]); - child.children = &root_holder.nested_children_ptr.back()[0]; - child.children[0]->name = "entries"; - child_list_t struct_child_types; - struct_child_types.push_back(std::make_pair("key", ListType::GetChildType(StructType::GetChildType(type, 0)))); - struct_child_types.push_back(std::make_pair("value", ListType::GetChildType(StructType::GetChildType(type, 1)))); - auto struct_type = LogicalType::STRUCT(move(struct_child_types)); - SetArrowFormat(root_holder, *child.children[0], struct_type, config_timezone); -} + void WriteAlter(AlterInfo &info); -void SetArrowFormat(DuckDBArrowSchemaHolder &root_holder, ArrowSchema &child, const LogicalType &type, - string &config_timezone) { - switch (type.id()) { - case LogicalTypeId::BOOLEAN: - child.format = "b"; - break; - case LogicalTypeId::TINYINT: - child.format = "c"; - break; - case LogicalTypeId::SMALLINT: - child.format = "s"; - break; - case LogicalTypeId::INTEGER: - child.format = "i"; - break; - case LogicalTypeId::BIGINT: - child.format = "l"; - break; - case LogicalTypeId::UTINYINT: - child.format = "C"; - break; - case LogicalTypeId::USMALLINT: - child.format = "S"; - break; - case LogicalTypeId::UINTEGER: - child.format = "I"; - break; - case LogicalTypeId::UBIGINT: - child.format = "L"; - break; - case LogicalTypeId::FLOAT: - child.format = "f"; - break; - case LogicalTypeId::HUGEINT: - child.format = "d:38,0"; - break; - case LogicalTypeId::DOUBLE: - child.format = "g"; - break; - case LogicalTypeId::UUID: - case LogicalTypeId::JSON: - case LogicalTypeId::VARCHAR: - child.format = "u"; - break; - case LogicalTypeId::DATE: - child.format = "tdD"; - break; - case LogicalTypeId::TIME: - case LogicalTypeId::TIME_TZ: - child.format = "ttu"; - break; - case LogicalTypeId::TIMESTAMP: - child.format = "tsu:"; - break; - case LogicalTypeId::TIMESTAMP_TZ: { - string format = "tsu:" + config_timezone; - unique_ptr format_ptr = unique_ptr(new char[format.size() + 1]); - for (size_t i = 0; i < format.size(); i++) { - format_ptr[i] = format[i]; - } - format_ptr[format.size()] = '\0'; - root_holder.owned_type_names.push_back(move(format_ptr)); - child.format = root_holder.owned_type_names.back().get(); - break; - } - case LogicalTypeId::TIMESTAMP_SEC: - child.format = "tss:"; - break; - case LogicalTypeId::TIMESTAMP_NS: - child.format = "tsn:"; - break; - case LogicalTypeId::TIMESTAMP_MS: - child.format = "tsm:"; - break; - case LogicalTypeId::INTERVAL: - child.format = "tDm"; - break; - case 
LogicalTypeId::DECIMAL: { - uint8_t width, scale; - type.GetDecimalProperties(width, scale); - string format = "d:" + to_string(width) + "," + to_string(scale); - unique_ptr format_ptr = unique_ptr(new char[format.size() + 1]); - for (size_t i = 0; i < format.size(); i++) { - format_ptr[i] = format[i]; - } - format_ptr[format.size()] = '\0'; - root_holder.owned_type_names.push_back(move(format_ptr)); - child.format = root_holder.owned_type_names.back().get(); - break; - } - case LogicalTypeId::SQLNULL: { - child.format = "n"; - break; - } - case LogicalTypeId::BLOB: { - child.format = "z"; - break; - } - case LogicalTypeId::LIST: { - child.format = "+l"; - child.n_children = 1; - root_holder.nested_children.emplace_back(); - root_holder.nested_children.back().resize(1); - root_holder.nested_children_ptr.emplace_back(); - root_holder.nested_children_ptr.back().push_back(&root_holder.nested_children.back()[0]); - InitializeChild(root_holder.nested_children.back()[0]); - child.children = &root_holder.nested_children_ptr.back()[0]; - child.children[0]->name = "l"; - SetArrowFormat(root_holder, **child.children, ListType::GetChildType(type), config_timezone); - break; - } - case LogicalTypeId::STRUCT: { - child.format = "+s"; - auto &child_types = StructType::GetChildTypes(type); - child.n_children = child_types.size(); - root_holder.nested_children.emplace_back(); - root_holder.nested_children.back().resize(child_types.size()); - root_holder.nested_children_ptr.emplace_back(); - root_holder.nested_children_ptr.back().resize(child_types.size()); - for (idx_t type_idx = 0; type_idx < child_types.size(); type_idx++) { - root_holder.nested_children_ptr.back()[type_idx] = &root_holder.nested_children.back()[type_idx]; - } - child.children = &root_holder.nested_children_ptr.back()[0]; - for (size_t type_idx = 0; type_idx < child_types.size(); type_idx++) { + void WriteInsert(DataChunk &chunk); + void WriteDelete(DataChunk &chunk); + //! Write a single (sub-) column update to the WAL. Chunk must be a pair of (COL, ROW_ID). + //! The column_path vector is a *path* towards a column within the table + //! i.e. if we have a table with a single column S STRUCT(A INT, B INT) + //! and we update the validity mask of "S.B" + //! the column path is: + //! 0 (first column of table) + //! -> 1 (second subcolumn of struct) + //! -> 0 (first subcolumn of INT) + void WriteUpdate(DataChunk &chunk, const vector &column_path); - InitializeChild(*child.children[type_idx]); + //! Truncate the WAL to a previous size, and clear anything currently set in the writer + void Truncate(int64_t size); + //! Delete the WAL file on disk. The WAL should not be used after this point. + void Delete(); + void Flush(); - auto &struct_col_name = child_types[type_idx].first; - unique_ptr name_ptr = unique_ptr(new char[struct_col_name.size() + 1]); - for (size_t i = 0; i < struct_col_name.size(); i++) { - name_ptr[i] = struct_col_name[i]; - } - name_ptr[struct_col_name.size()] = '\0'; - root_holder.owned_type_names.push_back(move(name_ptr)); + void WriteCheckpoint(block_id_t meta_block); - child.children[type_idx]->name = root_holder.owned_type_names.back().get(); - SetArrowFormat(root_holder, *child.children[type_idx], child_types[type_idx].second, config_timezone); - } - break; - } - case LogicalTypeId::MAP: { - SetArrowMapFormat(root_holder, child, type, config_timezone); - break; - } - case LogicalTypeId::ENUM: { - // TODO what do we do with pointer enums here? 
- switch (EnumType::GetPhysicalType(type)) { - case PhysicalType::UINT8: - child.format = "C"; - break; - case PhysicalType::UINT16: - child.format = "S"; - break; - case PhysicalType::UINT32: - child.format = "I"; - break; - default: - throw InternalException("Unsupported Enum Internal Type"); - } - root_holder.nested_children.emplace_back(); - root_holder.nested_children.back().resize(1); - root_holder.nested_children_ptr.emplace_back(); - root_holder.nested_children_ptr.back().push_back(&root_holder.nested_children.back()[0]); - InitializeChild(root_holder.nested_children.back()[0]); - child.dictionary = root_holder.nested_children_ptr.back()[0]; - child.dictionary->format = "u"; - break; - } - default: - throw InternalException("Unsupported Arrow type " + type.ToString()); - } -} +protected: + DatabaseInstance &database; + unique_ptr writer; + string wal_path; +}; -void ArrowConverter::ToArrowSchema(ArrowSchema *out_schema, vector &types, vector &names, - string &config_timezone) { - D_ASSERT(out_schema); - D_ASSERT(types.size() == names.size()); - idx_t column_count = types.size(); - // Allocate as unique_ptr first to cleanup properly on error - auto root_holder = make_unique(); +} // namespace duckdb - // Allocate the children - root_holder->children.resize(column_count); - root_holder->children_ptrs.resize(column_count, nullptr); - for (size_t i = 0; i < column_count; ++i) { - root_holder->children_ptrs[i] = &root_holder->children[i]; - } - out_schema->children = root_holder->children_ptrs.data(); - out_schema->n_children = column_count; - // Store the schema - out_schema->format = "+s"; // struct apparently - out_schema->flags = 0; - out_schema->metadata = nullptr; - out_schema->name = "duckdb_query_result"; - out_schema->dictionary = nullptr; +namespace duckdb { +class BlockManager; +class Catalog; +class CheckpointWriter; +class DatabaseInstance; +class TransactionManager; +class TableCatalogEntry; - // Configure all child schemas - for (idx_t col_idx = 0; col_idx < column_count; col_idx++) { +struct DatabaseSize { + idx_t total_blocks = 0; + idx_t block_size = 0; + idx_t free_blocks = 0; + idx_t used_blocks = 0; + idx_t bytes = 0; + idx_t wal_size = 0; +}; - auto &child = root_holder->children[col_idx]; - InitializeChild(child, names[col_idx]); - SetArrowFormat(*root_holder, child, types[col_idx], config_timezone); +class StorageCommitState { +public: + // Destruction of this object, without prior call to FlushCommit, + // will roll back the committed changes. + virtual ~StorageCommitState() { } - // Release ownership to caller - out_schema->private_data = root_holder.release(); - out_schema->release = ReleaseDuckDBArrowSchema; -} - -} // namespace duckdb + // Make the commit persistent + virtual void FlushCommit() = 0; +}; +//! StorageManager is responsible for managing the physical storage of the +//! database on disk +class StorageManager { +public: + StorageManager(DatabaseInstance &db, string path, bool read_only); + virtual ~StorageManager(); + //! The BufferManager of the database + unique_ptr buffer_manager; + //! The database this storagemanager belongs to + DatabaseInstance &db; +public: + static StorageManager &GetStorageManager(ClientContext &context); + static StorageManager &GetStorageManager(DatabaseInstance &db); + //! Initialize a database or load an existing database from the given path + void Initialize(); + DatabaseInstance &GetDatabase() { + return db; + } + //! 
Get the WAL of the StorageManager, returns nullptr if in-memory + WriteAheadLog *GetWriteAheadLog() { + return wal.get(); + } + string GetDBPath() { + return path; + } + bool InMemory(); -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/arrow/result_arrow_wrapper.hpp -// -// -//===----------------------------------------------------------------------===// + virtual bool AutomaticCheckpoint(idx_t estimated_wal_bytes) = 0; + virtual unique_ptr GenStorageCommitState(Transaction &transaction, bool checkpoint) = 0; + virtual bool IsCheckpointClean(block_id_t checkpoint_id) = 0; + virtual void CreateCheckpoint(bool delete_wal = false, bool force_checkpoint = false) = 0; + virtual DatabaseSize GetDatabaseSize() = 0; + virtual shared_ptr GetTableIOManager(BoundCreateTableInfo *info) = 0; +protected: + virtual void LoadDatabase() = 0; + virtual void CreateBufferManager(); + //! The path of the database + string path; + //! The WriteAheadLog of the storage manager + unique_ptr wal; + //! Whether or not the database is opened in read-only mode + bool read_only; +}; +//! Stores database in a single file. +class SingleFileStorageManager : public StorageManager { +public: + SingleFileStorageManager(DatabaseInstance &db, string path, bool read_only); + //! The BlockManager to read/store meta information and data in blocks + unique_ptr block_manager; + //! TableIoManager + unique_ptr table_io_manager; -namespace duckdb { -class ResultArrowArrayStreamWrapper { public: - explicit ResultArrowArrayStreamWrapper(unique_ptr result, idx_t batch_size); - ArrowArrayStream stream; - unique_ptr result; - PreservedError last_error; - idx_t batch_size; - vector column_types; - vector column_names; - string timezone_config; + bool AutomaticCheckpoint(idx_t estimated_wal_bytes) override; + unique_ptr GenStorageCommitState(Transaction &transaction, bool checkpoint) override; + bool IsCheckpointClean(block_id_t checkpoint_id) override; + void CreateCheckpoint(bool delete_wal, bool force_checkpoint) override; + DatabaseSize GetDatabaseSize() override; + shared_ptr GetTableIOManager(BoundCreateTableInfo *info) override; -private: - static int MyStreamGetSchema(struct ArrowArrayStream *stream, struct ArrowSchema *out); - static int MyStreamGetNext(struct ArrowArrayStream *stream, struct ArrowArray *out); - static void MyStreamRelease(struct ArrowArrayStream *stream); - static const char *MyStreamGetLastError(struct ArrowArrayStream *stream); +protected: + void LoadDatabase() override; }; } // namespace duckdb +#include namespace duckdb { -ArrowSchemaWrapper::~ArrowSchemaWrapper() { - if (arrow_schema.release) { - for (int64_t child_idx = 0; child_idx < arrow_schema.n_children; child_idx++) { - auto &child = *arrow_schema.children[child_idx]; - if (child.release) { - child.release(&child); - } - } - arrow_schema.release(&arrow_schema); - arrow_schema.release = nullptr; - } +bool TableCatalogEntry::HasGeneratedColumns() const { + return columns.LogicalColumnCount() != columns.PhysicalColumnCount(); } -ArrowArrayWrapper::~ArrowArrayWrapper() { - if (arrow_array.release) { - for (int64_t child_idx = 0; child_idx < arrow_array.n_children; child_idx++) { - auto &child = *arrow_array.children[child_idx]; - if (child.release) { - child.release(&child); - } +LogicalIndex TableCatalogEntry::GetColumnIndex(string &column_name, bool if_exists) { + auto entry = columns.GetColumnIndex(column_name); + if (!entry.IsValid()) { + if (if_exists) { + return entry; } - 
arrow_array.release(&arrow_array); - arrow_array.release = nullptr; + throw BinderException("Table \"%s\" does not have a column with name \"%s\"", name, column_name); } + return entry; } -ArrowArrayStreamWrapper::~ArrowArrayStreamWrapper() { - if (arrow_array_stream.release) { - arrow_array_stream.release(&arrow_array_stream); - arrow_array_stream.release = nullptr; +void AddDataTableIndex(DataTable *storage, const ColumnList &columns, const vector &keys, + IndexConstraintType constraint_type, BlockPointer *index_block = nullptr) { + // fetch types and create expressions for the index from the columns + vector column_ids; + vector> unbound_expressions; + vector> bound_expressions; + idx_t key_nr = 0; + column_ids.reserve(keys.size()); + for (auto &physical_key : keys) { + auto &column = columns.GetColumn(physical_key); + D_ASSERT(!column.Generated()); + unbound_expressions.push_back( + make_unique(column.Name(), column.Type(), ColumnBinding(0, column_ids.size()))); + + bound_expressions.push_back(make_unique(column.Type(), key_nr++)); + column_ids.push_back(column.StorageOid()); + } + unique_ptr art; + // create an adaptive radix tree around the expressions + if (index_block) { + art = make_unique(column_ids, TableIOManager::Get(*storage), move(unbound_expressions), constraint_type, + storage->db, index_block->block_id, index_block->offset); + } else { + art = make_unique(column_ids, TableIOManager::Get(*storage), move(unbound_expressions), constraint_type, + storage->db); + if (!storage->IsRoot()) { + throw TransactionException("Transaction conflict: cannot add an index to a table that has been altered!"); + } } + storage->info->indexes.AddIndex(move(art)); } -void ArrowArrayStreamWrapper::GetSchema(ArrowSchemaWrapper &schema) { - D_ASSERT(arrow_array_stream.get_schema); - // LCOV_EXCL_START - if (arrow_array_stream.get_schema(&arrow_array_stream, &schema.arrow_schema)) { - throw InvalidInputException("arrow_scan: get_schema failed(): %s", string(GetError())); - } - if (!schema.arrow_schema.release) { - throw InvalidInputException("arrow_scan: released schema passed"); - } - if (schema.arrow_schema.n_children < 1) { - throw InvalidInputException("arrow_scan: empty schema passed"); +void AddDataTableIndex(DataTable *storage, const ColumnList &columns, vector &keys, + IndexConstraintType constraint_type, BlockPointer *index_block = nullptr) { + vector new_keys; + new_keys.reserve(keys.size()); + for (auto &logical_key : keys) { + new_keys.push_back(columns.LogicalToPhysical(logical_key)); } - // LCOV_EXCL_STOP + AddDataTableIndex(storage, columns, new_keys, constraint_type, index_block); } -shared_ptr ArrowArrayStreamWrapper::GetNextChunk() { - auto current_chunk = make_shared(); - if (arrow_array_stream.get_next(&arrow_array_stream, ¤t_chunk->arrow_array)) { // LCOV_EXCL_START - throw InvalidInputException("arrow_scan: get_next failed(): %s", string(GetError())); - } // LCOV_EXCL_STOP +TableCatalogEntry::TableCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, BoundCreateTableInfo *info, + std::shared_ptr inherited_storage) + : StandardEntry(CatalogType::TABLE_ENTRY, schema, catalog, info->Base().table), storage(move(inherited_storage)), + columns(move(info->Base().columns)), constraints(move(info->Base().constraints)), + bound_constraints(move(info->bound_constraints)), + column_dependency_manager(move(info->column_dependency_manager)) { + this->temporary = info->Base().temporary; + if (!storage) { + // create the physical storage + vector storage_columns; + for (auto &col_def : 
columns.Physical()) { + storage_columns.push_back(col_def.Copy()); + } + storage = + make_shared(catalog->db, StorageManager::GetStorageManager(catalog->db).GetTableIOManager(info), + schema->name, name, move(storage_columns), move(info->data)); - return current_chunk; + // create the unique indexes for the UNIQUE and PRIMARY KEY and FOREIGN KEY constraints + idx_t indexes_idx = 0; + for (idx_t i = 0; i < bound_constraints.size(); i++) { + auto &constraint = bound_constraints[i]; + if (constraint->type == ConstraintType::UNIQUE) { + // unique constraint: create a unique index + auto &unique = (BoundUniqueConstraint &)*constraint; + IndexConstraintType constraint_type = IndexConstraintType::UNIQUE; + if (unique.is_primary_key) { + constraint_type = IndexConstraintType::PRIMARY; + } + if (info->indexes.empty()) { + AddDataTableIndex(storage.get(), columns, unique.keys, constraint_type); + } else { + AddDataTableIndex(storage.get(), columns, unique.keys, constraint_type, + &info->indexes[indexes_idx++]); + } + } else if (constraint->type == ConstraintType::FOREIGN_KEY) { + // foreign key constraint: create a foreign key index + auto &bfk = (BoundForeignKeyConstraint &)*constraint; + if (bfk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE || + bfk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { + if (info->indexes.empty()) { + AddDataTableIndex(storage.get(), columns, bfk.info.fk_keys, IndexConstraintType::FOREIGN); + } else { + AddDataTableIndex(storage.get(), columns, bfk.info.fk_keys, IndexConstraintType::FOREIGN, + &info->indexes[indexes_idx++]); + } + } + } + } + } } -const char *ArrowArrayStreamWrapper::GetError() { // LCOV_EXCL_START - return arrow_array_stream.get_last_error(&arrow_array_stream); -} // LCOV_EXCL_STOP +bool TableCatalogEntry::ColumnExists(const string &name) { + return columns.ColumnExists(name); +} -int ResultArrowArrayStreamWrapper::MyStreamGetSchema(struct ArrowArrayStream *stream, struct ArrowSchema *out) { - if (!stream->release) { - return -1; +unique_ptr TableCatalogEntry::GetStatistics(ClientContext &context, column_t column_id) { + if (column_id == COLUMN_IDENTIFIER_ROW_ID) { + return nullptr; } - auto my_stream = (ResultArrowArrayStreamWrapper *)stream->private_data; - if (!my_stream->column_types.empty()) { - ArrowConverter::ToArrowSchema(out, my_stream->column_types, my_stream->column_names, - my_stream->timezone_config); - return 0; + auto &column = columns.GetColumn(LogicalIndex(column_id)); + if (column.Generated()) { + return nullptr; } + return storage->GetStatistics(context, column.StorageOid()); +} - auto &result = *my_stream->result; - if (result.HasError()) { - my_stream->last_error = result.GetErrorObject(); - return -1; +unique_ptr TableCatalogEntry::AlterEntry(ClientContext &context, AlterInfo *info) { + D_ASSERT(!internal); + if (info->type != AlterType::ALTER_TABLE) { + throw CatalogException("Can only modify table with ALTER TABLE statement"); } - if (result.type == QueryResultType::STREAM_RESULT) { - auto &stream_result = (StreamQueryResult &)result; - if (!stream_result.IsOpen()) { - my_stream->last_error = PreservedError("Query Stream is closed"); - return -1; - } + auto table_info = (AlterTableInfo *)info; + switch (table_info->alter_table_type) { + case AlterTableType::RENAME_COLUMN: { + auto rename_info = (RenameColumnInfo *)table_info; + return RenameColumn(context, *rename_info); } - if (my_stream->column_types.empty()) { - my_stream->column_types = result.types; - my_stream->column_names = result.names; + case 
AlterTableType::RENAME_TABLE: { + auto rename_info = (RenameTableInfo *)table_info; + auto copied_table = Copy(context); + copied_table->name = rename_info->new_table_name; + storage->info->table = rename_info->new_table_name; + return copied_table; } - ArrowConverter::ToArrowSchema(out, my_stream->column_types, my_stream->column_names, my_stream->timezone_config); - return 0; -} - -int ResultArrowArrayStreamWrapper::MyStreamGetNext(struct ArrowArrayStream *stream, struct ArrowArray *out) { - if (!stream->release) { - return -1; + case AlterTableType::ADD_COLUMN: { + auto add_info = (AddColumnInfo *)table_info; + return AddColumn(context, *add_info); } - auto my_stream = (ResultArrowArrayStreamWrapper *)stream->private_data; - auto &result = *my_stream->result; - if (result.HasError()) { - my_stream->last_error = result.GetErrorObject(); - return -1; + case AlterTableType::REMOVE_COLUMN: { + auto remove_info = (RemoveColumnInfo *)table_info; + return RemoveColumn(context, *remove_info); } - if (result.type == QueryResultType::STREAM_RESULT) { - auto &stream_result = (StreamQueryResult &)result; - if (!stream_result.IsOpen()) { - // Nothing to output - out->release = nullptr; - return 0; - } + case AlterTableType::SET_DEFAULT: { + auto set_default_info = (SetDefaultInfo *)table_info; + return SetDefault(context, *set_default_info); } - if (my_stream->column_types.empty()) { - my_stream->column_types = result.types; - my_stream->column_names = result.names; + case AlterTableType::ALTER_COLUMN_TYPE: { + auto change_type_info = (ChangeColumnTypeInfo *)table_info; + return ChangeColumnType(context, *change_type_info); } - idx_t result_count; - PreservedError error; - if (!ArrowUtil::TryFetchChunk(&result, my_stream->batch_size, out, result_count, error)) { - D_ASSERT(error); - my_stream->last_error = error; - return -1; + case AlterTableType::FOREIGN_KEY_CONSTRAINT: { + auto foreign_key_constraint_info = (AlterForeignKeyInfo *)table_info; + if (foreign_key_constraint_info->type == AlterForeignKeyType::AFT_ADD) { + return AddForeignKeyConstraint(context, *foreign_key_constraint_info); + } else { + return DropForeignKeyConstraint(context, *foreign_key_constraint_info); + } } - if (result_count == 0) { - // Nothing to output - out->release = nullptr; + case AlterTableType::SET_NOT_NULL: { + auto set_not_null_info = (SetNotNullInfo *)table_info; + return SetNotNull(context, *set_not_null_info); } - return 0; -} - -void ResultArrowArrayStreamWrapper::MyStreamRelease(struct ArrowArrayStream *stream) { - if (!stream->release) { - return; + case AlterTableType::DROP_NOT_NULL: { + auto drop_not_null_info = (DropNotNullInfo *)table_info; + return DropNotNull(context, *drop_not_null_info); } - stream->release = nullptr; - delete (ResultArrowArrayStreamWrapper *)stream->private_data; -} - -const char *ResultArrowArrayStreamWrapper::MyStreamGetLastError(struct ArrowArrayStream *stream) { - if (!stream->release) { - return "stream was released"; + default: + throw InternalException("Unrecognized alter table type!"); } - D_ASSERT(stream->private_data); - auto my_stream = (ResultArrowArrayStreamWrapper *)stream->private_data; - return my_stream->last_error.Message().c_str(); } -ResultArrowArrayStreamWrapper::ResultArrowArrayStreamWrapper(unique_ptr result_p, idx_t batch_size_p) - : result(move(result_p)) { - //! We first initialize the private data of the stream - stream.private_data = this; - //! 
Ceil Approx_Batch_Size/STANDARD_VECTOR_SIZE - if (batch_size_p == 0) { - throw std::runtime_error("Approximate Batch Size of Record Batch MUST be higher than 0"); +static void RenameExpression(ParsedExpression &expr, RenameColumnInfo &info) { + if (expr.type == ExpressionType::COLUMN_REF) { + auto &colref = (ColumnRefExpression &)expr; + if (colref.column_names.back() == info.old_name) { + colref.column_names.back() = info.new_name; + } } - batch_size = batch_size_p; - //! We initialize the stream functions - stream.get_schema = ResultArrowArrayStreamWrapper::MyStreamGetSchema; - stream.get_next = ResultArrowArrayStreamWrapper::MyStreamGetNext; - stream.release = ResultArrowArrayStreamWrapper::MyStreamRelease; - stream.get_last_error = ResultArrowArrayStreamWrapper::MyStreamGetLastError; + ParsedExpressionIterator::EnumerateChildren( + expr, [&](const ParsedExpression &child) { RenameExpression((ParsedExpression &)child, info); }); } -bool ArrowUtil::TryFetchNext(QueryResult &result, unique_ptr &chunk, PreservedError &error) { - if (result.type == QueryResultType::STREAM_RESULT) { - auto &stream_result = (StreamQueryResult &)result; - if (!stream_result.IsOpen()) { - return true; +unique_ptr TableCatalogEntry::RenameColumn(ClientContext &context, RenameColumnInfo &info) { + auto rename_idx = GetColumnIndex(info.old_name); + if (rename_idx.index == COLUMN_IDENTIFIER_ROW_ID) { + throw CatalogException("Cannot rename rowid column"); + } + auto create_info = make_unique(schema->name, name); + create_info->temporary = temporary; + for (auto &col : columns.Logical()) { + auto copy = col.Copy(); + if (rename_idx == col.Logical()) { + copy.SetName(info.new_name); } + if (col.Generated() && column_dependency_manager.IsDependencyOf(col.Logical(), rename_idx)) { + RenameExpression(copy.GeneratedExpressionMutable(), info); + } + create_info->columns.AddColumn(move(copy)); } - return result.TryFetch(chunk, error); -} - -bool ArrowUtil::TryFetchChunk(QueryResult *result, idx_t chunk_size, ArrowArray *out, idx_t &count, - PreservedError &error) { - count = 0; - ArrowAppender appender(result->types, chunk_size); - while (count < chunk_size) { - unique_ptr data_chunk; - if (!TryFetchNext(*result, data_chunk, error)) { - if (result->HasError()) { - error = result->GetErrorObject(); + for (idx_t c_idx = 0; c_idx < constraints.size(); c_idx++) { + auto copy = constraints[c_idx]->Copy(); + switch (copy->type) { + case ConstraintType::NOT_NULL: + // NOT NULL constraint: no adjustments necessary + break; + case ConstraintType::CHECK: { + // CHECK constraint: need to rename column references that refer to the renamed column + auto &check = (CheckConstraint &)*copy; + RenameExpression(*check.expression, info); + break; + } + case ConstraintType::UNIQUE: { + // UNIQUE constraint: possibly need to rename columns + auto &unique = (UniqueConstraint &)*copy; + for (idx_t i = 0; i < unique.columns.size(); i++) { + if (unique.columns[i] == info.old_name) { + unique.columns[i] = info.new_name; + } } - return false; + break; } - if (!data_chunk || data_chunk->size() == 0) { + case ConstraintType::FOREIGN_KEY: { + // FOREIGN KEY constraint: possibly need to rename columns + auto &fk = (ForeignKeyConstraint &)*copy; + vector columns = fk.pk_columns; + if (fk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE) { + columns = fk.fk_columns; + } else if (fk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { + for (idx_t i = 0; i < fk.fk_columns.size(); i++) { + columns.push_back(fk.fk_columns[i]); + } + } + for 
(idx_t i = 0; i < columns.size(); i++) { + if (columns[i] == info.old_name) { + throw CatalogException( + "Cannot rename column \"%s\" because this is involved in the foreign key constraint", + info.old_name); + } + } break; } - count += data_chunk->size(); - appender.Append(*data_chunk); - } - if (count > 0) { - *out = appender.Finalize(); - } - return true; -} - -idx_t ArrowUtil::FetchChunk(QueryResult *result, idx_t chunk_size, ArrowArray *out) { - PreservedError error; - idx_t result_count; - if (!TryFetchChunk(result, chunk_size, out, result_count, error)) { - error.Throw(); + default: + throw InternalException("Unsupported constraint for entry!"); + } + create_info->constraints.push_back(move(copy)); } - return result_count; + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); } -} // namespace duckdb - - - -namespace duckdb { +unique_ptr TableCatalogEntry::AddColumn(ClientContext &context, AddColumnInfo &info) { + auto col_name = info.new_column.GetName(); -void DuckDBAssertInternal(bool condition, const char *condition_name, const char *file, int linenr) { - if (condition) { - return; + // We're checking for the opposite condition (ADD COLUMN IF _NOT_ EXISTS ...). + if (info.if_column_not_exists && ColumnExists(col_name)) { + return nullptr; } - throw InternalException("Assertion triggered in file \"%s\" on line %d: %s", file, linenr, condition_name); -} - -} // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/checksum.hpp -// -// -//===----------------------------------------------------------------------===// - - - + auto create_info = make_unique(schema->name, name); + create_info->temporary = temporary; -namespace duckdb { + for (auto &col : columns.Logical()) { + create_info->columns.AddColumn(col.Copy()); + } + for (auto &constraint : constraints) { + create_info->constraints.push_back(constraint->Copy()); + } + Binder::BindLogicalType(context, info.new_column.TypeMutable(), schema->name); + auto col = info.new_column.Copy(); -//! 
Compute a checksum over a buffer of size size -uint64_t Checksum(uint8_t *buffer, size_t size); + create_info->columns.AddColumn(move(col)); -} // namespace duckdb + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); + auto new_storage = + make_shared(context, *storage, info.new_column, bound_create_info->bound_defaults.back().get()); + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), + new_storage); +} +unique_ptr TableCatalogEntry::RemoveColumn(ClientContext &context, RemoveColumnInfo &info) { + auto removed_index = GetColumnIndex(info.removed_column, info.if_column_exists); + if (!removed_index.IsValid()) { + if (!info.if_column_exists) { + throw CatalogException("Cannot drop column: rowid column cannot be dropped"); + } + return nullptr; + } + auto create_info = make_unique(schema->name, name); + create_info->temporary = temporary; -namespace duckdb { + logical_index_set_t removed_columns; + if (column_dependency_manager.HasDependents(removed_index)) { + removed_columns = column_dependency_manager.GetDependents(removed_index); + } + if (!removed_columns.empty() && !info.cascade) { + throw CatalogException("Cannot drop column: column is a dependency of 1 or more generated column(s)"); + } + for (auto &col : columns.Logical()) { + if (col.Logical() == removed_index || removed_columns.count(col.Logical())) { + continue; + } + create_info->columns.AddColumn(col.Copy()); + } + if (create_info->columns.empty()) { + throw CatalogException("Cannot drop column: table only has one column remaining!"); + } + auto adjusted_indices = column_dependency_manager.RemoveColumn(removed_index, columns.LogicalColumnCount()); + // handle constraints for the new table + D_ASSERT(constraints.size() == bound_constraints.size()); + for (idx_t constr_idx = 0; constr_idx < constraints.size(); constr_idx++) { + auto &constraint = constraints[constr_idx]; + auto &bound_constraint = bound_constraints[constr_idx]; + switch (constraint->type) { + case ConstraintType::NOT_NULL: { + auto ¬_null_constraint = (BoundNotNullConstraint &)*bound_constraint; + auto not_null_index = columns.PhysicalToLogical(not_null_constraint.index); + if (not_null_index != removed_index) { + // the constraint is not about this column: we need to copy it + // we might need to shift the index back by one though, to account for the removed column + auto new_index = adjusted_indices[not_null_index.index]; + create_info->constraints.push_back(make_unique(new_index)); + } + break; + } + case ConstraintType::CHECK: { + // CHECK constraint + auto &bound_check = (BoundCheckConstraint &)*bound_constraint; + // check if the removed column is part of the check constraint + auto physical_index = columns.LogicalToPhysical(removed_index); + if (bound_check.bound_columns.find(physical_index) != bound_check.bound_columns.end()) { + if (bound_check.bound_columns.size() > 1) { + // CHECK constraint that concerns mult + throw CatalogException( + "Cannot drop column \"%s\" because there is a CHECK constraint that depends on it", + info.removed_column); + } else { + // CHECK constraint that ONLY concerns this column, strip the constraint + } + } else { + // check constraint does not concern the removed column: simply re-add it + create_info->constraints.push_back(constraint->Copy()); + } + break; + } + case ConstraintType::UNIQUE: { + auto copy = constraint->Copy(); + auto &unique = (UniqueConstraint &)*copy; + if (unique.index.index != 
DConstants::INVALID_INDEX) { + if (unique.index == removed_index) { + throw CatalogException( + "Cannot drop column \"%s\" because there is a UNIQUE constraint that depends on it", + info.removed_column); + } + unique.index = adjusted_indices[unique.index.index]; + } + create_info->constraints.push_back(move(copy)); + break; + } + case ConstraintType::FOREIGN_KEY: { + auto copy = constraint->Copy(); + auto &fk = (ForeignKeyConstraint &)*copy; + vector columns = fk.pk_columns; + if (fk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE) { + columns = fk.fk_columns; + } else if (fk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { + for (idx_t i = 0; i < fk.fk_columns.size(); i++) { + columns.push_back(fk.fk_columns[i]); + } + } + for (idx_t i = 0; i < columns.size(); i++) { + if (columns[i] == info.removed_column) { + throw CatalogException( + "Cannot drop column \"%s\" because there is a FOREIGN KEY constraint that depends on it", + info.removed_column); + } + } + create_info->constraints.push_back(move(copy)); + break; + } + default: + throw InternalException("Unsupported constraint for entry!"); + } + } -hash_t Checksum(uint64_t x) { - return x * UINT64_C(0xbf58476d1ce4e5b9); + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); + if (columns.GetColumn(LogicalIndex(removed_index)).Generated()) { + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), + storage); + } + auto new_storage = + make_shared(context, *storage, columns.LogicalToPhysical(LogicalIndex(removed_index)).index); + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), + new_storage); } -uint64_t Checksum(uint8_t *buffer, size_t size) { - uint64_t result = 5381; - uint64_t *ptr = (uint64_t *)buffer; - size_t i; - // for efficiency, we first checksum uint64_t values - for (i = 0; i < size / 8; i++) { - result ^= Checksum(ptr[i]); - } - if (size - i * 8 > 0) { - // the remaining 0-7 bytes we hash using a string hash - result ^= Hash(buffer + i * 8, size - i * 8); +unique_ptr TableCatalogEntry::SetDefault(ClientContext &context, SetDefaultInfo &info) { + auto create_info = make_unique(schema->name, name); + auto default_idx = GetColumnIndex(info.column_name); + if (default_idx.index == COLUMN_IDENTIFIER_ROW_ID) { + throw CatalogException("Cannot SET DEFAULT for rowid column"); } - return result; -} -} // namespace duckdb -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/compressed_file_system.hpp -// -// -//===----------------------------------------------------------------------===// + // Copy all the columns, changing the value of the one that was specified by 'column_name' + for (auto &col : columns.Logical()) { + auto copy = col.Copy(); + if (default_idx == col.Logical()) { + // set the default value of this column + if (copy.Generated()) { + throw BinderException("Cannot SET DEFAULT for generated column \"%s\"", col.Name()); + } + copy.SetDefaultValue(info.expression ? 
info.expression->Copy() : nullptr); + } + create_info->columns.AddColumn(move(copy)); + } + // Copy all the constraints + for (idx_t i = 0; i < constraints.size(); i++) { + auto constraint = constraints[i]->Copy(); + create_info->constraints.push_back(move(constraint)); + } + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); +} +unique_ptr TableCatalogEntry::SetNotNull(ClientContext &context, SetNotNullInfo &info) { + auto create_info = make_unique(schema->name, name); + create_info->columns = columns.Copy(); + auto not_null_idx = GetColumnIndex(info.column_name); + if (columns.GetColumn(LogicalIndex(not_null_idx)).Generated()) { + throw BinderException("Unsupported constraint for generated column!"); + } + bool has_not_null = false; + for (idx_t i = 0; i < constraints.size(); i++) { + auto constraint = constraints[i]->Copy(); + if (constraint->type == ConstraintType::NOT_NULL) { + auto ¬_null = (NotNullConstraint &)*constraint; + if (not_null.index == not_null_idx) { + has_not_null = true; + } + } + create_info->constraints.push_back(move(constraint)); + } + if (!has_not_null) { + create_info->constraints.push_back(make_unique(not_null_idx)); + } + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); + // Early return + if (has_not_null) { + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), + storage); + } -namespace duckdb { -class CompressedFile; + // Return with new storage info. Note that we need the bound column index here. + auto new_storage = make_shared( + context, *storage, make_unique(columns.LogicalToPhysical(LogicalIndex(not_null_idx)))); + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), + new_storage); +} -struct StreamData { - // various buffers & pointers - bool write = false; - unique_ptr in_buff; - unique_ptr out_buff; - data_ptr_t out_buff_start = nullptr; - data_ptr_t out_buff_end = nullptr; - data_ptr_t in_buff_start = nullptr; - data_ptr_t in_buff_end = nullptr; +unique_ptr TableCatalogEntry::DropNotNull(ClientContext &context, DropNotNullInfo &info) { + auto create_info = make_unique(schema->name, name); + create_info->columns = columns.Copy(); - idx_t in_buf_size = 0; - idx_t out_buf_size = 0; -}; + auto not_null_idx = GetColumnIndex(info.column_name); + for (idx_t i = 0; i < constraints.size(); i++) { + auto constraint = constraints[i]->Copy(); + // Skip/drop not_null + if (constraint->type == ConstraintType::NOT_NULL) { + auto ¬_null = (NotNullConstraint &)*constraint; + if (not_null.index == not_null_idx) { + continue; + } + } + create_info->constraints.push_back(move(constraint)); + } -struct StreamWrapper { - DUCKDB_API virtual ~StreamWrapper(); + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); +} - DUCKDB_API virtual void Initialize(CompressedFile &file, bool write) = 0; - DUCKDB_API virtual bool Read(StreamData &stream_data) = 0; - DUCKDB_API virtual void Write(CompressedFile &file, StreamData &stream_data, data_ptr_t buffer, - int64_t nr_bytes) = 0; - DUCKDB_API virtual void Close() = 0; -}; +unique_ptr TableCatalogEntry::ChangeColumnType(ClientContext &context, ChangeColumnTypeInfo &info) { 
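+	// A statement of the form ALTER TABLE ... ALTER COLUMN ... [SET DATA] TYPE ... is handled here:
+	// info.target_type holds the requested type and info.expression describes how the new values are
+	// computed (typically a cast of the existing column). A USER type name is first resolved to its
+	// concrete type through the catalog before the column type is changed.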
+ if (info.target_type.id() == LogicalTypeId::USER) { + auto &catalog = Catalog::GetCatalog(context); + info.target_type = catalog.GetType(context, schema->name, UserType::GetTypeName(info.target_type)); + } + auto change_idx = GetColumnIndex(info.column_name); + auto create_info = make_unique(schema->name, name); + create_info->temporary = temporary; -class CompressedFileSystem : public FileSystem { -public: - DUCKDB_API int64_t Read(FileHandle &handle, void *buffer, int64_t nr_bytes) override; - DUCKDB_API int64_t Write(FileHandle &handle, void *buffer, int64_t nr_bytes) override; + for (auto &col : columns.Logical()) { + auto copy = col.Copy(); + if (change_idx == col.Logical()) { + // set the type of this column + if (copy.Generated()) { + throw NotImplementedException("Changing types of generated columns is not supported yet"); + } + copy.SetType(info.target_type); + } + // TODO: check if the generated_expression breaks, only delete it if it does + if (copy.Generated() && column_dependency_manager.IsDependencyOf(col.Logical(), change_idx)) { + throw BinderException( + "This column is referenced by the generated column \"%s\", so its type can not be changed", + copy.Name()); + } + create_info->columns.AddColumn(move(copy)); + } - DUCKDB_API void Reset(FileHandle &handle) override; + for (idx_t i = 0; i < constraints.size(); i++) { + auto constraint = constraints[i]->Copy(); + switch (constraint->type) { + case ConstraintType::CHECK: { + auto &bound_check = (BoundCheckConstraint &)*bound_constraints[i]; + auto physical_index = columns.LogicalToPhysical(change_idx); + if (bound_check.bound_columns.find(physical_index) != bound_check.bound_columns.end()) { + throw BinderException("Cannot change the type of a column that has a CHECK constraint specified"); + } + break; + } + case ConstraintType::NOT_NULL: + break; + case ConstraintType::UNIQUE: { + auto &bound_unique = (BoundUniqueConstraint &)*bound_constraints[i]; + if (bound_unique.key_set.find(change_idx) != bound_unique.key_set.end()) { + throw BinderException( + "Cannot change the type of a column that has a UNIQUE or PRIMARY KEY constraint specified"); + } + break; + } + case ConstraintType::FOREIGN_KEY: { + auto &bfk = (BoundForeignKeyConstraint &)*bound_constraints[i]; + auto key_set = bfk.pk_key_set; + if (bfk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE) { + key_set = bfk.fk_key_set; + } else if (bfk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { + for (idx_t i = 0; i < bfk.info.fk_keys.size(); i++) { + key_set.insert(bfk.info.fk_keys[i]); + } + } + if (key_set.find(columns.LogicalToPhysical(change_idx)) != key_set.end()) { + throw BinderException("Cannot change the type of a column that has a FOREIGN KEY constraint specified"); + } + break; + } + default: + throw InternalException("Unsupported constraint for entry!"); + } + create_info->constraints.push_back(move(constraint)); + } - DUCKDB_API int64_t GetFileSize(FileHandle &handle) override; + auto binder = Binder::CreateBinder(context); + // bind the specified expression + vector bound_columns; + AlterBinder expr_binder(*binder, context, *this, bound_columns, info.target_type); + auto expression = info.expression->Copy(); + auto bound_expression = expr_binder.Bind(expression); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); + vector storage_oids; + for (idx_t i = 0; i < bound_columns.size(); i++) { + storage_oids.push_back(columns.LogicalToPhysical(bound_columns[i]).index); + } + if (storage_oids.empty()) { + 
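+		// the bound expression does not reference any table columns; scan the rowid column instead
+		// so that the expression is still evaluated once per input row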
storage_oids.push_back(COLUMN_IDENTIFIER_ROW_ID); + } - DUCKDB_API bool OnDiskFile(FileHandle &handle) override; - DUCKDB_API bool CanSeek() override; + auto new_storage = + make_shared(context, *storage, columns.LogicalToPhysical(LogicalIndex(change_idx)).index, + info.target_type, move(storage_oids), *bound_expression); + auto result = + make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), new_storage); + return move(result); +} - DUCKDB_API virtual unique_ptr CreateStream() = 0; - DUCKDB_API virtual idx_t InBufferSize() = 0; - DUCKDB_API virtual idx_t OutBufferSize() = 0; -}; +unique_ptr TableCatalogEntry::AddForeignKeyConstraint(ClientContext &context, AlterForeignKeyInfo &info) { + D_ASSERT(info.type == AlterForeignKeyType::AFT_ADD); + auto create_info = make_unique(schema->name, name); + create_info->temporary = temporary; -class CompressedFile : public FileHandle { -public: - DUCKDB_API CompressedFile(CompressedFileSystem &fs, unique_ptr child_handle_p, const string &path); - DUCKDB_API virtual ~CompressedFile() override; + create_info->columns = columns.Copy(); + for (idx_t i = 0; i < constraints.size(); i++) { + create_info->constraints.push_back(constraints[i]->Copy()); + } + ForeignKeyInfo fk_info; + fk_info.type = ForeignKeyType::FK_TYPE_PRIMARY_KEY_TABLE; + fk_info.schema = info.schema; + fk_info.table = info.fk_table; + fk_info.pk_keys = info.pk_keys; + fk_info.fk_keys = info.fk_keys; + create_info->constraints.push_back( + make_unique(info.pk_columns, info.fk_columns, move(fk_info))); - CompressedFileSystem &compressed_fs; - unique_ptr child_handle; - //! Whether the file is opened for reading or for writing - bool write = false; - StreamData stream_data; + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); -public: - DUCKDB_API void Initialize(bool write); - DUCKDB_API int64_t ReadData(void *buffer, int64_t nr_bytes); - DUCKDB_API int64_t WriteData(data_ptr_t buffer, int64_t nr_bytes); - DUCKDB_API void Close() override; + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); +} -private: - unique_ptr stream_wrapper; -}; +unique_ptr TableCatalogEntry::DropForeignKeyConstraint(ClientContext &context, + AlterForeignKeyInfo &info) { + D_ASSERT(info.type == AlterForeignKeyType::AFT_DELETE); + auto create_info = make_unique(schema->name, name); + create_info->temporary = temporary; -} // namespace duckdb + create_info->columns = columns.Copy(); + for (idx_t i = 0; i < constraints.size(); i++) { + auto constraint = constraints[i]->Copy(); + if (constraint->type == ConstraintType::FOREIGN_KEY) { + ForeignKeyConstraint &fk = (ForeignKeyConstraint &)*constraint; + if (fk.info.type == ForeignKeyType::FK_TYPE_PRIMARY_KEY_TABLE && fk.info.table == info.fk_table) { + continue; + } + } + create_info->constraints.push_back(move(constraint)); + } + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); -namespace duckdb { + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); +} -StreamWrapper::~StreamWrapper() { +ColumnDefinition &TableCatalogEntry::GetColumn(const string &name) { + return columns.GetColumnMutable(name); } -CompressedFile::CompressedFile(CompressedFileSystem &fs, unique_ptr child_handle_p, const string &path) - : FileHandle(fs, path), compressed_fs(fs), child_handle(move(child_handle_p)) { +vector TableCatalogEntry::GetTypes() { + 
vector types; + for (auto &col : columns.Physical()) { + types.push_back(col.Type()); + } + return types; } -CompressedFile::~CompressedFile() { - Close(); +void TableCatalogEntry::Serialize(Serializer &serializer) { + D_ASSERT(!internal); + + FieldWriter writer(serializer); + writer.WriteString(schema->name); + writer.WriteString(name); + columns.Serialize(writer); + writer.WriteSerializableList(constraints); + writer.Finalize(); } -void CompressedFile::Initialize(bool write) { - Close(); +unique_ptr TableCatalogEntry::Deserialize(Deserializer &source, ClientContext &context) { + auto info = make_unique(); - this->write = write; - stream_data.in_buf_size = compressed_fs.InBufferSize(); - stream_data.out_buf_size = compressed_fs.OutBufferSize(); - stream_data.in_buff = unique_ptr(new data_t[stream_data.in_buf_size]); - stream_data.in_buff_start = stream_data.in_buff.get(); - stream_data.in_buff_end = stream_data.in_buff.get(); - stream_data.out_buff = unique_ptr(new data_t[stream_data.out_buf_size]); - stream_data.out_buff_start = stream_data.out_buff.get(); - stream_data.out_buff_end = stream_data.out_buff.get(); + FieldReader reader(source); + info->schema = reader.ReadRequired(); + info->table = reader.ReadRequired(); + info->columns = ColumnList::Deserialize(reader); + info->constraints = reader.ReadRequiredSerializableList(); + reader.Finalize(); - stream_wrapper = compressed_fs.CreateStream(); - stream_wrapper->Initialize(*this, write); + return info; } -int64_t CompressedFile::ReadData(void *buffer, int64_t remaining) { - idx_t total_read = 0; - while (true) { - // first check if there are input bytes available in the output buffers - if (stream_data.out_buff_start != stream_data.out_buff_end) { - // there is! copy it into the output buffer - idx_t available = MinValue(remaining, stream_data.out_buff_end - stream_data.out_buff_start); - memcpy(data_ptr_t(buffer) + total_read, stream_data.out_buff_start, available); +string TableCatalogEntry::ToSQL() { + std::stringstream ss; - // increment the total read variables as required - stream_data.out_buff_start += available; - total_read += available; - remaining -= available; - if (remaining == 0) { - // done! 
read enough - return total_read; - } - } - if (!stream_wrapper) { - return total_read; - } + ss << "CREATE TABLE "; - // ran out of buffer: read more data from the child stream - stream_data.out_buff_start = stream_data.out_buff.get(); - stream_data.out_buff_end = stream_data.out_buff.get(); - D_ASSERT(stream_data.in_buff_start <= stream_data.in_buff_end); - D_ASSERT(stream_data.in_buff_end <= stream_data.in_buff_start + stream_data.in_buf_size); + if (schema->name != DEFAULT_SCHEMA) { + ss << KeywordHelper::WriteOptionallyQuoted(schema->name) << "."; + } - // read more input if none available - if (stream_data.in_buff_start == stream_data.in_buff_end) { - // empty input buffer: refill from the start - stream_data.in_buff_start = stream_data.in_buff.get(); - stream_data.in_buff_end = stream_data.in_buff_start; - auto sz = child_handle->Read(stream_data.in_buff.get(), stream_data.in_buf_size); - if (sz <= 0) { - stream_wrapper.reset(); - break; + ss << KeywordHelper::WriteOptionallyQuoted(name) << "("; + + // find all columns that have NOT NULL specified, but are NOT primary key columns + logical_index_set_t not_null_columns; + logical_index_set_t unique_columns; + logical_index_set_t pk_columns; + unordered_set multi_key_pks; + vector extra_constraints; + for (auto &constraint : constraints) { + if (constraint->type == ConstraintType::NOT_NULL) { + auto ¬_null = (NotNullConstraint &)*constraint; + not_null_columns.insert(not_null.index); + } else if (constraint->type == ConstraintType::UNIQUE) { + auto &pk = (UniqueConstraint &)*constraint; + vector constraint_columns = pk.columns; + if (pk.index.index != DConstants::INVALID_INDEX) { + // no columns specified: single column constraint + if (pk.is_primary_key) { + pk_columns.insert(pk.index); + } else { + unique_columns.insert(pk.index); + } + } else { + // multi-column constraint, this constraint needs to go at the end after all columns + if (pk.is_primary_key) { + // multi key pk column: insert set of columns into multi_key_pks + for (auto &col : pk.columns) { + multi_key_pks.insert(col); + } + } + extra_constraints.push_back(constraint->ToString()); } - stream_data.in_buff_end = stream_data.in_buff_start + sz; + } else if (constraint->type == ConstraintType::FOREIGN_KEY) { + auto &fk = (ForeignKeyConstraint &)*constraint; + if (fk.info.type == ForeignKeyType::FK_TYPE_FOREIGN_KEY_TABLE || + fk.info.type == ForeignKeyType::FK_TYPE_SELF_REFERENCE_TABLE) { + extra_constraints.push_back(constraint->ToString()); + } + } else { + extra_constraints.push_back(constraint->ToString()); } + } - auto finished = stream_wrapper->Read(stream_data); - if (finished) { - stream_wrapper.reset(); + for (auto &column : columns.Logical()) { + if (column.Oid() > 0) { + ss << ", "; + } + ss << KeywordHelper::WriteOptionallyQuoted(column.Name()) << " "; + ss << column.Type().ToString(); + bool not_null = not_null_columns.find(column.Logical()) != not_null_columns.end(); + bool is_single_key_pk = pk_columns.find(column.Logical()) != pk_columns.end(); + bool is_multi_key_pk = multi_key_pks.find(column.Name()) != multi_key_pks.end(); + bool is_unique = unique_columns.find(column.Logical()) != unique_columns.end(); + if (not_null && !is_single_key_pk && !is_multi_key_pk) { + // NOT NULL but not a primary key column + ss << " NOT NULL"; + } + if (is_single_key_pk) { + // single column pk: insert constraint here + ss << " PRIMARY KEY"; + } + if (is_unique) { + // single column unique: insert constraint here + ss << " UNIQUE"; + } + if (column.DefaultValue()) { + ss << 
" DEFAULT(" << column.DefaultValue()->ToString() << ")"; + } + if (column.Generated()) { + ss << " GENERATED ALWAYS AS(" << column.GeneratedExpression().ToString() << ")"; } } - return total_read; -} + // print any extra constraints that still need to be printed + for (auto &extra_constraint : extra_constraints) { + ss << ", "; + ss << extra_constraint; + } -int64_t CompressedFile::WriteData(data_ptr_t buffer, int64_t nr_bytes) { - stream_wrapper->Write(*this, stream_data, buffer, nr_bytes); - return nr_bytes; + ss << ");"; + return ss.str(); } -void CompressedFile::Close() { - if (stream_wrapper) { - stream_wrapper->Close(); - stream_wrapper.reset(); +unique_ptr TableCatalogEntry::Copy(ClientContext &context) { + auto create_info = make_unique(schema->name, name); + create_info->columns = columns.Copy(); + + for (idx_t i = 0; i < constraints.size(); i++) { + auto constraint = constraints[i]->Copy(); + create_info->constraints.push_back(move(constraint)); } - stream_data.in_buff.reset(); - stream_data.out_buff.reset(); - stream_data.out_buff_start = nullptr; - stream_data.out_buff_end = nullptr; - stream_data.in_buff_start = nullptr; - stream_data.in_buff_end = nullptr; - stream_data.in_buf_size = 0; - stream_data.out_buf_size = 0; -} -int64_t CompressedFileSystem::Read(FileHandle &handle, void *buffer, int64_t nr_bytes) { - auto &compressed_file = (CompressedFile &)handle; - return compressed_file.ReadData(buffer, nr_bytes); + auto binder = Binder::CreateBinder(context); + auto bound_create_info = binder->BindCreateTableInfo(move(create_info)); + return make_unique(catalog, schema, (BoundCreateTableInfo *)bound_create_info.get(), storage); } -int64_t CompressedFileSystem::Write(FileHandle &handle, void *buffer, int64_t nr_bytes) { - auto &compressed_file = (CompressedFile &)handle; - return compressed_file.WriteData((data_ptr_t)buffer, nr_bytes); +void TableCatalogEntry::SetAsRoot() { + storage->SetAsRoot(); + storage->info->table = name; } -void CompressedFileSystem::Reset(FileHandle &handle) { - auto &compressed_file = (CompressedFile &)handle; - compressed_file.child_handle->Reset(); - compressed_file.Initialize(compressed_file.write); +void TableCatalogEntry::CommitAlter(AlterInfo &info) { + D_ASSERT(info.type == AlterType::ALTER_TABLE); + auto &alter_table = (AlterTableInfo &)info; + string column_name; + switch (alter_table.alter_table_type) { + case AlterTableType::REMOVE_COLUMN: { + auto &remove_info = (RemoveColumnInfo &)alter_table; + column_name = remove_info.removed_column; + break; + } + case AlterTableType::ALTER_COLUMN_TYPE: { + auto &change_info = (ChangeColumnTypeInfo &)alter_table; + column_name = change_info.column_name; + break; + } + default: + break; + } + if (column_name.empty()) { + return; + } + idx_t removed_index = DConstants::INVALID_INDEX; + for (auto &col : columns.Logical()) { + if (col.Name() == column_name) { + // No need to alter storage, removed column is generated column + if (col.Generated()) { + return; + } + removed_index = col.Oid(); + break; + } + } + D_ASSERT(removed_index != DConstants::INVALID_INDEX); + storage->CommitDropColumn(columns.LogicalToPhysical(LogicalIndex(removed_index)).index); } -int64_t CompressedFileSystem::GetFileSize(FileHandle &handle) { - auto &compressed_file = (CompressedFile &)handle; - return compressed_file.child_handle->GetFileSize(); +void TableCatalogEntry::CommitDrop() { + storage->CommitDropTable(); } -bool CompressedFileSystem::OnDiskFile(FileHandle &handle) { - auto &compressed_file = (CompressedFile &)handle; - 
return compressed_file.child_handle->OnDiskFile(); -} +} // namespace duckdb -bool CompressedFileSystem::CanSeek() { - return false; + + +namespace duckdb { + +TableFunctionCatalogEntry::TableFunctionCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, + CreateTableFunctionInfo *info) + : StandardEntry(CatalogType::TABLE_FUNCTION_ENTRY, schema, catalog, info->name), functions(move(info->functions)) { + D_ASSERT(this->functions.Size() > 0); } } // namespace duckdb @@ -11057,7541 +11134,6597 @@ bool CompressedFileSystem::CanSeek() { -namespace duckdb { - -constexpr const idx_t DConstants::INVALID_INDEX; -const row_t MAX_ROW_ID = 4611686018427388000ULL; // 2^62 -const column_t COLUMN_IDENTIFIER_ROW_ID = (column_t)-1; -const sel_t ZERO_VECTOR[STANDARD_VECTOR_SIZE] = {0}; -const double PI = 3.141592653589793; -const transaction_t TRANSACTION_ID_START = 4611686018427388000ULL; // 2^62 -const transaction_t MAX_TRANSACTION_ID = NumericLimits::Maximum(); // 2^63 -const transaction_t NOT_DELETED_ID = NumericLimits::Maximum() - 1; // 2^64 - 1 -const transaction_t MAXIMUM_QUERY_ID = NumericLimits::Maximum(); // 2^64 -uint64_t NextPowerOfTwo(uint64_t v) { - v--; - v |= v >> 1; - v |= v >> 2; - v |= v >> 4; - v |= v >> 8; - v |= v >> 16; - v |= v >> 32; - v++; - return v; -} -bool IsRowIdColumnId(column_t column_id) { - return column_id == COLUMN_IDENTIFIER_ROW_ID; -} -} // namespace duckdb -/* -** This code taken from the SQLite test library. Originally found on -** the internet. The original header comment follows this comment. -** The code is largerly unchanged, but there have been some modifications. -*/ -/* - * This code implements the MD5 message-digest algorithm. - * The algorithm is due to Ron Rivest. This code was - * written by Colin Plumb in 1993, no copyright is claimed. - * This code is in the public domain; do with it what you wish. - * - * Equivalent code is available from RSA Data Security, Inc. - * This code has been tested against that, and is equivalent, - * except that you don't need to include two pages of legalese - * with every copy. - * - * To compute the message digest of a chunk of bytes, declare an - * MD5Context structure, pass it to MD5Init, call MD5Update as - * needed on buffers full of bytes, and then call MD5Final, which - * will fill a supplied 16-byte array with the digest. 
- */ -//===----------------------------------------------------------------------===// -// DuckDB -// -// duckdb/common/crypto/md5.hpp -// -// -//===----------------------------------------------------------------------===// +#include +#include +namespace duckdb { +TypeCatalogEntry::TypeCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateTypeInfo *info) + : StandardEntry(CatalogType::TYPE_ENTRY, schema, catalog, info->name), user_type(info->type) { + this->temporary = info->temporary; + this->internal = info->internal; +} +void TypeCatalogEntry::Serialize(Serializer &serializer) { + D_ASSERT(!internal); + FieldWriter writer(serializer); + writer.WriteString(schema->name); + writer.WriteString(name); + writer.WriteSerializable(user_type); + writer.Finalize(); +} +unique_ptr TypeCatalogEntry::Deserialize(Deserializer &source) { + auto info = make_unique(); -namespace duckdb { + FieldReader reader(source); + info->schema = reader.ReadRequired(); + info->name = reader.ReadRequired(); + info->type = reader.ReadRequiredSerializable(); + reader.Finalize(); -class MD5Context { -public: - static constexpr idx_t MD5_HASH_LENGTH_BINARY = 16; - static constexpr idx_t MD5_HASH_LENGTH_TEXT = 32; + return info; +} -public: - MD5Context(); +string TypeCatalogEntry::ToSQL() { + std::stringstream ss; + switch (user_type.id()) { + case (LogicalTypeId::ENUM): { + Vector values_insert_order(EnumType::GetValuesInsertOrder(user_type)); + idx_t size = EnumType::GetSize(user_type); + ss << "CREATE TYPE "; + ss << KeywordHelper::WriteOptionallyQuoted(name); + ss << " AS ENUM ( "; - void Add(const_data_ptr_t data, idx_t len) { - MD5Update(data, len); - } - void Add(const char *data); - void Add(string_t string) { - MD5Update((const_data_ptr_t)string.GetDataUnsafe(), string.GetSize()); + for (idx_t i = 0; i < size; i++) { + ss << "'" << values_insert_order.GetValue(i).ToString() << "'"; + if (i != size - 1) { + ss << ", "; + } + } + ss << ");"; + break; } - void Add(const string &data) { - MD5Update((const_data_ptr_t)data.c_str(), data.size()); + default: + throw InternalException("Logical Type can't be used as a User Defined Type"); } - //! Write the 16-byte (binary) digest to the specified location - void Finish(data_ptr_t out_digest); - //! Write the 32-character digest (in hexadecimal format) to the specified location - void FinishHex(char *out_digest); - //! Returns the 32-character digest (in hexadecimal format) as a string - string FinishHex(); + return ss.str(); +} -private: - void MD5Update(const_data_ptr_t data, idx_t len); - static void DigestToBase16(const_data_ptr_t digest, char *zBuf); +} // namespace duckdb - uint32_t buf[4]; - uint32_t bits[2]; - unsigned char in[64]; -}; -} // namespace duckdb -namespace duckdb { -/* - * Note: this code is harmless on little-endian machines. - */ -static void ByteReverse(unsigned char *buf, unsigned longs) { - uint32_t t; - do { - t = (uint32_t)((unsigned)buf[3] << 8 | buf[2]) << 16 | ((unsigned)buf[1] << 8 | buf[0]); - *(uint32_t *)buf = t; - buf += 4; - } while (--longs); -} -/* The four core functions - F1 is optimized somewhat */ -/* #define F1(x, y, z) (x & y | ~x & z) */ -#define F1(x, y, z) ((z) ^ ((x) & ((y) ^ (z)))) -#define F2(x, y, z) F1(z, x, y) -#define F3(x, y, z) ((x) ^ (y) ^ (z)) -#define F4(x, y, z) ((y) ^ ((x) | ~(z))) -/* This is the central step in the MD5 algorithm. 
*/ -#define MD5STEP(f, w, x, y, z, data, s) ((w) += f(x, y, z) + (data), (w) = (w) << (s) | (w) >> (32 - (s)), (w) += (x)) -/* - * The core of the MD5 algorithm, this alters an existing MD5 hash to - * reflect the addition of 16 longwords of new data. MD5Update blocks - * the data and converts bytes into longwords for this routine. - */ -static void MD5Transform(uint32_t buf[4], const uint32_t in[16]) { - uint32_t a, b, c, d; - a = buf[0]; - b = buf[1]; - c = buf[2]; - d = buf[3]; +#include - MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7); - MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12); - MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17); - MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22); - MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7); - MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12); - MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17); - MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22); - MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7); - MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12); - MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17); - MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22); - MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7); - MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12); - MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17); - MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22); +namespace duckdb { - MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5); - MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9); - MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14); - MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20); - MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5); - MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9); - MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14); - MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20); - MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5); - MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9); - MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14); - MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20); - MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5); - MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9); - MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14); - MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20); - - MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4); - MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11); - MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16); - MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23); - MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4); - MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11); - MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16); - MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23); - MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4); - MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11); - MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16); - MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23); - MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4); - MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11); - MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16); - MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23); - - MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6); - MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10); - MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15); - MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21); - MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6); - MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10); - MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15); - MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21); - MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6); - MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10); - MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15); - MD5STEP(F4, b, c, d, 
a, in[13] + 0x4e0811a1, 21); - MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6); - MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10); - MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15); - MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21); - - buf[0] += a; - buf[1] += b; - buf[2] += c; - buf[3] += d; +void ViewCatalogEntry::Initialize(CreateViewInfo *info) { + query = move(info->query); + this->aliases = info->aliases; + this->types = info->types; + this->temporary = info->temporary; + this->sql = info->sql; + this->internal = info->internal; } -/* - * Start MD5 accumulation. Set bit count to 0 and buffer to mysterious - * initialization constants. - */ -MD5Context::MD5Context() { - buf[0] = 0x67452301; - buf[1] = 0xefcdab89; - buf[2] = 0x98badcfe; - buf[3] = 0x10325476; - bits[0] = 0; - bits[1] = 0; +ViewCatalogEntry::ViewCatalogEntry(Catalog *catalog, SchemaCatalogEntry *schema, CreateViewInfo *info) + : StandardEntry(CatalogType::VIEW_ENTRY, schema, catalog, info->view_name) { + Initialize(info); } -/* - * Update context to reflect the concatenation of another buffer full - * of bytes. - */ -void MD5Context::MD5Update(const_data_ptr_t input, idx_t len) { - uint32_t t; - - /* Update bitcount */ - - t = bits[0]; - if ((bits[0] = t + ((uint32_t)len << 3)) < t) { - bits[1]++; /* Carry from low to high */ +unique_ptr ViewCatalogEntry::AlterEntry(ClientContext &context, AlterInfo *info) { + D_ASSERT(!internal); + if (info->type != AlterType::ALTER_VIEW) { + throw CatalogException("Can only modify view with ALTER VIEW statement"); } - bits[1] += len >> 29; - - t = (t >> 3) & 0x3f; /* Bytes already in shsInfo->data */ + auto view_info = (AlterViewInfo *)info; + switch (view_info->alter_view_type) { + case AlterViewType::RENAME_VIEW: { + auto rename_info = (RenameViewInfo *)view_info; + auto copied_view = Copy(context); + copied_view->name = rename_info->new_view_name; + return copied_view; + } + default: + throw InternalException("Unrecognized alter view type!"); + } +} - /* Handle any leading odd-sized chunks */ +void ViewCatalogEntry::Serialize(Serializer &serializer) { + D_ASSERT(!internal); + FieldWriter writer(serializer); + writer.WriteString(schema->name); + writer.WriteString(name); + writer.WriteString(sql); + writer.WriteSerializable(*query); + writer.WriteList(aliases); + writer.WriteRegularSerializableList(types); + writer.Finalize(); +} - if (t) { - unsigned char *p = (unsigned char *)in + t; +unique_ptr ViewCatalogEntry::Deserialize(Deserializer &source, ClientContext &context) { + auto info = make_unique(); - t = 64 - t; - if (len < t) { - memcpy(p, input, len); - return; - } - memcpy(p, input, t); - ByteReverse(in, 16); - MD5Transform(buf, (uint32_t *)in); - input += t; - len -= t; - } + FieldReader reader(source); + info->schema = reader.ReadRequired(); + info->view_name = reader.ReadRequired(); + info->sql = reader.ReadRequired(); + info->query = reader.ReadRequiredSerializable(); + info->aliases = reader.ReadRequiredList(); + info->types = reader.ReadRequiredSerializableList(); + reader.Finalize(); - /* Process data in 64-byte chunks */ + return info; +} - while (len >= 64) { - memcpy(in, input, 64); - ByteReverse(in, 16); - MD5Transform(buf, (uint32_t *)in); - input += 64; - len -= 64; +string ViewCatalogEntry::ToSQL() { + if (sql.empty()) { + //! Return empty sql with view name so pragma view_tables don't complain + return sql; } - - /* Handle any remaining bytes of data. 
*/ - memcpy(in, input, len); + return sql + "\n;"; } -/* - * Final wrapup - pad to 64-byte boundary with the bit pattern - * 1 0* (64-bit count of bits processed, MSB-first) - */ -void MD5Context::Finish(data_ptr_t out_digest) { - unsigned count; - unsigned char *p; +unique_ptr ViewCatalogEntry::Copy(ClientContext &context) { + D_ASSERT(!internal); + auto create_info = make_unique(schema->name, name); + create_info->query = unique_ptr_cast(query->Copy()); + for (idx_t i = 0; i < aliases.size(); i++) { + create_info->aliases.push_back(aliases[i]); + } + for (idx_t i = 0; i < types.size(); i++) { + create_info->types.push_back(types[i]); + } + create_info->temporary = temporary; + create_info->sql = sql; - /* Compute number of bytes mod 64 */ - count = (bits[0] >> 3) & 0x3F; + return make_unique(catalog, schema, create_info.get()); +} - /* Set the first char of padding to 0x80. This is safe since there is - always at least one byte free */ - p = in + count; - *p++ = 0x80; +} // namespace duckdb - /* Bytes of padding needed to make 64 bytes */ - count = 64 - 1 - count; - /* Pad out to 56 mod 64 */ - if (count < 8) { - /* Two lots of padding: Pad the first block to 64 bytes */ - memset(p, 0, count); - ByteReverse(in, 16); - MD5Transform(buf, (uint32_t *)in); - /* Now fill the next block with 56 bytes */ - memset(in, 0, 56); - } else { - /* Pad block to 56 bytes */ - memset(p, 0, count - 8); - } - ByteReverse(in, 14); - /* Append length in bits and transform */ - ((uint32_t *)in)[14] = bits[0]; - ((uint32_t *)in)[15] = bits[1]; +namespace duckdb { - MD5Transform(buf, (uint32_t *)in); - ByteReverse((unsigned char *)buf, 4); - memcpy(out_digest, buf, 16); +CatalogEntry::CatalogEntry(CatalogType type, Catalog *catalog_p, string name_p) + : oid(catalog_p->ModifyCatalog()), type(type), catalog(catalog_p), set(nullptr), name(move(name_p)), deleted(false), + temporary(false), internal(false), parent(nullptr) { } -void MD5Context::DigestToBase16(const_data_ptr_t digest, char *zbuf) { - static char const HEX_CODES[] = "0123456789abcdef"; - int i, j; +CatalogEntry::~CatalogEntry() { +} - for (j = i = 0; i < 16; i++) { - int a = digest[i]; - zbuf[j++] = HEX_CODES[(a >> 4) & 0xf]; - zbuf[j++] = HEX_CODES[a & 0xf]; - } +void CatalogEntry::SetAsRoot() { } -void MD5Context::FinishHex(char *out_digest) { - data_t digest[MD5_HASH_LENGTH_BINARY]; - Finish(digest); - DigestToBase16(digest, out_digest); +// LCOV_EXCL_START +unique_ptr CatalogEntry::AlterEntry(ClientContext &context, AlterInfo *info) { + throw InternalException("Unsupported alter type for catalog entry!"); } -string MD5Context::FinishHex() { - char digest[MD5_HASH_LENGTH_TEXT]; - FinishHex(digest); - return string(digest, MD5_HASH_LENGTH_TEXT); +unique_ptr CatalogEntry::Copy(ClientContext &context) { + throw InternalException("Unsupported copy type for catalog entry!"); } -void MD5Context::Add(const char *data) { - MD5Update((const_data_ptr_t)data, strlen(data)); +string CatalogEntry::ToSQL() { + throw InternalException("Unsupported catalog type for ToSQL()"); } +// LCOV_EXCL_STOP } // namespace duckdb -// This file is licensed under Apache License 2.0 -// Source code taken from https://github.com/google/benchmark -// It is highly modified + + + + namespace duckdb { -inline uint64_t ChronoNow() { - return std::chrono::duration_cast( - std::chrono::time_point_cast(std::chrono::high_resolution_clock::now()) - .time_since_epoch()) - .count(); +CatalogSearchPath::CatalogSearchPath(ClientContext &context_p) : context(context_p) { + 
SetPaths(ParsePaths("")); } -inline uint64_t Now() { -#if defined(RDTSC) -#if defined(__i386__) - uint64_t ret; - __asm__ volatile("rdtsc" : "=A"(ret)); - return ret; -#elif defined(__x86_64__) || defined(__amd64__) - uint64_t low, high; - __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return (high << 32) | low; -#elif defined(__powerpc__) || defined(__ppc__) - uint64_t tbl, tbu0, tbu1; - asm("mftbu %0" : "=r"(tbu0)); - asm("mftb %0" : "=r"(tbl)); - asm("mftbu %0" : "=r"(tbu1)); - tbl &= -static_cast(tbu0 == tbu1); - return (tbu1 << 32) | tbl; -#elif defined(__sparc__) - uint64_t tick; - asm(".byte 0x83, 0x41, 0x00, 0x00"); - asm("mov %%g1, %0" : "=r"(tick)); - return tick; -#elif defined(__ia64__) - uint64_t itc; - asm("mov %0 = ar.itc" : "=r"(itc)); - return itc; -#elif defined(COMPILER_MSVC) && defined(_M_IX86) - _asm rdtsc -#elif defined(COMPILER_MSVC) - return __rdtsc(); -#elif defined(__aarch64__) - uint64_t virtual_timer_value; - asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value)); - return virtual_timer_value; -#elif defined(__ARM_ARCH) -#if (__ARM_ARCH >= 6) - uint32_t pmccntr; - uint32_t pmuseren; - uint32_t pmcntenset; - asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren)); - if (pmuseren & 1) { // Allows reading perfmon counters for user mode code. - asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset)); - if (pmcntenset & 0x80000000ul) { // Is it counting? - asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr)); - return static_cast(pmccntr) * 64; // Should optimize to << 6 +void CatalogSearchPath::Set(vector &new_paths, bool is_set_schema) { + if (is_set_schema && new_paths.size() != 1) { + throw CatalogException("SET schema can set only 1 schema. This has %d", new_paths.size()); + } + auto &catalog = Catalog::GetCatalog(context); + for (const auto &path : new_paths) { + if (!catalog.GetSchema(context, StringUtil::Lower(path), true)) { + throw CatalogException("SET %s: No schema named %s found.", is_set_schema ? "schema" : "search_path", path); } } -#endif - return ChronoNow(); -#else - return ChronoNow(); -#endif -#else - return ChronoNow(); -#endif // defined(RDTSC) -} -uint64_t CycleCounter::Tick() const { - return Now(); + this->set_paths = move(new_paths); + SetPaths(set_paths); } -} // namespace duckdb +void CatalogSearchPath::Set(const string &new_value, bool is_set_schema) { + auto new_paths = ParsePaths(new_value); + Set(new_paths, is_set_schema); +} +const vector &CatalogSearchPath::Get() { + return paths; +} +const string &CatalogSearchPath::GetOrDefault(const string &name) { + return name == INVALID_SCHEMA ? 
GetDefault() : name; // NOLINT +} -namespace duckdb { +const string &CatalogSearchPath::GetDefault() { + const auto &paths = Get(); + D_ASSERT(paths.size() >= 2); + D_ASSERT(paths[0] == TEMP_SCHEMA); + return paths[1]; +} -// LCOV_EXCL_START -string CatalogTypeToString(CatalogType type) { - switch (type) { - case CatalogType::COLLATION_ENTRY: - return "Collation"; - case CatalogType::TYPE_ENTRY: - return "Type"; - case CatalogType::TABLE_ENTRY: - return "Table"; - case CatalogType::SCHEMA_ENTRY: - return "Schema"; - case CatalogType::TABLE_FUNCTION_ENTRY: - return "Table Function"; - case CatalogType::SCALAR_FUNCTION_ENTRY: - return "Scalar Function"; - case CatalogType::AGGREGATE_FUNCTION_ENTRY: - return "Aggregate Function"; - case CatalogType::COPY_FUNCTION_ENTRY: - return "Copy Function"; - case CatalogType::PRAGMA_FUNCTION_ENTRY: - return "Pragma Function"; - case CatalogType::MACRO_ENTRY: - return "Macro Function"; - case CatalogType::TABLE_MACRO_ENTRY: - return "Table Macro Function"; - case CatalogType::VIEW_ENTRY: - return "View"; - case CatalogType::INDEX_ENTRY: - return "Index"; - case CatalogType::PREPARED_STATEMENT: - return "Prepared Statement"; - case CatalogType::SEQUENCE_ENTRY: - return "Sequence"; - case CatalogType::INVALID: - case CatalogType::DELETED_ENTRY: - case CatalogType::UPDATED_ENTRY: - break; +void CatalogSearchPath::SetPaths(vector new_paths) { + paths.clear(); + paths.reserve(new_paths.size() + 3); + paths.emplace_back(TEMP_SCHEMA); + for (auto &path : new_paths) { + paths.push_back(move(path)); } - return "INVALID"; + paths.emplace_back(DEFAULT_SCHEMA); + paths.emplace_back("pg_catalog"); +} + +vector CatalogSearchPath::ParsePaths(const string &value) { + return StringUtil::SplitWithQuote(StringUtil::Lower(value)); } -// LCOV_EXCL_STOP } // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/transaction/transaction_manager.hpp +// +// +//===----------------------------------------------------------------------===// + + + + + + + + + + namespace duckdb { -// LCOV_EXCL_START -CompressionType CompressionTypeFromString(const string &str) { - auto compression = StringUtil::Lower(str); - if (compression == "uncompressed") { - return CompressionType::COMPRESSION_UNCOMPRESSED; - } else if (compression == "rle") { - return CompressionType::COMPRESSION_RLE; - } else if (compression == "dictionary") { - return CompressionType::COMPRESSION_DICTIONARY; - } else if (compression == "pfor") { - return CompressionType::COMPRESSION_PFOR_DELTA; - } else if (compression == "bitpacking") { - return CompressionType::COMPRESSION_BITPACKING; - } else if (compression == "fsst") { - return CompressionType::COMPRESSION_FSST; - } else { - return CompressionType::COMPRESSION_AUTO; +class ClientContext; +class Catalog; +struct ClientLockWrapper; +class DatabaseInstance; +class Transaction; + +//! The Transaction Manager is responsible for creating and managing +//! transactions +class TransactionManager { + friend struct CheckpointLock; + +public: + explicit TransactionManager(DatabaseInstance &db); + ~TransactionManager(); + + //! Start a new transaction + Transaction *StartTransaction(ClientContext &context); + //! Commit the given transaction + string CommitTransaction(ClientContext &context, Transaction *transaction); + //! 
Rollback the given transaction + void RollbackTransaction(Transaction *transaction); + + transaction_t GetQueryNumber() { + return current_query_number++; + } + transaction_t LowestActiveId() { + return lowest_active_id; + } + transaction_t LowestActiveStart() { + return lowest_active_start; } -} -string CompressionTypeToString(CompressionType type) { - switch (type) { - case CompressionType::COMPRESSION_UNCOMPRESSED: - return "Uncompressed"; - case CompressionType::COMPRESSION_CONSTANT: - return "Constant"; - case CompressionType::COMPRESSION_RLE: - return "RLE"; - case CompressionType::COMPRESSION_DICTIONARY: - return "Dictionary"; - case CompressionType::COMPRESSION_PFOR_DELTA: - return "PFOR"; - case CompressionType::COMPRESSION_BITPACKING: - return "BitPacking"; - case CompressionType::COMPRESSION_FSST: - return "FSST"; - default: - throw InternalException("Unrecognized compression type!"); + void Checkpoint(ClientContext &context, bool force = false); + + static TransactionManager &Get(ClientContext &context); + static TransactionManager &Get(DatabaseInstance &db); + + void SetBaseCommitId(transaction_t base) { + D_ASSERT(base >= TRANSACTION_ID_START); + current_transaction_id = base; } -} -// LCOV_EXCL_STOP + +private: + bool CanCheckpoint(Transaction *current = nullptr); + //! Remove the given transaction from the list of active transactions + void RemoveTransaction(Transaction *transaction) noexcept; + void LockClients(vector &client_locks, ClientContext &context); + + //! The database instance + DatabaseInstance &db; + //! The current query number + atomic current_query_number; + //! The current start timestamp used by transactions + transaction_t current_start_timestamp; + //! The current transaction ID used by transactions + transaction_t current_transaction_id; + //! The lowest active transaction id + atomic lowest_active_id; + //! The lowest active transaction timestamp + atomic lowest_active_start; + //! Set of currently running transactions + vector> active_transactions; + //! Set of recently committed transactions + vector> recently_committed_transactions; + //! Transactions awaiting GC + vector> old_transactions; + //! 
The lock used for transaction operations + mutex transaction_lock; + + bool thread_is_checkpointing; +}; } // namespace duckdb + + + + + namespace duckdb { -// LCOV_EXCL_START -string ExpressionTypeToString(ExpressionType type) { - switch (type) { - case ExpressionType::OPERATOR_CAST: - return "CAST"; - case ExpressionType::OPERATOR_NOT: - return "NOT"; - case ExpressionType::OPERATOR_IS_NULL: - return "IS_NULL"; - case ExpressionType::OPERATOR_IS_NOT_NULL: - return "IS_NOT_NULL"; - case ExpressionType::COMPARE_EQUAL: - return "EQUAL"; - case ExpressionType::COMPARE_NOTEQUAL: - return "NOTEQUAL"; - case ExpressionType::COMPARE_LESSTHAN: - return "LESSTHAN"; - case ExpressionType::COMPARE_GREATERTHAN: - return "GREATERTHAN"; - case ExpressionType::COMPARE_LESSTHANOREQUALTO: - return "LESSTHANOREQUALTO"; - case ExpressionType::COMPARE_GREATERTHANOREQUALTO: - return "GREATERTHANOREQUALTO"; - case ExpressionType::COMPARE_IN: - return "IN"; - case ExpressionType::COMPARE_DISTINCT_FROM: - return "DISTINCT_FROM"; - case ExpressionType::COMPARE_NOT_DISTINCT_FROM: - return "NOT_DISTINCT_FROM"; - case ExpressionType::CONJUNCTION_AND: - return "AND"; - case ExpressionType::CONJUNCTION_OR: - return "OR"; - case ExpressionType::VALUE_CONSTANT: - return "CONSTANT"; - case ExpressionType::VALUE_PARAMETER: - return "PARAMETER"; - case ExpressionType::VALUE_TUPLE: - return "TUPLE"; - case ExpressionType::VALUE_TUPLE_ADDRESS: - return "TUPLE_ADDRESS"; - case ExpressionType::VALUE_NULL: - return "NULL"; - case ExpressionType::VALUE_VECTOR: - return "VECTOR"; - case ExpressionType::VALUE_SCALAR: - return "SCALAR"; - case ExpressionType::AGGREGATE: - return "AGGREGATE"; - case ExpressionType::WINDOW_AGGREGATE: - return "WINDOW_AGGREGATE"; - case ExpressionType::WINDOW_RANK: - return "RANK"; - case ExpressionType::WINDOW_RANK_DENSE: - return "RANK_DENSE"; - case ExpressionType::WINDOW_PERCENT_RANK: - return "PERCENT_RANK"; - case ExpressionType::WINDOW_ROW_NUMBER: - return "ROW_NUMBER"; - case ExpressionType::WINDOW_FIRST_VALUE: - return "FIRST_VALUE"; - case ExpressionType::WINDOW_LAST_VALUE: - return "LAST_VALUE"; - case ExpressionType::WINDOW_NTH_VALUE: - return "NTH_VALUE"; - case ExpressionType::WINDOW_CUME_DIST: - return "CUME_DIST"; - case ExpressionType::WINDOW_LEAD: - return "LEAD"; - case ExpressionType::WINDOW_LAG: - return "LAG"; - case ExpressionType::WINDOW_NTILE: - return "NTILE"; - case ExpressionType::FUNCTION: - return "FUNCTION"; - case ExpressionType::CASE_EXPR: - return "CASE"; - case ExpressionType::OPERATOR_NULLIF: - return "NULLIF"; - case ExpressionType::OPERATOR_COALESCE: - return "COALESCE"; - case ExpressionType::ARRAY_EXTRACT: - return "ARRAY_EXTRACT"; - case ExpressionType::ARRAY_SLICE: - return "ARRAY_SLICE"; - case ExpressionType::STRUCT_EXTRACT: - return "STRUCT_EXTRACT"; - case ExpressionType::SUBQUERY: - return "SUBQUERY"; - case ExpressionType::STAR: - return "STAR"; - case ExpressionType::PLACEHOLDER: - return "PLACEHOLDER"; - case ExpressionType::COLUMN_REF: - return "COLUMN_REF"; - case ExpressionType::FUNCTION_REF: - return "FUNCTION_REF"; - case ExpressionType::TABLE_REF: - return "TABLE_REF"; - case ExpressionType::CAST: - return "CAST"; - case ExpressionType::COMPARE_NOT_IN: - return "COMPARE_NOT_IN"; - case ExpressionType::COMPARE_BETWEEN: - return "COMPARE_BETWEEN"; - case ExpressionType::COMPARE_NOT_BETWEEN: - return "COMPARE_NOT_BETWEEN"; - case ExpressionType::VALUE_DEFAULT: - return "VALUE_DEFAULT"; - case ExpressionType::BOUND_REF: - return "BOUND_REF"; - case 
ExpressionType::BOUND_COLUMN_REF: - return "BOUND_COLUMN_REF"; - case ExpressionType::BOUND_FUNCTION: - return "BOUND_FUNCTION"; - case ExpressionType::BOUND_AGGREGATE: - return "BOUND_AGGREGATE"; - case ExpressionType::GROUPING_FUNCTION: - return "GROUPING"; - case ExpressionType::ARRAY_CONSTRUCTOR: - return "ARRAY_CONSTRUCTOR"; - case ExpressionType::TABLE_STAR: - return "TABLE_STAR"; - case ExpressionType::BOUND_UNNEST: - return "BOUND_UNNEST"; - case ExpressionType::COLLATE: - return "COLLATE"; - case ExpressionType::POSITIONAL_REFERENCE: - return "POSITIONAL_REFERENCE"; - case ExpressionType::LAMBDA: - return "LAMBDA"; - case ExpressionType::ARROW: - return "ARROW"; - case ExpressionType::INVALID: - break; +//! Class responsible to keep track of state when removing entries from the catalog. +//! When deleting, many types of errors can be thrown, since we want to avoid try/catch blocks +//! this class makes sure that whatever elements were modified are returned to a correct state +//! when exceptions are thrown. +//! The idea here is to use RAII (Resource acquisition is initialization) to mimic a try/catch/finally block. +//! If any exception is raised when this object exists, then its destructor will be called +//! and the entry will return to its previous state during deconstruction. +class EntryDropper { +public: + //! Both constructor and destructor are privates because they should only be called by DropEntryDependencies + explicit EntryDropper(CatalogSet &catalog_set, idx_t entry_index) + : catalog_set(catalog_set), entry_index(entry_index) { + old_deleted = catalog_set.entries[entry_index].get()->deleted; } - return "INVALID"; -} -// LCOV_EXCL_STOP -string ExpressionTypeToOperator(ExpressionType type) { - switch (type) { - case ExpressionType::COMPARE_EQUAL: - return "="; - case ExpressionType::COMPARE_NOTEQUAL: - return "!="; - case ExpressionType::COMPARE_LESSTHAN: - return "<"; - case ExpressionType::COMPARE_GREATERTHAN: - return ">"; - case ExpressionType::COMPARE_LESSTHANOREQUALTO: - return "<="; - case ExpressionType::COMPARE_GREATERTHANOREQUALTO: - return ">="; - case ExpressionType::COMPARE_DISTINCT_FROM: - return "IS DISTINCT FROM"; - case ExpressionType::COMPARE_NOT_DISTINCT_FROM: - return "IS NOT DISTINCT FROM"; - case ExpressionType::CONJUNCTION_AND: - return "AND"; - case ExpressionType::CONJUNCTION_OR: - return "OR"; - default: - return ""; + ~EntryDropper() { + catalog_set.entries[entry_index].get()->deleted = old_deleted; } + +private: + //! The current catalog_set + CatalogSet &catalog_set; + //! Keeps track of the state of the entry before starting the delete + bool old_deleted; + //! 
Index of entry to be deleted + idx_t entry_index; +}; + +CatalogSet::CatalogSet(Catalog &catalog, unique_ptr defaults) + : catalog(catalog), defaults(move(defaults)) { } -ExpressionType NegateComparisionExpression(ExpressionType type) { - ExpressionType negated_type = ExpressionType::INVALID; - switch (type) { - case ExpressionType::COMPARE_EQUAL: - negated_type = ExpressionType::COMPARE_NOTEQUAL; - break; - case ExpressionType::COMPARE_NOTEQUAL: - negated_type = ExpressionType::COMPARE_EQUAL; - break; - case ExpressionType::COMPARE_LESSTHAN: - negated_type = ExpressionType::COMPARE_GREATERTHANOREQUALTO; - break; - case ExpressionType::COMPARE_GREATERTHAN: - negated_type = ExpressionType::COMPARE_LESSTHANOREQUALTO; - break; - case ExpressionType::COMPARE_LESSTHANOREQUALTO: - negated_type = ExpressionType::COMPARE_GREATERTHAN; - break; - case ExpressionType::COMPARE_GREATERTHANOREQUALTO: - negated_type = ExpressionType::COMPARE_LESSTHAN; - break; - default: - throw InternalException("Unsupported comparison type in negation"); +bool CatalogSet::CreateEntry(ClientContext &context, const string &name, unique_ptr value, + unordered_set &dependencies) { + auto &transaction = Transaction::GetTransaction(context); + // lock the catalog for writing + lock_guard write_lock(catalog.write_lock); + // lock this catalog set to disallow reading + unique_lock read_lock(catalog_lock); + + // first check if the entry exists in the unordered set + idx_t entry_index; + auto mapping_value = GetMapping(context, name); + if (mapping_value == nullptr || mapping_value->deleted) { + // if it does not: entry has never been created + + // check if there is a default entry + auto entry = CreateDefaultEntry(context, name, read_lock); + if (entry) { + return false; + } + + // first create a dummy deleted entry for this entry + // so transactions started before the commit of this transaction don't + // see it yet + entry_index = current_entry++; + auto dummy_node = make_unique(CatalogType::INVALID, value->catalog, name); + dummy_node->timestamp = 0; + dummy_node->deleted = true; + dummy_node->set = this; + + entries[entry_index] = move(dummy_node); + PutMapping(context, name, entry_index); + } else { + entry_index = mapping_value->index; + auto ¤t = *entries[entry_index]; + // if it does, we have to check version numbers + if (HasConflict(context, current.timestamp)) { + // current version has been written to by a currently active + // transaction + throw TransactionException("Catalog write-write conflict on create with \"%s\"", current.name); + } + // there is a current version that has been committed + // if it has not been deleted there is a conflict + if (!current.deleted) { + return false; + } } - return negated_type; + // create a new entry and replace the currently stored one + // set the timestamp to the timestamp of the current transaction + // and point it at the dummy node + value->timestamp = transaction.transaction_id; + value->set = this; + + // now add the dependency set of this object to the dependency manager + catalog.dependency_manager->AddObject(context, value.get(), dependencies); + + value->child = move(entries[entry_index]); + value->child->parent = value.get(); + // push the old entry in the undo buffer for this transaction + transaction.PushCatalogEntry(value->child.get()); + entries[entry_index] = move(value); + return true; } -ExpressionType FlipComparisionExpression(ExpressionType type) { - ExpressionType flipped_type = ExpressionType::INVALID; - switch (type) { - case 
ExpressionType::COMPARE_NOT_DISTINCT_FROM: - case ExpressionType::COMPARE_DISTINCT_FROM: - case ExpressionType::COMPARE_NOTEQUAL: - case ExpressionType::COMPARE_EQUAL: - flipped_type = type; - break; - case ExpressionType::COMPARE_LESSTHAN: - flipped_type = ExpressionType::COMPARE_GREATERTHAN; - break; - case ExpressionType::COMPARE_GREATERTHAN: - flipped_type = ExpressionType::COMPARE_LESSTHAN; - break; - case ExpressionType::COMPARE_LESSTHANOREQUALTO: - flipped_type = ExpressionType::COMPARE_GREATERTHANOREQUALTO; - break; - case ExpressionType::COMPARE_GREATERTHANOREQUALTO: - flipped_type = ExpressionType::COMPARE_LESSTHANOREQUALTO; - break; - default: - throw InternalException("Unsupported comparison type in flip"); +bool CatalogSet::GetEntryInternal(ClientContext &context, idx_t entry_index, CatalogEntry *&catalog_entry) { + catalog_entry = entries[entry_index].get(); + // if it does: we have to retrieve the entry and to check version numbers + if (HasConflict(context, catalog_entry->timestamp)) { + // current version has been written to by a currently active + // transaction + throw TransactionException("Catalog write-write conflict on alter with \"%s\"", catalog_entry->name); } - return flipped_type; + // there is a current version that has been committed by this transaction + if (catalog_entry->deleted) { + // if the entry was already deleted, it now does not exist anymore + // so we return that we could not find it + return false; + } + return true; } -ExpressionType OperatorToExpressionType(const string &op) { - if (op == "=" || op == "==") { - return ExpressionType::COMPARE_EQUAL; - } else if (op == "!=" || op == "<>") { - return ExpressionType::COMPARE_NOTEQUAL; - } else if (op == "<") { - return ExpressionType::COMPARE_LESSTHAN; - } else if (op == ">") { - return ExpressionType::COMPARE_GREATERTHAN; - } else if (op == "<=") { - return ExpressionType::COMPARE_LESSTHANOREQUALTO; - } else if (op == ">=") { - return ExpressionType::COMPARE_GREATERTHANOREQUALTO; +bool CatalogSet::GetEntryInternal(ClientContext &context, const string &name, idx_t &entry_index, + CatalogEntry *&catalog_entry) { + auto mapping_value = GetMapping(context, name); + if (mapping_value == nullptr || mapping_value->deleted) { + // the entry does not exist, check if we can create a default entry + return false; } - return ExpressionType::INVALID; + entry_index = mapping_value->index; + return GetEntryInternal(context, entry_index, catalog_entry); } -string ExpressionClassToString(ExpressionClass type) { - switch (type) { - case ExpressionClass::INVALID: - return "INVALID"; - case ExpressionClass::AGGREGATE: - return "AGGREGATE"; - case ExpressionClass::CASE: - return "CASE"; - case ExpressionClass::CAST: - return "CAST"; - case ExpressionClass::COLUMN_REF: - return "COLUMN_REF"; - case ExpressionClass::COMPARISON: - return "COMPARISON"; - case ExpressionClass::CONJUNCTION: - return "CONJUNCTION"; - case ExpressionClass::CONSTANT: - return "CONSTANT"; - case ExpressionClass::DEFAULT: - return "DEFAULT"; - case ExpressionClass::FUNCTION: - return "FUNCTION"; - case ExpressionClass::OPERATOR: - return "OPERATOR"; - case ExpressionClass::STAR: - return "STAR"; - case ExpressionClass::SUBQUERY: - return "SUBQUERY"; - case ExpressionClass::WINDOW: - return "WINDOW"; - case ExpressionClass::PARAMETER: - return "PARAMETER"; - case ExpressionClass::COLLATE: - return "COLLATE"; - case ExpressionClass::LAMBDA: - return "LAMBDA"; - case ExpressionClass::POSITIONAL_REFERENCE: - return "POSITIONAL_REFERENCE"; - case 
ExpressionClass::BETWEEN: - return "BETWEEN"; - case ExpressionClass::BOUND_AGGREGATE: - return "BOUND_AGGREGATE"; - case ExpressionClass::BOUND_CASE: - return "BOUND_CASE"; - case ExpressionClass::BOUND_CAST: - return "BOUND_CAST"; - case ExpressionClass::BOUND_COLUMN_REF: - return "BOUND_COLUMN_REF"; - case ExpressionClass::BOUND_COMPARISON: - return "BOUND_COMPARISON"; - case ExpressionClass::BOUND_CONJUNCTION: - return "BOUND_CONJUNCTION"; - case ExpressionClass::BOUND_CONSTANT: - return "BOUND_CONSTANT"; - case ExpressionClass::BOUND_DEFAULT: - return "BOUND_DEFAULT"; - case ExpressionClass::BOUND_FUNCTION: - return "BOUND_FUNCTION"; - case ExpressionClass::BOUND_OPERATOR: - return "BOUND_OPERATOR"; - case ExpressionClass::BOUND_PARAMETER: - return "BOUND_PARAMETER"; - case ExpressionClass::BOUND_REF: - return "BOUND_REF"; - case ExpressionClass::BOUND_SUBQUERY: - return "BOUND_SUBQUERY"; - case ExpressionClass::BOUND_WINDOW: - return "BOUND_WINDOW"; - case ExpressionClass::BOUND_BETWEEN: - return "BOUND_BETWEEN"; - case ExpressionClass::BOUND_UNNEST: - return "BOUND_UNNEST"; - case ExpressionClass::BOUND_LAMBDA: - return "BOUND_LAMBDA"; - case ExpressionClass::BOUND_EXPRESSION: - return "BOUND_EXPRESSION"; - default: - return "ExpressionClass::!!UNIMPLEMENTED_CASE!!"; +bool CatalogSet::AlterOwnership(ClientContext &context, ChangeOwnershipInfo *info) { + idx_t entry_index; + CatalogEntry *entry; + if (!GetEntryInternal(context, info->name, entry_index, entry)) { + return false; } -} -} // namespace duckdb + auto owner_entry = catalog.GetEntry(context, info->owner_schema, info->owner_name); + if (!owner_entry) { + return false; + } + catalog.dependency_manager->AddOwnership(context, owner_entry, entry); + return true; +} -namespace duckdb { +bool CatalogSet::AlterEntry(ClientContext &context, const string &name, AlterInfo *alter_info) { + auto &transaction = Transaction::GetTransaction(context); + // lock the catalog for writing + lock_guard write_lock(catalog.write_lock); -FileCompressionType FileCompressionTypeFromString(const string &input) { - auto parameter = StringUtil::Lower(input); - if (parameter == "infer" || parameter == "auto") { - return FileCompressionType::AUTO_DETECT; - } else if (parameter == "gzip") { - return FileCompressionType::GZIP; - } else if (parameter == "zstd") { - return FileCompressionType::ZSTD; - } else if (parameter == "uncompressed" || parameter == "none" || parameter.empty()) { - return FileCompressionType::UNCOMPRESSED; - } else { - throw ParserException("Unrecognized file compression type \"%s\"", input); + // first check if the entry exists in the unordered set + idx_t entry_index; + CatalogEntry *entry; + if (!GetEntryInternal(context, name, entry_index, entry)) { + return false; + } + if (entry->internal) { + throw CatalogException("Cannot alter entry \"%s\" because it is an internal system entry", entry->name); } -} - -} // namespace duckdb + // lock this catalog set to disallow reading + lock_guard read_lock(catalog_lock); -namespace duckdb { + // create a new entry and replace the currently stored one + // set the timestamp to the timestamp of the current transaction + // and point it to the updated table node + string original_name = entry->name; + auto value = entry->AlterEntry(context, alter_info); + if (!value) { + // alter failed, but did not result in an error + return true; + } -string JoinTypeToString(JoinType type) { - switch (type) { - case JoinType::LEFT: - return "LEFT"; - case JoinType::RIGHT: - return "RIGHT"; - case 
JoinType::INNER: - return "INNER"; - case JoinType::OUTER: - return "FULL"; - case JoinType::SEMI: - return "SEMI"; - case JoinType::ANTI: - return "ANTI"; - case JoinType::SINGLE: - return "SINGLE"; - case JoinType::MARK: - return "MARK"; - case JoinType::INVALID: // LCOV_EXCL_START - break; + if (value->name != original_name) { + auto mapping_value = GetMapping(context, value->name); + if (mapping_value && !mapping_value->deleted) { + auto entry = GetEntryForTransaction(context, entries[mapping_value->index].get()); + if (!entry->deleted) { + string rename_err_msg = + "Could not rename \"%s\" to \"%s\": another entry with this name already exists!"; + throw CatalogException(rename_err_msg, original_name, value->name); + } + } } - return "INVALID"; -} // LCOV_EXCL_STOP -bool IsLeftOuterJoin(JoinType type) { - return type == JoinType::LEFT || type == JoinType::OUTER; -} + if (value->name != original_name) { + // Do PutMapping and DeleteMapping after dependency check + PutMapping(context, value->name, entry_index); + DeleteMapping(context, original_name); + } -bool IsRightOuterJoin(JoinType type) { - return type == JoinType::OUTER || type == JoinType::RIGHT; -} + value->timestamp = transaction.transaction_id; + value->child = move(entries[entry_index]); + value->child->parent = value.get(); + value->set = this; -} // namespace duckdb + // serialize the AlterInfo into a temporary buffer + BufferedSerializer serializer; + alter_info->Serialize(serializer); + BinaryData serialized_alter = serializer.GetData(); + auto new_entry = value.get(); -namespace duckdb { + // push the old entry in the undo buffer for this transaction + transaction.PushCatalogEntry(value->child.get(), serialized_alter.data.get(), serialized_alter.size); + entries[entry_index] = move(value); -//===--------------------------------------------------------------------===// -// Value <--> String Utilities -//===--------------------------------------------------------------------===// -// LCOV_EXCL_START -string LogicalOperatorToString(LogicalOperatorType type) { - switch (type) { - case LogicalOperatorType::LOGICAL_GET: - return "GET"; - case LogicalOperatorType::LOGICAL_CHUNK_GET: - return "CHUNK_GET"; - case LogicalOperatorType::LOGICAL_DELIM_GET: - return "DELIM_GET"; - case LogicalOperatorType::LOGICAL_EMPTY_RESULT: - return "EMPTY_RESULT"; - case LogicalOperatorType::LOGICAL_EXPRESSION_GET: - return "EXPRESSION_GET"; - case LogicalOperatorType::LOGICAL_ANY_JOIN: - return "ANY_JOIN"; - case LogicalOperatorType::LOGICAL_COMPARISON_JOIN: - return "COMPARISON_JOIN"; - case LogicalOperatorType::LOGICAL_DELIM_JOIN: - return "DELIM_JOIN"; - case LogicalOperatorType::LOGICAL_PROJECTION: - return "PROJECTION"; - case LogicalOperatorType::LOGICAL_FILTER: - return "FILTER"; - case LogicalOperatorType::LOGICAL_AGGREGATE_AND_GROUP_BY: - return "AGGREGATE"; - case LogicalOperatorType::LOGICAL_WINDOW: - return "WINDOW"; - case LogicalOperatorType::LOGICAL_UNNEST: - return "UNNEST"; - case LogicalOperatorType::LOGICAL_LIMIT: - return "LIMIT"; - case LogicalOperatorType::LOGICAL_ORDER_BY: - return "ORDER_BY"; - case LogicalOperatorType::LOGICAL_TOP_N: - return "TOP_N"; - case LogicalOperatorType::LOGICAL_SAMPLE: - return "SAMPLE"; - case LogicalOperatorType::LOGICAL_LIMIT_PERCENT: - return "LIMIT_PERCENT"; - case LogicalOperatorType::LOGICAL_COPY_TO_FILE: - return "COPY_TO_FILE"; - case LogicalOperatorType::LOGICAL_JOIN: - return "JOIN"; - case LogicalOperatorType::LOGICAL_CROSS_PRODUCT: - return "CROSS_PRODUCT"; - case 
LogicalOperatorType::LOGICAL_UNION: - return "UNION"; - case LogicalOperatorType::LOGICAL_EXCEPT: - return "EXCEPT"; - case LogicalOperatorType::LOGICAL_INTERSECT: - return "INTERSECT"; - case LogicalOperatorType::LOGICAL_INSERT: - return "INSERT"; - case LogicalOperatorType::LOGICAL_DISTINCT: - return "DISTINCT"; - case LogicalOperatorType::LOGICAL_DELETE: - return "DELETE"; - case LogicalOperatorType::LOGICAL_UPDATE: - return "UPDATE"; - case LogicalOperatorType::LOGICAL_PREPARE: - return "PREPARE"; - case LogicalOperatorType::LOGICAL_DUMMY_SCAN: - return "DUMMY_SCAN"; - case LogicalOperatorType::LOGICAL_CREATE_INDEX: - return "CREATE_INDEX"; - case LogicalOperatorType::LOGICAL_CREATE_TABLE: - return "CREATE_TABLE"; - case LogicalOperatorType::LOGICAL_CREATE_MACRO: - return "CREATE_MACRO"; - case LogicalOperatorType::LOGICAL_EXPLAIN: - return "EXPLAIN"; - case LogicalOperatorType::LOGICAL_EXECUTE: - return "EXECUTE"; - case LogicalOperatorType::LOGICAL_VACUUM: - return "VACUUM"; - case LogicalOperatorType::LOGICAL_RECURSIVE_CTE: - return "REC_CTE"; - case LogicalOperatorType::LOGICAL_CTE_REF: - return "CTE_SCAN"; - case LogicalOperatorType::LOGICAL_SHOW: - return "SHOW"; - case LogicalOperatorType::LOGICAL_ALTER: - return "ALTER"; - case LogicalOperatorType::LOGICAL_CREATE_SEQUENCE: - return "CREATE_SEQUENCE"; - case LogicalOperatorType::LOGICAL_CREATE_TYPE: - return "CREATE_TYPE"; - case LogicalOperatorType::LOGICAL_CREATE_VIEW: - return "CREATE_VIEW"; - case LogicalOperatorType::LOGICAL_CREATE_SCHEMA: - return "CREATE_SCHEMA"; - case LogicalOperatorType::LOGICAL_DROP: - return "DROP"; - case LogicalOperatorType::LOGICAL_PRAGMA: - return "PRAGMA"; - case LogicalOperatorType::LOGICAL_TRANSACTION: - return "TRANSACTION"; - case LogicalOperatorType::LOGICAL_EXPORT: - return "EXPORT"; - case LogicalOperatorType::LOGICAL_SET: - return "SET"; - case LogicalOperatorType::LOGICAL_LOAD: - return "LOAD"; - case LogicalOperatorType::LOGICAL_INVALID: - break; - } - return "INVALID"; + // Check the dependency manager to verify that there are no conflicting dependencies with this alter + // Note that we do this AFTER the new entry has been entirely set up in the catalog set + // that is because in case the alter fails because of a dependency conflict, we need to be able to cleanly roll back + // to the old entry. + catalog.dependency_manager->AlterObject(context, entry, new_entry); + + return true; } -// LCOV_EXCL_STOP -} // namespace duckdb +void CatalogSet::DropEntryDependencies(ClientContext &context, idx_t entry_index, CatalogEntry &entry, bool cascade) { + // Stores the deleted value of the entry before starting the process + EntryDropper dropper(*this, entry_index); + + // To correctly delete the object and its dependencies, it temporarily is set to deleted. 
+ entries[entry_index].get()->deleted = true; + // check any dependencies of this object + entry.catalog->dependency_manager->DropObject(context, &entry, cascade); + // dropper destructor is called here + // the destructor makes sure to return the value to the previous state + // dropper.~EntryDropper() +} +void CatalogSet::DropEntryInternal(ClientContext &context, idx_t entry_index, CatalogEntry &entry, bool cascade) { + auto &transaction = Transaction::GetTransaction(context); -namespace duckdb { + DropEntryDependencies(context, entry_index, entry, cascade); -struct DefaultOptimizerType { - const char *name; - OptimizerType type; -}; + // create a new entry and replace the currently stored one + // set the timestamp to the timestamp of the current transaction + // and point it at the dummy node + auto value = make_unique(CatalogType::DELETED_ENTRY, entry.catalog, entry.name); + value->timestamp = transaction.transaction_id; + value->child = move(entries[entry_index]); + value->child->parent = value.get(); + value->set = this; + value->deleted = true; -static DefaultOptimizerType internal_optimizer_types[] = { - {"expression_rewriter", OptimizerType::EXPRESSION_REWRITER}, - {"filter_pullup", OptimizerType::FILTER_PULLUP}, - {"filter_pushdown", OptimizerType::FILTER_PUSHDOWN}, - {"regex_range", OptimizerType::REGEX_RANGE}, - {"in_clause", OptimizerType::IN_CLAUSE}, - {"join_order", OptimizerType::JOIN_ORDER}, - {"deliminator", OptimizerType::DELIMINATOR}, - {"unused_columns", OptimizerType::UNUSED_COLUMNS}, - {"statistics_propagation", OptimizerType::STATISTICS_PROPAGATION}, - {"common_subexpressions", OptimizerType::COMMON_SUBEXPRESSIONS}, - {"common_aggregate", OptimizerType::COMMON_AGGREGATE}, - {"column_lifetime", OptimizerType::COLUMN_LIFETIME}, - {"top_n", OptimizerType::TOP_N}, - {"reorder_filter", OptimizerType::REORDER_FILTER}, - {"extension", OptimizerType::EXTENSION}, - {nullptr, OptimizerType::INVALID}}; + // push the old entry in the undo buffer for this transaction + transaction.PushCatalogEntry(value->child.get()); -string OptimizerTypeToString(OptimizerType type) { - for (idx_t i = 0; internal_optimizer_types[i].name; i++) { - if (internal_optimizer_types[i].type == type) { - return internal_optimizer_types[i].name; - } - } - throw InternalException("Invalid optimizer type"); + entries[entry_index] = move(value); } -OptimizerType OptimizerTypeFromString(const string &str) { - for (idx_t i = 0; internal_optimizer_types[i].name; i++) { - if (internal_optimizer_types[i].name == str) { - return internal_optimizer_types[i].type; - } +bool CatalogSet::DropEntry(ClientContext &context, const string &name, bool cascade) { + // lock the catalog for writing + lock_guard write_lock(catalog.write_lock); + // we can only delete an entry that exists + idx_t entry_index; + CatalogEntry *entry; + if (!GetEntryInternal(context, name, entry_index, entry)) { + return false; } - // optimizer not found, construct candidate list - vector optimizer_names; - for (idx_t i = 0; internal_optimizer_types[i].name; i++) { - optimizer_names.emplace_back(internal_optimizer_types[i].name); + if (entry->internal) { + throw CatalogException("Cannot drop entry \"%s\" because it is an internal system entry", entry->name); } - throw ParserException("Optimizer type \"%s\" not recognized\n%s", str, - StringUtil::CandidatesErrorMessage(optimizer_names, str, "Candidate optimizers")); -} - -} // namespace duckdb - -namespace duckdb { + DropEntryInternal(context, entry_index, *entry, cascade); + return true; +} -// 
LCOV_EXCL_START -string PhysicalOperatorToString(PhysicalOperatorType type) { - switch (type) { - case PhysicalOperatorType::TABLE_SCAN: - return "TABLE_SCAN"; - case PhysicalOperatorType::DUMMY_SCAN: - return "DUMMY_SCAN"; - case PhysicalOperatorType::CHUNK_SCAN: - return "CHUNK_SCAN"; - case PhysicalOperatorType::COLUMN_DATA_SCAN: - return "COLUMN_DATA_SCAN"; - case PhysicalOperatorType::DELIM_SCAN: - return "DELIM_SCAN"; - case PhysicalOperatorType::ORDER_BY: - return "ORDER_BY"; - case PhysicalOperatorType::LIMIT: - return "LIMIT"; - case PhysicalOperatorType::LIMIT_PERCENT: - return "LIMIT_PERCENT"; - case PhysicalOperatorType::STREAMING_LIMIT: - return "STREAMING_LIMIT"; - case PhysicalOperatorType::RESERVOIR_SAMPLE: - return "RESERVOIR_SAMPLE"; - case PhysicalOperatorType::STREAMING_SAMPLE: - return "STREAMING_SAMPLE"; - case PhysicalOperatorType::TOP_N: - return "TOP_N"; - case PhysicalOperatorType::WINDOW: - return "WINDOW"; - case PhysicalOperatorType::STREAMING_WINDOW: - return "STREAMING_WINDOW"; - case PhysicalOperatorType::UNNEST: - return "UNNEST"; - case PhysicalOperatorType::UNGROUPED_AGGREGATE: - return "UNGROUPED_AGGREGATE"; - case PhysicalOperatorType::HASH_GROUP_BY: - return "HASH_GROUP_BY"; - case PhysicalOperatorType::PERFECT_HASH_GROUP_BY: - return "PERFECT_HASH_GROUP_BY"; - case PhysicalOperatorType::FILTER: - return "FILTER"; - case PhysicalOperatorType::PROJECTION: - return "PROJECTION"; - case PhysicalOperatorType::COPY_TO_FILE: - return "COPY_TO_FILE"; - case PhysicalOperatorType::DELIM_JOIN: - return "DELIM_JOIN"; - case PhysicalOperatorType::BLOCKWISE_NL_JOIN: - return "BLOCKWISE_NL_JOIN"; - case PhysicalOperatorType::NESTED_LOOP_JOIN: - return "NESTED_LOOP_JOIN"; - case PhysicalOperatorType::HASH_JOIN: - return "HASH_JOIN"; - case PhysicalOperatorType::INDEX_JOIN: - return "INDEX_JOIN"; - case PhysicalOperatorType::PIECEWISE_MERGE_JOIN: - return "PIECEWISE_MERGE_JOIN"; - case PhysicalOperatorType::IE_JOIN: - return "IE_JOIN"; - case PhysicalOperatorType::CROSS_PRODUCT: - return "CROSS_PRODUCT"; - case PhysicalOperatorType::UNION: - return "UNION"; - case PhysicalOperatorType::INSERT: - return "INSERT"; - case PhysicalOperatorType::DELETE_OPERATOR: - return "DELETE"; - case PhysicalOperatorType::UPDATE: - return "UPDATE"; - case PhysicalOperatorType::EMPTY_RESULT: - return "EMPTY_RESULT"; - case PhysicalOperatorType::CREATE_TABLE: - return "CREATE_TABLE"; - case PhysicalOperatorType::CREATE_TABLE_AS: - return "CREATE_TABLE_AS"; - case PhysicalOperatorType::CREATE_INDEX: - return "CREATE_INDEX"; - case PhysicalOperatorType::EXPLAIN: - return "EXPLAIN"; - case PhysicalOperatorType::EXPLAIN_ANALYZE: - return "EXPLAIN_ANALYZE"; - case PhysicalOperatorType::EXECUTE: - return "EXECUTE"; - case PhysicalOperatorType::VACUUM: - return "VACUUM"; - case PhysicalOperatorType::RECURSIVE_CTE: - return "REC_CTE"; - case PhysicalOperatorType::RECURSIVE_CTE_SCAN: - return "REC_CTE_SCAN"; - case PhysicalOperatorType::EXPRESSION_SCAN: - return "EXPRESSION_SCAN"; - case PhysicalOperatorType::ALTER: - return "ALTER"; - case PhysicalOperatorType::CREATE_SEQUENCE: - return "CREATE_SEQUENCE"; - case PhysicalOperatorType::CREATE_VIEW: - return "CREATE_VIEW"; - case PhysicalOperatorType::CREATE_SCHEMA: - return "CREATE_SCHEMA"; - case PhysicalOperatorType::CREATE_MACRO: - return "CREATE_MACRO"; - case PhysicalOperatorType::DROP: - return "DROP"; - case PhysicalOperatorType::PRAGMA: - return "PRAGMA"; - case PhysicalOperatorType::TRANSACTION: - return "TRANSACTION"; - case 
PhysicalOperatorType::PREPARE: - return "PREPARE"; - case PhysicalOperatorType::EXPORT: - return "EXPORT"; - case PhysicalOperatorType::SET: - return "SET"; - case PhysicalOperatorType::LOAD: - return "LOAD"; - case PhysicalOperatorType::INOUT_FUNCTION: - return "INOUT_FUNCTION"; - case PhysicalOperatorType::CREATE_TYPE: - return "CREATE_TYPE"; - case PhysicalOperatorType::RESULT_COLLECTOR: - return "RESULT_COLLECTOR"; - case PhysicalOperatorType::INVALID: - break; +void CatalogSet::CleanupEntry(CatalogEntry *catalog_entry) { + // destroy the backed up entry: it is no longer required + D_ASSERT(catalog_entry->parent); + if (catalog_entry->parent->type != CatalogType::UPDATED_ENTRY) { + lock_guard lock(catalog_lock); + if (!catalog_entry->deleted) { + // delete the entry from the dependency manager, if it is not deleted yet + catalog_entry->catalog->dependency_manager->EraseObject(catalog_entry); + } + auto parent = catalog_entry->parent; + parent->child = move(catalog_entry->child); + if (parent->deleted && !parent->child && !parent->parent) { + auto mapping_entry = mapping.find(parent->name); + D_ASSERT(mapping_entry != mapping.end()); + auto index = mapping_entry->second->index; + auto entry = entries.find(index); + D_ASSERT(entry != entries.end()); + if (entry->second.get() == parent) { + mapping.erase(mapping_entry); + entries.erase(entry); + } + } } - return "INVALID"; } -// LCOV_EXCL_STOP - -} // namespace duckdb - - +bool CatalogSet::HasConflict(ClientContext &context, transaction_t timestamp) { + auto &transaction = Transaction::GetTransaction(context); + return (timestamp >= TRANSACTION_ID_START && timestamp != transaction.transaction_id) || + (timestamp < TRANSACTION_ID_START && timestamp > transaction.start_time); +} -namespace duckdb { +MappingValue *CatalogSet::GetMapping(ClientContext &context, const string &name, bool get_latest) { + MappingValue *mapping_value; + auto entry = mapping.find(name); + if (entry != mapping.end()) { + mapping_value = entry->second.get(); + } else { -// LCOV_EXCL_START -string RelationTypeToString(RelationType type) { - switch (type) { - case RelationType::TABLE_RELATION: - return "TABLE_RELATION"; - case RelationType::PROJECTION_RELATION: - return "PROJECTION_RELATION"; - case RelationType::FILTER_RELATION: - return "FILTER_RELATION"; - case RelationType::EXPLAIN_RELATION: - return "EXPLAIN_RELATION"; - case RelationType::CROSS_PRODUCT_RELATION: - return "CROSS_PRODUCT_RELATION"; - case RelationType::JOIN_RELATION: - return "JOIN_RELATION"; - case RelationType::AGGREGATE_RELATION: - return "AGGREGATE_RELATION"; - case RelationType::SET_OPERATION_RELATION: - return "SET_OPERATION_RELATION"; - case RelationType::DISTINCT_RELATION: - return "DISTINCT_RELATION"; - case RelationType::LIMIT_RELATION: - return "LIMIT_RELATION"; - case RelationType::ORDER_RELATION: - return "ORDER_RELATION"; - case RelationType::CREATE_VIEW_RELATION: - return "CREATE_VIEW_RELATION"; - case RelationType::CREATE_TABLE_RELATION: - return "CREATE_TABLE_RELATION"; - case RelationType::INSERT_RELATION: - return "INSERT_RELATION"; - case RelationType::VALUE_LIST_RELATION: - return "VALUE_LIST_RELATION"; - case RelationType::DELETE_RELATION: - return "DELETE_RELATION"; - case RelationType::UPDATE_RELATION: - return "UPDATE_RELATION"; - case RelationType::WRITE_CSV_RELATION: - return "WRITE_CSV_RELATION"; - case RelationType::READ_CSV_RELATION: - return "READ_CSV_RELATION"; - case RelationType::SUBQUERY_RELATION: - return "SUBQUERY_RELATION"; - case 
RelationType::TABLE_FUNCTION_RELATION: - return "TABLE_FUNCTION_RELATION"; - case RelationType::VIEW_RELATION: - return "VIEW_RELATION"; - case RelationType::QUERY_RELATION: - return "QUERY_RELATION"; - case RelationType::INVALID_RELATION: - break; + return nullptr; } - return "INVALID_RELATION"; + if (get_latest) { + return mapping_value; + } + while (mapping_value->child) { + if (UseTimestamp(context, mapping_value->timestamp)) { + break; + } + mapping_value = mapping_value->child.get(); + D_ASSERT(mapping_value); + } + return mapping_value; } -// LCOV_EXCL_STOP - -} // namespace duckdb +void CatalogSet::PutMapping(ClientContext &context, const string &name, idx_t entry_index) { + auto entry = mapping.find(name); + auto new_value = make_unique(entry_index); + new_value->timestamp = Transaction::GetTransaction(context).transaction_id; + if (entry != mapping.end()) { + if (HasConflict(context, entry->second->timestamp)) { + throw TransactionException("Catalog write-write conflict on name \"%s\"", name); + } + new_value->child = move(entry->second); + new_value->child->parent = new_value.get(); + } + mapping[name] = move(new_value); +} -namespace duckdb { +void CatalogSet::DeleteMapping(ClientContext &context, const string &name) { + auto entry = mapping.find(name); + D_ASSERT(entry != mapping.end()); + auto delete_marker = make_unique(entry->second->index); + delete_marker->deleted = true; + delete_marker->timestamp = Transaction::GetTransaction(context).transaction_id; + delete_marker->child = move(entry->second); + delete_marker->child->parent = delete_marker.get(); + mapping[name] = move(delete_marker); +} -// LCOV_EXCL_START -string StatementTypeToString(StatementType type) { - switch (type) { - case StatementType::SELECT_STATEMENT: - return "SELECT"; - case StatementType::INSERT_STATEMENT: - return "INSERT"; - case StatementType::UPDATE_STATEMENT: - return "UPDATE"; - case StatementType::DELETE_STATEMENT: - return "DELETE"; - case StatementType::PREPARE_STATEMENT: - return "PREPARE"; - case StatementType::EXECUTE_STATEMENT: - return "EXECUTE"; - case StatementType::ALTER_STATEMENT: - return "ALTER"; - case StatementType::TRANSACTION_STATEMENT: - return "TRANSACTION"; - case StatementType::COPY_STATEMENT: - return "COPY"; - case StatementType::ANALYZE_STATEMENT: - return "ANALYZE"; - case StatementType::VARIABLE_SET_STATEMENT: - return "VARIABLE_SET"; - case StatementType::CREATE_FUNC_STATEMENT: - return "CREATE_FUNC"; - case StatementType::EXPLAIN_STATEMENT: - return "EXPLAIN"; - case StatementType::CREATE_STATEMENT: - return "CREATE"; - case StatementType::DROP_STATEMENT: - return "DROP"; - case StatementType::PRAGMA_STATEMENT: - return "PRAGMA"; - case StatementType::SHOW_STATEMENT: - return "SHOW"; - case StatementType::VACUUM_STATEMENT: - return "VACUUM"; - case StatementType::RELATION_STATEMENT: - return "RELATION"; - case StatementType::EXPORT_STATEMENT: - return "EXPORT"; - case StatementType::CALL_STATEMENT: - return "CALL"; - case StatementType::SET_STATEMENT: - return "SET"; - case StatementType::LOAD_STATEMENT: - return "LOAD"; - case StatementType::EXTENSION_STATEMENT: - return "EXTENSION"; - case StatementType::LOGICAL_PLAN_STATEMENT: - return "LOGICAL_PLAN"; - case StatementType::INVALID_STATEMENT: - break; +bool CatalogSet::UseTimestamp(ClientContext &context, transaction_t timestamp) { + auto &transaction = Transaction::GetTransaction(context); + if (timestamp == transaction.transaction_id) { + // we created this version + return true; } - return "INVALID"; + if 
(timestamp < transaction.start_time) { + // this version was commited before we started the transaction + return true; + } + return false; } -string StatementReturnTypeToString(StatementReturnType type) { - switch (type) { - case StatementReturnType::QUERY_RESULT: - return "QUERY_RESULT"; - case StatementReturnType::CHANGED_ROWS: - return "CHANGED_ROWS"; - case StatementReturnType::NOTHING: - return "NOTHING"; +CatalogEntry *CatalogSet::GetEntryForTransaction(ClientContext &context, CatalogEntry *current) { + while (current->child) { + if (UseTimestamp(context, current->timestamp)) { + break; + } + current = current->child.get(); + D_ASSERT(current); } - return "INVALID"; + return current; } -// LCOV_EXCL_STOP - -} // namespace duckdb - - - - - - -#ifdef DUCKDB_CRASH_ON_ASSERT - -#include -#include -#endif - -namespace duckdb { -Exception::Exception(const string &msg) : std::exception(), type(ExceptionType::INVALID), raw_message_(msg) { - exception_message_ = msg; +CatalogEntry *CatalogSet::GetCommittedEntry(CatalogEntry *current) { + while (current->child) { + if (current->timestamp < TRANSACTION_ID_START) { + // this entry is committed: use it + break; + } + current = current->child.get(); + D_ASSERT(current); + } + return current; } -Exception::Exception(ExceptionType exception_type, const string &message) - : std::exception(), type(exception_type), raw_message_(message) { - exception_message_ = ExceptionTypeToString(exception_type) + " Error: " + message; -} +pair CatalogSet::SimilarEntry(ClientContext &context, const string &name) { + unique_lock lock(catalog_lock); + CreateDefaultEntries(context, lock); -const char *Exception::what() const noexcept { - return exception_message_.c_str(); + string result; + idx_t current_score = (idx_t)-1; + for (auto &kv : mapping) { + auto mapping_value = GetMapping(context, kv.first); + if (mapping_value && !mapping_value->deleted) { + auto ldist = StringUtil::LevenshteinDistance(kv.first, name); + if (ldist < current_score) { + current_score = ldist; + result = kv.first; + } + } + } + return {result, current_score}; } -const string &Exception::RawMessage() const { - return raw_message_; -} +CatalogEntry *CatalogSet::CreateEntryInternal(ClientContext &context, unique_ptr entry) { + if (mapping.find(entry->name) != mapping.end()) { + return nullptr; + } + auto &name = entry->name; + auto entry_index = current_entry++; + auto catalog_entry = entry.get(); -bool Exception::UncaughtException() { -#if __cplusplus >= 201703L - return std::uncaught_exceptions() > 0; -#else - return std::uncaught_exception(); -#endif -} + entry->set = this; + entry->timestamp = 0; -string Exception::ConstructMessageRecursive(const string &msg, vector &values) { - return ExceptionFormatValue::Format(msg, values); + PutMapping(context, name, entry_index); + mapping[name]->timestamp = 0; + entries[entry_index] = move(entry); + return catalog_entry; } -string Exception::ExceptionTypeToString(ExceptionType type) { - switch (type) { - case ExceptionType::INVALID: - return "Invalid"; - case ExceptionType::OUT_OF_RANGE: - return "Out of Range"; - case ExceptionType::CONVERSION: - return "Conversion"; - case ExceptionType::UNKNOWN_TYPE: - return "Unknown Type"; - case ExceptionType::DECIMAL: - return "Decimal"; - case ExceptionType::MISMATCH_TYPE: - return "Mismatch Type"; - case ExceptionType::DIVIDE_BY_ZERO: - return "Divide by Zero"; - case ExceptionType::OBJECT_SIZE: - return "Object Size"; - case ExceptionType::INVALID_TYPE: - return "Invalid type"; - case 
ExceptionType::SERIALIZATION: - return "Serialization"; - case ExceptionType::TRANSACTION: - return "TransactionContext"; - case ExceptionType::NOT_IMPLEMENTED: - return "Not implemented"; - case ExceptionType::EXPRESSION: - return "Expression"; - case ExceptionType::CATALOG: - return "Catalog"; - case ExceptionType::PARSER: - return "Parser"; - case ExceptionType::BINDER: - return "Binder"; - case ExceptionType::PLANNER: - return "Planner"; - case ExceptionType::SCHEDULER: - return "Scheduler"; - case ExceptionType::EXECUTOR: - return "Executor"; - case ExceptionType::CONSTRAINT: - return "Constraint"; - case ExceptionType::INDEX: - return "Index"; - case ExceptionType::STAT: - return "Stat"; - case ExceptionType::CONNECTION: - return "Connection"; - case ExceptionType::SYNTAX: - return "Syntax"; - case ExceptionType::SETTINGS: - return "Settings"; - case ExceptionType::OPTIMIZER: - return "Optimizer"; - case ExceptionType::NULL_POINTER: - return "NullPointer"; - case ExceptionType::IO: - return "IO"; - case ExceptionType::INTERRUPT: - return "INTERRUPT"; - case ExceptionType::FATAL: - return "FATAL"; - case ExceptionType::INTERNAL: - return "INTERNAL"; - case ExceptionType::INVALID_INPUT: - return "Invalid Input"; - case ExceptionType::OUT_OF_MEMORY: - return "Out of Memory"; - case ExceptionType::PERMISSION: - return "Permission"; - case ExceptionType::PARAMETER_NOT_RESOLVED: - return "Parameter Not Resolved"; - case ExceptionType::PARAMETER_NOT_ALLOWED: - return "Parameter Not Allowed"; - default: - return "Unknown"; +CatalogEntry *CatalogSet::CreateDefaultEntry(ClientContext &context, const string &name, unique_lock &lock) { + // no entry found with this name, check for defaults + if (!defaults || defaults->created_all_entries) { + // no defaults either: return null + return nullptr; } -} + // this catalog set has a default map defined + // check if there is a default entry that we can create with this name + lock.unlock(); + auto entry = defaults->CreateDefaultEntry(context, name); -void Exception::ThrowAsTypeWithMessage(ExceptionType type, const string &message) { - switch (type) { - case ExceptionType::OUT_OF_RANGE: - throw OutOfRangeException(message); - case ExceptionType::CONVERSION: - throw ConversionException(message); // FIXME: make a separation between Conversion/Cast exception? 
- case ExceptionType::INVALID_TYPE: - throw InvalidTypeException(message); - case ExceptionType::MISMATCH_TYPE: - throw TypeMismatchException(message); - case ExceptionType::TRANSACTION: - throw TransactionException(message); - case ExceptionType::NOT_IMPLEMENTED: - throw NotImplementedException(message); - case ExceptionType::CATALOG: - throw CatalogException(message); - case ExceptionType::CONNECTION: - throw ConnectionException(message); - case ExceptionType::PARSER: - throw ParserException(message); - case ExceptionType::PERMISSION: - throw PermissionException(message); - case ExceptionType::SYNTAX: - throw SyntaxException(message); - case ExceptionType::CONSTRAINT: - throw ConstraintException(message); - case ExceptionType::BINDER: - throw BinderException(message); - case ExceptionType::IO: - throw IOException(message); - case ExceptionType::SERIALIZATION: - throw SerializationException(message); - case ExceptionType::INTERRUPT: - throw InterruptException(); - case ExceptionType::INTERNAL: - throw InternalException(message); - case ExceptionType::INVALID_INPUT: - throw InvalidInputException(message); - case ExceptionType::OUT_OF_MEMORY: - throw OutOfMemoryException(message); - case ExceptionType::PARAMETER_NOT_ALLOWED: - throw ParameterNotAllowedException(message); - case ExceptionType::PARAMETER_NOT_RESOLVED: - throw ParameterNotResolvedException(); - case ExceptionType::FATAL: - throw FatalException(message); - default: - throw Exception(type, message); + lock.lock(); + if (!entry) { + // no default entry + return nullptr; + } + // there is a default entry! create it + auto result = CreateEntryInternal(context, move(entry)); + if (result) { + return result; } + // we found a default entry, but failed + // this means somebody else created the entry first + // just retry? 
+ lock.unlock(); + return GetEntry(context, name); } -StandardException::StandardException(ExceptionType exception_type, const string &message) - : Exception(exception_type, message) { -} +CatalogEntry *CatalogSet::GetEntry(ClientContext &context, const string &name) { + unique_lock lock(catalog_lock); + auto mapping_value = GetMapping(context, name); + if (mapping_value != nullptr && !mapping_value->deleted) { + // we found an entry for this name + // check the version numbers -CastException::CastException(const PhysicalType orig_type, const PhysicalType new_type) - : Exception(ExceptionType::CONVERSION, - "Type " + TypeIdToString(orig_type) + " can't be cast as " + TypeIdToString(new_type)) { + auto catalog_entry = entries[mapping_value->index].get(); + CatalogEntry *current = GetEntryForTransaction(context, catalog_entry); + if (current->deleted || (current->name != name && !UseTimestamp(context, mapping_value->timestamp))) { + return nullptr; + } + return current; + } + return CreateDefaultEntry(context, name, lock); } -CastException::CastException(const LogicalType &orig_type, const LogicalType &new_type) - : Exception(ExceptionType::CONVERSION, - "Type " + orig_type.ToString() + " can't be cast as " + new_type.ToString()) { +void CatalogSet::UpdateTimestamp(CatalogEntry *entry, transaction_t timestamp) { + entry->timestamp = timestamp; + mapping[entry->name]->timestamp = timestamp; } -CastException::CastException(const string &msg) : Exception(ExceptionType::CONVERSION, msg) { +void CatalogSet::AdjustUserDependency(CatalogEntry *entry, ColumnDefinition &column, bool remove) { + CatalogEntry *user_type_catalog = (CatalogEntry *)LogicalType::GetCatalog(column.Type()); + if (user_type_catalog) { + if (remove) { + catalog.dependency_manager->dependents_map[user_type_catalog].erase(entry->parent); + catalog.dependency_manager->dependencies_map[entry->parent].erase(user_type_catalog); + } else { + catalog.dependency_manager->dependents_map[user_type_catalog].insert(entry); + catalog.dependency_manager->dependencies_map[entry].insert(user_type_catalog); + } + } } -ValueOutOfRangeException::ValueOutOfRangeException(const int64_t value, const PhysicalType orig_type, - const PhysicalType new_type) - : Exception(ExceptionType::CONVERSION, "Type " + TypeIdToString(orig_type) + " with value " + - to_string((intmax_t)value) + - " can't be cast because the value is out of range " - "for the destination type " + - TypeIdToString(new_type)) { +void CatalogSet::AdjustDependency(CatalogEntry *entry, TableCatalogEntry *table, ColumnDefinition &column, + bool remove) { + bool found = false; + if (column.Type().id() == LogicalTypeId::ENUM) { + for (auto &old_column : table->columns.Logical()) { + if (old_column.Name() == column.Name() && old_column.Type().id() != LogicalTypeId::ENUM) { + AdjustUserDependency(entry, column, remove); + found = true; + } + } + if (!found) { + AdjustUserDependency(entry, column, remove); + } + } else if (!(column.Type().GetAlias().empty())) { + auto alias = column.Type().GetAlias(); + for (auto &old_column : table->columns.Logical()) { + auto old_alias = old_column.Type().GetAlias(); + if (old_column.Name() == column.Name() && old_alias != alias) { + AdjustUserDependency(entry, column, remove); + found = true; + } + } + if (!found) { + AdjustUserDependency(entry, column, remove); + } + } } -ValueOutOfRangeException::ValueOutOfRangeException(const double value, const PhysicalType orig_type, - const PhysicalType new_type) - : Exception(ExceptionType::CONVERSION, "Type " + 
TypeIdToString(orig_type) + " with value " + to_string(value) + - " can't be cast because the value is out of range " - "for the destination type " + - TypeIdToString(new_type)) { -} +void CatalogSet::AdjustTableDependencies(CatalogEntry *entry) { + if (entry->type == CatalogType::TABLE_ENTRY && entry->parent->type == CatalogType::TABLE_ENTRY) { + // If it's a table entry we have to check for possibly removing or adding user type dependencies + auto old_table = (TableCatalogEntry *)entry->parent; + auto new_table = (TableCatalogEntry *)entry; -ValueOutOfRangeException::ValueOutOfRangeException(const hugeint_t value, const PhysicalType orig_type, - const PhysicalType new_type) - : Exception(ExceptionType::CONVERSION, "Type " + TypeIdToString(orig_type) + " with value " + value.ToString() + - " can't be cast because the value is out of range " - "for the destination type " + - TypeIdToString(new_type)) { + for (idx_t i = 0; i < new_table->columns.LogicalColumnCount(); i++) { + auto &new_column = new_table->columns.GetColumnMutable(LogicalIndex(i)); + AdjustDependency(entry, old_table, new_column, false); + } + for (idx_t i = 0; i < old_table->columns.LogicalColumnCount(); i++) { + auto &old_column = old_table->columns.GetColumnMutable(LogicalIndex(i)); + AdjustDependency(entry, new_table, old_column, true); + } + } } -ValueOutOfRangeException::ValueOutOfRangeException(const PhysicalType var_type, const idx_t length) - : Exception(ExceptionType::OUT_OF_RANGE, - "The value is too long to fit into type " + TypeIdToString(var_type) + "(" + to_string(length) + ")") { -} +void CatalogSet::Undo(CatalogEntry *entry) { + lock_guard write_lock(catalog.write_lock); -ValueOutOfRangeException::ValueOutOfRangeException(const string &msg) : Exception(ExceptionType::OUT_OF_RANGE, msg) { -} + lock_guard lock(catalog_lock); -ConversionException::ConversionException(const string &msg) : Exception(ExceptionType::CONVERSION, msg) { -} + // entry has to be restored + // and entry->parent has to be removed ("rolled back") -InvalidTypeException::InvalidTypeException(PhysicalType type, const string &msg) - : Exception(ExceptionType::INVALID_TYPE, "Invalid Type [" + TypeIdToString(type) + "]: " + msg) { -} + // i.e. 
we have to place (entry) as (entry->parent) again + auto &to_be_removed_node = entry->parent; -InvalidTypeException::InvalidTypeException(const LogicalType &type, const string &msg) - : Exception(ExceptionType::INVALID_TYPE, "Invalid Type [" + type.ToString() + "]: " + msg) { -} + AdjustTableDependencies(entry); -InvalidTypeException::InvalidTypeException(const string &msg) : Exception(ExceptionType::INVALID_TYPE, msg) { -} + if (!to_be_removed_node->deleted) { + // delete the entry from the dependency manager as well + catalog.dependency_manager->EraseObject(to_be_removed_node); + } + if (entry->name != to_be_removed_node->name) { + // rename: clean up the new name when the rename is rolled back + auto removed_entry = mapping.find(to_be_removed_node->name); + if (removed_entry->second->child) { + removed_entry->second->child->parent = nullptr; + mapping[to_be_removed_node->name] = move(removed_entry->second->child); + } else { + mapping.erase(removed_entry); + } + } + if (to_be_removed_node->parent) { + // if the to be removed node has a parent, set the child pointer to the + // to be restored node + to_be_removed_node->parent->child = move(to_be_removed_node->child); + entry->parent = to_be_removed_node->parent; + } else { + // otherwise we need to update the base entry tables + auto &name = entry->name; + to_be_removed_node->child->SetAsRoot(); + entries[mapping[name]->index] = move(to_be_removed_node->child); + entry->parent = nullptr; + } -TypeMismatchException::TypeMismatchException(const PhysicalType type_1, const PhysicalType type_2, const string &msg) - : Exception(ExceptionType::MISMATCH_TYPE, - "Type " + TypeIdToString(type_1) + " does not match with " + TypeIdToString(type_2) + ". " + msg) { + // restore the name if it was deleted + auto restored_entry = mapping.find(entry->name); + if (restored_entry->second->deleted || entry->type == CatalogType::INVALID) { + if (restored_entry->second->child) { + restored_entry->second->child->parent = nullptr; + mapping[entry->name] = move(restored_entry->second->child); + } else { + mapping.erase(restored_entry); + } + } + // we mark the catalog as being modified, since this action can lead to e.g. tables being dropped + entry->catalog->ModifyCatalog(); } -TypeMismatchException::TypeMismatchException(const LogicalType &type_1, const LogicalType &type_2, const string &msg) - : Exception(ExceptionType::MISMATCH_TYPE, - "Type " + type_1.ToString() + " does not match with " + type_2.ToString() + ". " + msg) { -} +void CatalogSet::CreateDefaultEntries(ClientContext &context, unique_lock &lock) { + if (!defaults || defaults->created_all_entries) { + return; + } + // this catalog set has a default set defined: + auto default_entries = defaults->GetDefaultEntries(); + for (auto &default_entry : default_entries) { + auto map_entry = mapping.find(default_entry); + if (map_entry == mapping.end()) { + // we unlock during the CreateEntry, since it might reference other catalog sets... 
+ // specifically for views this can happen since the view will be bound + lock.unlock(); + auto entry = defaults->CreateDefaultEntry(context, default_entry); + if (!entry) { + throw InternalException("Failed to create default entry for %s", default_entry); + } -TypeMismatchException::TypeMismatchException(const string &msg) : Exception(ExceptionType::MISMATCH_TYPE, msg) { + lock.lock(); + CreateEntryInternal(context, move(entry)); + } + } + defaults->created_all_entries = true; } -TransactionException::TransactionException(const string &msg) : Exception(ExceptionType::TRANSACTION, msg) { -} +void CatalogSet::Scan(ClientContext &context, const std::function &callback) { + // lock the catalog set + unique_lock lock(catalog_lock); + CreateDefaultEntries(context, lock); -NotImplementedException::NotImplementedException(const string &msg) : Exception(ExceptionType::NOT_IMPLEMENTED, msg) { + for (auto &kv : entries) { + auto entry = kv.second.get(); + entry = GetEntryForTransaction(context, entry); + if (!entry->deleted) { + callback(entry); + } + } } -OutOfRangeException::OutOfRangeException(const string &msg) : Exception(ExceptionType::OUT_OF_RANGE, msg) { +void CatalogSet::Scan(const std::function &callback) { + // lock the catalog set + lock_guard lock(catalog_lock); + for (auto &kv : entries) { + auto entry = kv.second.get(); + entry = GetCommittedEntry(entry); + if (!entry->deleted) { + callback(entry); + } + } } +} // namespace duckdb -CatalogException::CatalogException(const string &msg) : StandardException(ExceptionType::CATALOG, msg) { -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/parser/parser.hpp +// +// +//===----------------------------------------------------------------------===// -ConnectionException::ConnectionException(const string &msg) : StandardException(ExceptionType::CONNECTION, msg) { -} -ParserException::ParserException(const string &msg) : StandardException(ExceptionType::PARSER, msg) { -} -PermissionException::PermissionException(const string &msg) : StandardException(ExceptionType::PERMISSION, msg) { -} -SyntaxException::SyntaxException(const string &msg) : Exception(ExceptionType::SYNTAX, msg) { -} -ConstraintException::ConstraintException(const string &msg) : Exception(ExceptionType::CONSTRAINT, msg) { -} -BinderException::BinderException(const string &msg) : StandardException(ExceptionType::BINDER, msg) { -} -IOException::IOException(const string &msg) : Exception(ExceptionType::IO, msg) { -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/parser/simplified_token.hpp +// +// +//===----------------------------------------------------------------------===// -SerializationException::SerializationException(const string &msg) : Exception(ExceptionType::SERIALIZATION, msg) { -} -SequenceException::SequenceException(const string &msg) : Exception(ExceptionType::SERIALIZATION, msg) { -} -InterruptException::InterruptException() : Exception(ExceptionType::INTERRUPT, "Interrupted!") { -} -FatalException::FatalException(ExceptionType type, const string &msg) : Exception(type, msg) { -} -InternalException::InternalException(const string &msg) : FatalException(ExceptionType::INTERNAL, msg) { -#ifdef DUCKDB_CRASH_ON_ASSERT - Printer::Print("ABORT THROWN BY INTERNAL EXCEPTION: " + msg); - abort(); -#endif -} +namespace duckdb { -InvalidInputException::InvalidInputException(const string &msg) : Exception(ExceptionType::INVALID_INPUT, msg) { -} +//! 
Simplified tokens are a simplified (dense) representation of the lexer +//! Used for simple syntax highlighting in the tests +enum class SimplifiedTokenType : uint8_t { + SIMPLIFIED_TOKEN_IDENTIFIER, + SIMPLIFIED_TOKEN_NUMERIC_CONSTANT, + SIMPLIFIED_TOKEN_STRING_CONSTANT, + SIMPLIFIED_TOKEN_OPERATOR, + SIMPLIFIED_TOKEN_KEYWORD, + SIMPLIFIED_TOKEN_COMMENT +}; -OutOfMemoryException::OutOfMemoryException(const string &msg) : Exception(ExceptionType::OUT_OF_MEMORY, msg) { -} +struct SimplifiedToken { + SimplifiedTokenType type; + idx_t start; +}; -ParameterNotAllowedException::ParameterNotAllowedException(const string &msg) - : StandardException(ExceptionType::PARAMETER_NOT_ALLOWED, msg) { -} +enum class KeywordCategory : uint8_t { KEYWORD_RESERVED, KEYWORD_UNRESERVED, KEYWORD_TYPE_FUNC, KEYWORD_COL_NAME }; -ParameterNotResolvedException::ParameterNotResolvedException() - : Exception(ExceptionType::PARAMETER_NOT_RESOLVED, "Parameter types could not be resolved") { -} +struct ParserKeyword { + string name; + KeywordCategory category; +}; } // namespace duckdb +namespace duckdb_libpgquery { +struct PGNode; +struct PGList; +} // namespace duckdb_libpgquery +namespace duckdb { +class ParserExtension; -// LICENSE_CHANGE_BEGIN -// The following code up to LICENSE_CHANGE_END is subject to THIRD PARTY LICENSE #2 -// See the end of this file for a list - -/* - Formatting library for C++ +struct ParserOptions { + bool preserve_identifier_case = true; + idx_t max_expression_depth = 1000; + const vector *extensions = nullptr; +}; - Copyright (c) 2012 - present, Victor Zverovich +//! The parser is responsible for parsing the query and converting it into a set +//! of parsed statements. The parsed statements can then be converted into a +//! plan and executed. +class Parser { +public: + Parser(ParserOptions options = ParserOptions()); - Permission is hereby granted, free of charge, to any person obtaining - a copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, - distribute, sublicense, and/or sell copies of the Software, and to - permit persons to whom the Software is furnished to do so, subject to - the following conditions: + //! The parsed SQL statements from an invocation to ParseQuery. + vector> statements; - The above copyright notice and this permission notice shall be - included in all copies or substantial portions of the Software. +public: + //! Attempts to parse a query into a series of SQL statements. Returns + //! whether or not the parsing was successful. If the parsing was + //! successful, the parsed statements will be stored in the statements + //! variable. + void ParseQuery(const string &query); - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE - LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION - OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + //! Tokenize a query, returning the raw tokens together with their locations + static vector Tokenize(const string &query); - --- Optional exception to the license --- + //! 
Returns true if the given text matches a keyword of the parser + static bool IsKeyword(const string &text); + //! Returns a list of all keywords in the parser + static vector KeywordList(); - As an exception, if, as a result of your compiling your source code, portions - of this Software are embedded into a machine-executable object form of such - source code, you may redistribute such embedded portions in such object form - without including the above copyright and permission notices. - */ + //! Parses a list of expressions (i.e. the list found in a SELECT clause) + DUCKDB_API static vector> ParseExpressionList(const string &select_list, + ParserOptions options = ParserOptions()); + //! Parses a list as found in an ORDER BY expression (i.e. including optional ASCENDING/DESCENDING modifiers) + static vector ParseOrderList(const string &select_list, ParserOptions options = ParserOptions()); + //! Parses an update list (i.e. the list found in the SET clause of an UPDATE statement) + static void ParseUpdateList(const string &update_list, vector &update_columns, + vector> &expressions, + ParserOptions options = ParserOptions()); + //! Parses a VALUES list (i.e. the list of expressions after a VALUES clause) + static vector>> ParseValuesList(const string &value_list, + ParserOptions options = ParserOptions()); + //! Parses a column list (i.e. as found in a CREATE TABLE statement) + static ColumnList ParseColumnList(const string &column_list, ParserOptions options = ParserOptions()); -#ifndef FMT_FORMAT_H_ -#define FMT_FORMAT_H_ +private: + ParserOptions options; +}; +} // namespace duckdb -// LICENSE_CHANGE_BEGIN -// The following code up to LICENSE_CHANGE_END is subject to THIRD PARTY LICENSE #2 -// See the end of this file for a list -// Formatting library for C++ - the core API -// -// Copyright (c) 2012 - present, Victor Zverovich -// All rights reserved. -// -// For the license information refer to format.h. -#ifndef FMT_CORE_H_ -#define FMT_CORE_H_ -#include // std::FILE -#include -#include -#include -#include -// The fmt library version in the form major * 10000 + minor * 100 + patch. -#define FMT_VERSION 60102 +namespace duckdb { -#ifdef __has_feature -# define FMT_HAS_FEATURE(x) __has_feature(x) -#else -# define FMT_HAS_FEATURE(x) 0 -#endif +static DefaultMacro internal_macros[] = { + {DEFAULT_SCHEMA, "current_user", {nullptr}, "'duckdb'"}, // user name of current execution context + {DEFAULT_SCHEMA, "current_catalog", {nullptr}, "'duckdb'"}, // name of current database (called "catalog" in the SQL standard) + {DEFAULT_SCHEMA, "current_database", {nullptr}, "'duckdb'"}, // name of current database + {DEFAULT_SCHEMA, "user", {nullptr}, "current_user"}, // equivalent to current_user + {DEFAULT_SCHEMA, "session_user", {nullptr}, "'duckdb'"}, // session user name + {"pg_catalog", "inet_client_addr", {nullptr}, "NULL"}, // address of the remote connection + {"pg_catalog", "inet_client_port", {nullptr}, "NULL"}, // port of the remote connection + {"pg_catalog", "inet_server_addr", {nullptr}, "NULL"}, // address of the local connection + {"pg_catalog", "inet_server_port", {nullptr}, "NULL"}, // port of the local connection + {"pg_catalog", "pg_my_temp_schema", {nullptr}, "0"}, // OID of session's temporary schema, or 0 if none + {"pg_catalog", "pg_is_other_temp_schema", {"schema_id", nullptr}, "false"}, // is schema another session's temporary schema? 
-#if defined(__has_include) && !defined(__INTELLISENSE__) && \ - !(defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1600) -# define FMT_HAS_INCLUDE(x) __has_include(x) -#else -# define FMT_HAS_INCLUDE(x) 0 -#endif + {"pg_catalog", "pg_conf_load_time", {nullptr}, "current_timestamp"}, // configuration load time + {"pg_catalog", "pg_postmaster_start_time", {nullptr}, "current_timestamp"}, // server start time -#ifdef __has_cpp_attribute -# define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) -#else -# define FMT_HAS_CPP_ATTRIBUTE(x) 0 -#endif + {"pg_catalog", "pg_typeof", {"expression", nullptr}, "lower(typeof(expression))"}, // get the data type of any value -#if defined(__GNUC__) && !defined(__clang__) -# define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) -#else -# define FMT_GCC_VERSION 0 -#endif + // privilege functions + // {"has_any_column_privilege", {"user", "table", "privilege", nullptr}, "true"}, //boolean //does user have privilege for any column of table + {"pg_catalog", "has_any_column_privilege", {"table", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for any column of table + // {"has_column_privilege", {"user", "table", "column", "privilege", nullptr}, "true"}, //boolean //does user have privilege for column + {"pg_catalog", "has_column_privilege", {"table", "column", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for column + // {"has_database_privilege", {"user", "database", "privilege", nullptr}, "true"}, //boolean //does user have privilege for database + {"pg_catalog", "has_database_privilege", {"database", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for database + // {"has_foreign_data_wrapper_privilege", {"user", "fdw", "privilege", nullptr}, "true"}, //boolean //does user have privilege for foreign-data wrapper + {"pg_catalog", "has_foreign_data_wrapper_privilege", {"fdw", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for foreign-data wrapper + // {"has_function_privilege", {"user", "function", "privilege", nullptr}, "true"}, //boolean //does user have privilege for function + {"pg_catalog", "has_function_privilege", {"function", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for function + // {"has_language_privilege", {"user", "language", "privilege", nullptr}, "true"}, //boolean //does user have privilege for language + {"pg_catalog", "has_language_privilege", {"language", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for language + // {"has_schema_privilege", {"user", "schema, privilege", nullptr}, "true"}, //boolean //does user have privilege for schema + {"pg_catalog", "has_schema_privilege", {"schema", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for schema + // {"has_sequence_privilege", {"user", "sequence", "privilege", nullptr}, "true"}, //boolean //does user have privilege for sequence + {"pg_catalog", "has_sequence_privilege", {"sequence", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for sequence + // {"has_server_privilege", {"user", "server", "privilege", nullptr}, "true"}, //boolean //does user have privilege for foreign server + {"pg_catalog", "has_server_privilege", {"server", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for foreign server + // {"has_table_privilege", {"user", "table", "privilege", nullptr}, "true"}, //boolean //does user have privilege for table + 
{"pg_catalog", "has_table_privilege", {"table", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for table + // {"has_tablespace_privilege", {"user", "tablespace", "privilege", nullptr}, "true"}, //boolean //does user have privilege for tablespace + {"pg_catalog", "has_tablespace_privilege", {"tablespace", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for tablespace -#if __cplusplus >= 201103L || defined(__GXX_EXPERIMENTAL_CXX0X__) -# define FMT_HAS_GXX_CXX11 FMT_GCC_VERSION -#else -# define FMT_HAS_GXX_CXX11 0 -#endif + // various postgres system functions + {"pg_catalog", "pg_get_viewdef", {"oid", nullptr}, "(select sql from duckdb_views() v where v.view_oid=oid)"}, + {"pg_catalog", "pg_get_constraintdef", {"constraint_oid", "pretty_bool", nullptr}, "(select constraint_text from duckdb_constraints() d_constraint where d_constraint.table_oid=constraint_oid/1000000 and d_constraint.constraint_index=constraint_oid%1000000)"}, + {"pg_catalog", "pg_get_expr", {"pg_node_tree", "relation_oid", nullptr}, "pg_node_tree"}, + {"pg_catalog", "format_pg_type", {"type_name", nullptr}, "case when logical_type='FLOAT' then 'real' when logical_type='DOUBLE' then 'double precision' when logical_type='DECIMAL' then 'numeric' when logical_type='VARCHAR' then 'character varying' when logical_type='BLOB' then 'bytea' when logical_type='TIMESTAMP' then 'timestamp without time zone' when logical_type='TIME' then 'time without time zone' else lower(logical_type) end"}, + {"pg_catalog", "format_type", {"type_oid", "typemod", nullptr}, "(select format_pg_type(type_name) from duckdb_types() t where t.type_oid=type_oid) || case when typemod>0 then concat('(', typemod/1000, ',', typemod%1000, ')') else '' end"}, -#ifdef __NVCC__ -# define FMT_NVCC __NVCC__ -#else -# define FMT_NVCC 0 -#endif + {"pg_catalog", "pg_has_role", {"user", "role", "privilege", nullptr}, "true"}, //boolean //does user have privilege for role + {"pg_catalog", "pg_has_role", {"role", "privilege", nullptr}, "true"}, //boolean //does current user have privilege for role -#ifdef _MSC_VER -# define FMT_MSC_VER _MSC_VER -#else -# define FMT_MSC_VER 0 -#endif + {"pg_catalog", "col_description", {"table_oid", "column_number", nullptr}, "NULL"}, // get comment for a table column + {"pg_catalog", "obj_description", {"object_oid", "catalog_name", nullptr}, "NULL"}, // get comment for a database object + {"pg_catalog", "shobj_description", {"object_oid", "catalog_name", nullptr}, "NULL"}, // get comment for a shared database object -// Check if relaxed C++14 constexpr is supported. -// GCC doesn't allow throw in constexpr until version 6 (bug 67371). 
-#ifndef FMT_USE_CONSTEXPR -# define FMT_USE_CONSTEXPR \ - (FMT_HAS_FEATURE(cxx_relaxed_constexpr) || FMT_MSC_VER >= 1910 || \ - (FMT_GCC_VERSION >= 600 && __cplusplus >= 201402L)) && \ - !FMT_NVCC -#endif -#if FMT_USE_CONSTEXPR -# define FMT_CONSTEXPR constexpr -# define FMT_CONSTEXPR_DECL constexpr -#else -# define FMT_CONSTEXPR inline -# define FMT_CONSTEXPR_DECL -#endif + // visibility functions + {"pg_catalog", "pg_collation_is_visible", {"collation_oid", nullptr}, "true"}, + {"pg_catalog", "pg_conversion_is_visible", {"conversion_oid", nullptr}, "true"}, + {"pg_catalog", "pg_function_is_visible", {"function_oid", nullptr}, "true"}, + {"pg_catalog", "pg_opclass_is_visible", {"opclass_oid", nullptr}, "true"}, + {"pg_catalog", "pg_operator_is_visible", {"operator_oid", nullptr}, "true"}, + {"pg_catalog", "pg_opfamily_is_visible", {"opclass_oid", nullptr}, "true"}, + {"pg_catalog", "pg_table_is_visible", {"table_oid", nullptr}, "true"}, + {"pg_catalog", "pg_ts_config_is_visible", {"config_oid", nullptr}, "true"}, + {"pg_catalog", "pg_ts_dict_is_visible", {"dict_oid", nullptr}, "true"}, + {"pg_catalog", "pg_ts_parser_is_visible", {"parser_oid", nullptr}, "true"}, + {"pg_catalog", "pg_ts_template_is_visible", {"template_oid", nullptr}, "true"}, + {"pg_catalog", "pg_type_is_visible", {"type_oid", nullptr}, "true"}, -#ifndef FMT_OVERRIDE -# if FMT_HAS_FEATURE(cxx_override) || \ - (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900 -# define FMT_OVERRIDE override -# else -# define FMT_OVERRIDE -# endif -#endif + {DEFAULT_SCHEMA, "round_even", {"x", "n", nullptr}, "CASE ((abs(x) * power(10, n+1)) % 10) WHEN 5 THEN round(x/2, n) * 2 ELSE round(x, n) END"}, + {DEFAULT_SCHEMA, "roundbankers", {"x", "n", nullptr}, "round_even(x, n)"}, + {DEFAULT_SCHEMA, "nullif", {"a", "b", nullptr}, "CASE WHEN a=b THEN NULL ELSE a END"}, + {DEFAULT_SCHEMA, "list_append", {"l", "e", nullptr}, "list_concat(l, list_value(e))"}, + {DEFAULT_SCHEMA, "array_append", {"arr", "el", nullptr}, "list_append(arr, el)"}, + {DEFAULT_SCHEMA, "list_prepend", {"e", "l", nullptr}, "list_concat(list_value(e), l)"}, + {DEFAULT_SCHEMA, "array_prepend", {"el", "arr", nullptr}, "list_prepend(el, arr)"}, + {DEFAULT_SCHEMA, "array_pop_back", {"arr", nullptr}, "arr[:LEN(arr)-1]"}, + {DEFAULT_SCHEMA, "array_pop_front", {"arr", nullptr}, "arr[2:]"}, + {DEFAULT_SCHEMA, "array_push_back", {"arr", "e", nullptr}, "list_concat(arr, list_value(e))"}, + {DEFAULT_SCHEMA, "array_push_front", {"arr", "e", nullptr}, "list_concat(list_value(e), arr)"}, + {DEFAULT_SCHEMA, "generate_subscripts", {"arr", "dim", nullptr}, "unnest(generate_series(1, array_length(arr, dim)))"}, + {DEFAULT_SCHEMA, "fdiv", {"x", "y", nullptr}, "floor(x/y)"}, + {DEFAULT_SCHEMA, "fmod", {"x", "y", nullptr}, "(x-y*floor(x/y))"}, -// Check if exceptions are disabled. 
-#ifndef FMT_EXCEPTIONS -# if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || \ - FMT_MSC_VER && !_HAS_EXCEPTIONS -# define FMT_EXCEPTIONS 0 -# else -# define FMT_EXCEPTIONS 1 -# endif -#endif + // algebraic list aggregates + {DEFAULT_SCHEMA, "list_avg", {"l", nullptr}, "list_aggr(l, 'avg')"}, + {DEFAULT_SCHEMA, "list_var_samp", {"l", nullptr}, "list_aggr(l, 'var_samp')"}, + {DEFAULT_SCHEMA, "list_var_pop", {"l", nullptr}, "list_aggr(l, 'var_pop')"}, + {DEFAULT_SCHEMA, "list_stddev_pop", {"l", nullptr}, "list_aggr(l, 'stddev_pop')"}, + {DEFAULT_SCHEMA, "list_stddev_samp", {"l", nullptr}, "list_aggr(l, 'stddev_samp')"}, + {DEFAULT_SCHEMA, "list_sem", {"l", nullptr}, "list_aggr(l, 'sem')"}, -// Define FMT_USE_NOEXCEPT to make fmt use noexcept (C++11 feature). -#ifndef FMT_USE_NOEXCEPT -# define FMT_USE_NOEXCEPT 0 -#endif + // distributive list aggregates + {DEFAULT_SCHEMA, "list_approx_count_distinct", {"l", nullptr}, "list_aggr(l, 'approx_count_distinct')"}, + {DEFAULT_SCHEMA, "list_bit_xor", {"l", nullptr}, "list_aggr(l, 'bit_xor')"}, + {DEFAULT_SCHEMA, "list_bit_or", {"l", nullptr}, "list_aggr(l, 'bit_or')"}, + {DEFAULT_SCHEMA, "list_bit_and", {"l", nullptr}, "list_aggr(l, 'bit_and')"}, + {DEFAULT_SCHEMA, "list_bool_and", {"l", nullptr}, "list_aggr(l, 'bool_and')"}, + {DEFAULT_SCHEMA, "list_bool_or", {"l", nullptr}, "list_aggr(l, 'bool_or')"}, + {DEFAULT_SCHEMA, "list_count", {"l", nullptr}, "list_aggr(l, 'count')"}, + {DEFAULT_SCHEMA, "list_entropy", {"l", nullptr}, "list_aggr(l, 'entropy')"}, + {DEFAULT_SCHEMA, "list_last", {"l", nullptr}, "list_aggr(l, 'last')"}, + {DEFAULT_SCHEMA, "list_first", {"l", nullptr}, "list_aggr(l, 'first')"}, + {DEFAULT_SCHEMA, "list_any_value", {"l", nullptr}, "list_aggr(l, 'any_value')"}, + {DEFAULT_SCHEMA, "list_kurtosis", {"l", nullptr}, "list_aggr(l, 'kurtosis')"}, + {DEFAULT_SCHEMA, "list_min", {"l", nullptr}, "list_aggr(l, 'min')"}, + {DEFAULT_SCHEMA, "list_max", {"l", nullptr}, "list_aggr(l, 'max')"}, + {DEFAULT_SCHEMA, "list_product", {"l", nullptr}, "list_aggr(l, 'product')"}, + {DEFAULT_SCHEMA, "list_skewness", {"l", nullptr}, "list_aggr(l, 'skewness')"}, + {DEFAULT_SCHEMA, "list_sum", {"l", nullptr}, "list_aggr(l, 'sum')"}, + {DEFAULT_SCHEMA, "list_string_agg", {"l", nullptr}, "list_aggr(l, 'string_agg')"}, -#if FMT_USE_NOEXCEPT || FMT_HAS_FEATURE(cxx_noexcept) || \ - (FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900 -# define FMT_DETECTED_NOEXCEPT noexcept -# define FMT_HAS_CXX11_NOEXCEPT 1 -#else -# define FMT_DETECTED_NOEXCEPT throw() -# define FMT_HAS_CXX11_NOEXCEPT 0 -#endif + // holistic list aggregates + {DEFAULT_SCHEMA, "list_mode", {"l", nullptr}, "list_aggr(l, 'mode')"}, + {DEFAULT_SCHEMA, "list_median", {"l", nullptr}, "list_aggr(l, 'median')"}, + {DEFAULT_SCHEMA, "list_mad", {"l", nullptr}, "list_aggr(l, 'mad')"}, -#ifndef FMT_NOEXCEPT -# if FMT_EXCEPTIONS || FMT_HAS_CXX11_NOEXCEPT -# define FMT_NOEXCEPT FMT_DETECTED_NOEXCEPT -# else -# define FMT_NOEXCEPT -# endif -#endif + // nested list aggregates + {DEFAULT_SCHEMA, "list_histogram", {"l", nullptr}, "list_aggr(l, 'histogram')"}, -// [[noreturn]] is disabled on MSVC because of bogus unreachable code warnings. 
-#if FMT_EXCEPTIONS && FMT_HAS_CPP_ATTRIBUTE(noreturn) && !FMT_MSC_VER -# define FMT_NORETURN [[noreturn]] -#else -# define FMT_NORETURN -#endif + {nullptr, nullptr, {nullptr}, nullptr} + }; -#ifndef FMT_DEPRECATED -# if (FMT_HAS_CPP_ATTRIBUTE(deprecated) && __cplusplus >= 201402L) || \ - FMT_MSC_VER >= 1900 -# define FMT_DEPRECATED [[deprecated]] -# else -# if defined(__GNUC__) || defined(__clang__) -# define FMT_DEPRECATED __attribute__((deprecated)) -# elif FMT_MSC_VER -# define FMT_DEPRECATED __declspec(deprecated) -# else -# define FMT_DEPRECATED /* deprecated */ -# endif -# endif -#endif +unique_ptr DefaultFunctionGenerator::CreateInternalTableMacroInfo(DefaultMacro &default_macro, unique_ptr function) { + for (idx_t param_idx = 0; default_macro.parameters[param_idx] != nullptr; param_idx++) { + function->parameters.push_back( + make_unique(default_macro.parameters[param_idx])); + } -// Workaround broken [[deprecated]] in the Intel compiler and NVCC. -#if defined(__INTEL_COMPILER) || FMT_NVCC -# define FMT_DEPRECATED_ALIAS -#else -# define FMT_DEPRECATED_ALIAS FMT_DEPRECATED -#endif + auto bind_info = make_unique(); + bind_info->schema = default_macro.schema; + bind_info->name = default_macro.name; + bind_info->temporary = true; + bind_info->internal = true; + bind_info->type = function->type == MacroType::TABLE_MACRO ? CatalogType::TABLE_MACRO_ENTRY : CatalogType::MACRO_ENTRY; + bind_info->function = move(function); + return bind_info; -#ifndef FMT_BEGIN_NAMESPACE -# if FMT_HAS_FEATURE(cxx_inline_namespaces) || FMT_GCC_VERSION >= 404 || \ - FMT_MSC_VER >= 1900 -# define FMT_INLINE_NAMESPACE inline namespace -# define FMT_END_NAMESPACE \ - } \ - } -# else -# define FMT_INLINE_NAMESPACE namespace -# define FMT_END_NAMESPACE \ - } \ - using namespace v6; \ - } -# endif -# define FMT_BEGIN_NAMESPACE \ - namespace duckdb_fmt { \ - FMT_INLINE_NAMESPACE v6 { -#endif +} -#if !defined(FMT_HEADER_ONLY) && defined(_WIN32) -# ifdef FMT_EXPORT -# define FMT_API __declspec(dllexport) -# elif defined(FMT_SHARED) -# define FMT_API __declspec(dllimport) -# define FMT_EXTERN_TEMPLATE_API FMT_API -# endif -#endif -#ifndef FMT_API -# define FMT_API -#endif -#ifndef FMT_EXTERN_TEMPLATE_API -# define FMT_EXTERN_TEMPLATE_API -#endif +unique_ptr DefaultFunctionGenerator::CreateInternalMacroInfo(DefaultMacro &default_macro) { + // parse the expression + auto expressions = Parser::ParseExpressionList(default_macro.macro); + D_ASSERT(expressions.size() == 1); -#ifndef FMT_HEADER_ONLY -# define FMT_EXTERN extern -#else -# define FMT_EXTERN -#endif + auto result = make_unique(move(expressions[0])); + return CreateInternalTableMacroInfo(default_macro, move(result)); +} -// libc++ supports string_view in pre-c++17. 
-#if (FMT_HAS_INCLUDE() && \ - (__cplusplus > 201402L || defined(_LIBCPP_VERSION))) || \ - (defined(_MSVC_LANG) && _MSVC_LANG > 201402L && _MSC_VER >= 1910) -# include -# define FMT_USE_STRING_VIEW -#elif FMT_HAS_INCLUDE("experimental/string_view") && __cplusplus >= 201402L -# include -# define FMT_USE_EXPERIMENTAL_STRING_VIEW -#endif +unique_ptr DefaultFunctionGenerator::CreateInternalTableMacroInfo(DefaultMacro &default_macro) { + Parser parser; + parser.ParseQuery(default_macro.macro); + D_ASSERT(parser.statements.size() == 1); + D_ASSERT(parser.statements[0]->type == StatementType::SELECT_STATEMENT); -FMT_BEGIN_NAMESPACE + auto &select = (SelectStatement &) *parser.statements[0]; + auto result = make_unique(move(select.node)); + return CreateInternalTableMacroInfo(default_macro, move(result)); +} -// Implementations of enable_if_t and other types for pre-C++14 systems. -template -using enable_if_t = typename std::enable_if::type; -template -using conditional_t = typename std::conditional::type; -template using bool_constant = std::integral_constant; -template -using remove_reference_t = typename std::remove_reference::type; -template -using remove_const_t = typename std::remove_const::type; -template -using remove_cvref_t = typename std::remove_cv>::type; +static unique_ptr GetDefaultFunction(const string &input_schema, const string &input_name) { + auto schema = StringUtil::Lower(input_schema); + auto name = StringUtil::Lower(input_name); + for (idx_t index = 0; internal_macros[index].name != nullptr; index++) { + if (internal_macros[index].schema == schema && internal_macros[index].name == name) { + return DefaultFunctionGenerator::CreateInternalMacroInfo(internal_macros[index]); + } + } + return nullptr; +} -struct monostate {}; +DefaultFunctionGenerator::DefaultFunctionGenerator(Catalog &catalog, SchemaCatalogEntry *schema) + : DefaultGenerator(catalog), schema(schema) { +} -// An enable_if helper to be used in template parameters which results in much -// shorter symbols: https://godbolt.org/z/sWw4vP. Extra parentheses are needed -// to workaround a bug in MSVC 2019 (see #1140 and #1186). -#define FMT_ENABLE_IF(...) enable_if_t<(__VA_ARGS__), int> = 0 +unique_ptr DefaultFunctionGenerator::CreateDefaultEntry(ClientContext &context, + const string &entry_name) { + auto info = GetDefaultFunction(schema->name, entry_name); + if (info) { + return make_unique_base(&catalog, schema, (CreateMacroInfo *)info.get()); + } + return nullptr; +} -namespace internal { +vector DefaultFunctionGenerator::GetDefaultEntries() { + vector result; + for (idx_t index = 0; internal_macros[index].name != nullptr; index++) { + if (internal_macros[index].schema == schema->name) { + result.emplace_back(internal_macros[index].name); + } + } + return result; +} -// A workaround for gcc 4.8 to make void_t work in a SFINAE context. -template struct void_t_impl { using type = void; }; +} // namespace duckdb -#ifndef FMT_ASSERT -#define FMT_ASSERT(condition, message) -#endif -#if defined(FMT_USE_STRING_VIEW) -template using std_string_view = std::basic_string_view; -#elif defined(FMT_USE_EXPERIMENTAL_STRING_VIEW) -template -using std_string_view = std::experimental::basic_string_view; -#else -template struct std_string_view {}; -#endif -#ifdef FMT_USE_INT128 -// Do nothing. 
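Editor's reading aid for the hunk above: the added default_functions section pairs a sentinel-terminated table of macro definitions (the list_* aggregates rewritten to list_aggr calls) with a generator that, on a failed catalog lookup, parses the macro body and registers it as an internal catalog entry. The same table-plus-lazy-generator pattern recurs later in this diff for default types, default schemas and the pg_catalog/information_schema compatibility views. The self-contained sketch below shows only the lookup pattern; DefaultMacroRow, FindDefaultMacro and the "main" schema string are illustrative stand-ins, not DuckDB internals.

#include <cctype>
#include <cstddef>
#include <iostream>
#include <string>

// One row of a sentinel-terminated default-macro table.
struct DefaultMacroRow {
    const char *schema;
    const char *name;
    const char *body;
};

// A tiny stand-in for the table above: two of the list aggregates.
static const DefaultMacroRow kDefaultMacros[] = {
    {"main", "list_avg", "list_aggr(l, 'avg')"},
    {"main", "list_sum", "list_aggr(l, 'sum')"},
    {nullptr, nullptr, nullptr}, // sentinel row, mirroring the {nullptr, ...} terminator above
};

static std::string Lower(std::string s) {
    for (auto &c : s) {
        c = static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
    }
    return s;
}

// Case-insensitive lookup; returns the macro body or nullptr when no default exists.
// The real code then parses the body and creates an internal macro catalog entry.
static const char *FindDefaultMacro(const std::string &schema, const std::string &name) {
    auto s = Lower(schema);
    auto n = Lower(name);
    for (std::size_t i = 0; kDefaultMacros[i].name != nullptr; i++) {
        if (kDefaultMacros[i].schema == s && kDefaultMacros[i].name == n) {
            return kDefaultMacros[i].body;
        }
    }
    return nullptr;
}

int main() {
    const char *body = FindDefaultMacro("main", "LIST_AVG");
    std::cout << (body ? body : "<no default>") << "\n"; // prints: list_aggr(l, 'avg')
}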
-#elif defined(__SIZEOF_INT128__) -# define FMT_USE_INT128 1 -using int128_t = __int128_t; -using uint128_t = __uint128_t; -#else -# define FMT_USE_INT128 0 -#endif -#if !FMT_USE_INT128 -struct int128_t {}; -struct uint128_t {}; -#endif -// Casts a nonnegative integer to unsigned. -template -FMT_CONSTEXPR typename std::make_unsigned::type to_unsigned(Int value) { - FMT_ASSERT(value >= 0, "negative value"); - return static_cast::type>(value); -} -} // namespace internal +namespace duckdb { -template -using void_t = typename internal::void_t_impl::type; +struct DefaultSchema { + const char *name; +}; -/** - An implementation of ``std::basic_string_view`` for pre-C++17. It provides a - subset of the API. ``fmt::basic_string_view`` is used for format strings even - if ``std::string_view`` is available to prevent issues when a library is - compiled with a different ``-std`` option than the client code (which is not - recommended). - */ -template class basic_string_view { - private: - const Char* data_; - size_t size_; +static DefaultSchema internal_schemas[] = {{"information_schema"}, {"pg_catalog"}, {nullptr}}; - public: - using char_type = Char; - using iterator = const Char*; +static bool GetDefaultSchema(const string &input_schema) { + auto schema = StringUtil::Lower(input_schema); + for (idx_t index = 0; internal_schemas[index].name != nullptr; index++) { + if (internal_schemas[index].name == schema) { + return true; + } + } + return false; +} - FMT_CONSTEXPR basic_string_view() FMT_NOEXCEPT : data_(nullptr), size_(0) {} +DefaultSchemaGenerator::DefaultSchemaGenerator(Catalog &catalog) : DefaultGenerator(catalog) { +} - /** Constructs a string reference object from a C string and a size. */ - FMT_CONSTEXPR basic_string_view(const Char* s, size_t count) FMT_NOEXCEPT - : data_(s), - size_(count) {} +unique_ptr DefaultSchemaGenerator::CreateDefaultEntry(ClientContext &context, const string &entry_name) { + if (GetDefaultSchema(entry_name)) { + return make_unique_base(&catalog, StringUtil::Lower(entry_name), true); + } + return nullptr; +} - /** - \rst - Constructs a string reference object from a C string computing - the size with ``std::char_traits::length``. - \endrst - */ - basic_string_view(const Char* s) - : data_(s), size_(std::char_traits::length(s)) {} +vector DefaultSchemaGenerator::GetDefaultEntries() { + vector result; + for (idx_t index = 0; internal_schemas[index].name != nullptr; index++) { + result.emplace_back(internal_schemas[index].name); + } + return result; +} - /** Constructs a string reference from a ``std::basic_string`` object. */ - template - FMT_CONSTEXPR basic_string_view( - const std::basic_string& s) FMT_NOEXCEPT - : data_(s.data()), - size_(s.size()) {} +} // namespace duckdb - template < - typename S, - FMT_ENABLE_IF(std::is_same>::value)> - FMT_CONSTEXPR basic_string_view(S s) FMT_NOEXCEPT : data_(s.data()), - size_(s.size()) {} - /** Returns a pointer to the string data. */ - FMT_CONSTEXPR const Char* data() const { return data_; } - /** Returns the string size. */ - FMT_CONSTEXPR size_t size() const { return size_; } - FMT_CONSTEXPR iterator begin() const { return data_; } - FMT_CONSTEXPR iterator end() const { return data_ + size_; } - FMT_CONSTEXPR const Char& operator[](size_t pos) const { return data_[pos]; } - FMT_CONSTEXPR void remove_prefix(size_t n) { - data_ += n; - size_ -= n; - } - // Lexicographically compare this string reference to other. - int compare(basic_string_view other) const { - size_t str_size = size_ < other.size_ ? 
size_ : other.size_; - int result = std::char_traits::compare(data_, other.data_, str_size); - if (result == 0) - result = size_ == other.size_ ? 0 : (size_ < other.size_ ? -1 : 1); - return result; - } +namespace duckdb { - friend bool operator==(basic_string_view lhs, basic_string_view rhs) { - return lhs.compare(rhs) == 0; - } - friend bool operator!=(basic_string_view lhs, basic_string_view rhs) { - return lhs.compare(rhs) != 0; - } - friend bool operator<(basic_string_view lhs, basic_string_view rhs) { - return lhs.compare(rhs) < 0; - } - friend bool operator<=(basic_string_view lhs, basic_string_view rhs) { - return lhs.compare(rhs) <= 0; - } - friend bool operator>(basic_string_view lhs, basic_string_view rhs) { - return lhs.compare(rhs) > 0; - } - friend bool operator>=(basic_string_view lhs, basic_string_view rhs) { - return lhs.compare(rhs) >= 0; - } +struct DefaultType { + const char *name; + LogicalTypeId type; }; -using string_view = basic_string_view; -using wstring_view = basic_string_view; - -#ifndef __cpp_char8_t -// A UTF-8 code unit type. -enum char8_t : unsigned char {}; -#endif - -/** Specifies if ``T`` is a character type. Can be specialized by users. */ -template struct is_char : std::false_type {}; -template <> struct is_char : std::true_type {}; -template <> struct is_char : std::true_type {}; -template <> struct is_char : std::true_type {}; -template <> struct is_char : std::true_type {}; -template <> struct is_char : std::true_type {}; - -/** - \rst - Returns a string view of `s`. In order to add custom string type support to - {fmt} provide an overload of `to_string_view` for it in the same namespace as - the type for the argument-dependent lookup to work. - - **Example**:: +static DefaultType internal_types[] = {{"int", LogicalTypeId::INTEGER}, + {"int4", LogicalTypeId::INTEGER}, + {"signed", LogicalTypeId::INTEGER}, + {"integer", LogicalTypeId::INTEGER}, + {"integral", LogicalTypeId::INTEGER}, + {"int32", LogicalTypeId::INTEGER}, + {"varchar", LogicalTypeId::VARCHAR}, + {"bpchar", LogicalTypeId::VARCHAR}, + {"text", LogicalTypeId::VARCHAR}, + {"string", LogicalTypeId::VARCHAR}, + {"char", LogicalTypeId::VARCHAR}, + {"nvarchar", LogicalTypeId::VARCHAR}, + {"bytea", LogicalTypeId::BLOB}, + {"blob", LogicalTypeId::BLOB}, + {"varbinary", LogicalTypeId::BLOB}, + {"binary", LogicalTypeId::BLOB}, + {"int8", LogicalTypeId::BIGINT}, + {"bigint", LogicalTypeId::BIGINT}, + {"int64", LogicalTypeId::BIGINT}, + {"long", LogicalTypeId::BIGINT}, + {"oid", LogicalTypeId::BIGINT}, + {"int2", LogicalTypeId::SMALLINT}, + {"smallint", LogicalTypeId::SMALLINT}, + {"short", LogicalTypeId::SMALLINT}, + {"int16", LogicalTypeId::SMALLINT}, + {"timestamp", LogicalTypeId::TIMESTAMP}, + {"datetime", LogicalTypeId::TIMESTAMP}, + {"timestamp_us", LogicalTypeId::TIMESTAMP}, + {"timestamp_ms", LogicalTypeId::TIMESTAMP_MS}, + {"timestamp_ns", LogicalTypeId::TIMESTAMP_NS}, + {"timestamp_s", LogicalTypeId::TIMESTAMP_SEC}, + {"bool", LogicalTypeId::BOOLEAN}, + {"boolean", LogicalTypeId::BOOLEAN}, + {"logical", LogicalTypeId::BOOLEAN}, + {"decimal", LogicalTypeId::DECIMAL}, + {"dec", LogicalTypeId::DECIMAL}, + {"numeric", LogicalTypeId::DECIMAL}, + {"real", LogicalTypeId::FLOAT}, + {"float4", LogicalTypeId::FLOAT}, + {"float", LogicalTypeId::FLOAT}, + {"double", LogicalTypeId::DOUBLE}, + {"float8", LogicalTypeId::DOUBLE}, + {"tinyint", LogicalTypeId::TINYINT}, + {"int1", LogicalTypeId::TINYINT}, + {"date", LogicalTypeId::DATE}, + {"time", LogicalTypeId::TIME}, + {"interval", 
LogicalTypeId::INTERVAL}, + {"hugeint", LogicalTypeId::HUGEINT}, + {"int128", LogicalTypeId::HUGEINT}, + {"uuid", LogicalTypeId::UUID}, + {"guid", LogicalTypeId::UUID}, + {"struct", LogicalTypeId::STRUCT}, + {"row", LogicalTypeId::STRUCT}, + {"list", LogicalTypeId::LIST}, + {"map", LogicalTypeId::MAP}, + {"utinyint", LogicalTypeId::UTINYINT}, + {"uint8", LogicalTypeId::UTINYINT}, + {"usmallint", LogicalTypeId::USMALLINT}, + {"uint16", LogicalTypeId::USMALLINT}, + {"uinteger", LogicalTypeId::UINTEGER}, + {"uint32", LogicalTypeId::UINTEGER}, + {"ubigint", LogicalTypeId::UBIGINT}, + {"uint64", LogicalTypeId::UBIGINT}, + {"union", LogicalTypeId::UNION}, + {"timestamptz", LogicalTypeId::TIMESTAMP_TZ}, + {"timetz", LogicalTypeId::TIME_TZ}, + {"json", LogicalTypeId::JSON}, + {"null", LogicalTypeId::SQLNULL}, + {nullptr, LogicalTypeId::INVALID}}; - namespace my_ns { - inline string_view to_string_view(const my_string& s) { - return {s.data(), s.length()}; - } - } - std::string message = fmt::format(my_string("The answer is {}"), 42); - \endrst - */ -template ::value)> -inline basic_string_view to_string_view(const Char* s) { - return s; +LogicalTypeId DefaultTypeGenerator::GetDefaultType(const string &name) { + auto lower_str = StringUtil::Lower(name); + for (idx_t index = 0; internal_types[index].name != nullptr; index++) { + if (internal_types[index].name == lower_str) { + return internal_types[index].type; + } + } + return LogicalTypeId::INVALID; } -template -inline basic_string_view to_string_view( - const std::basic_string& s) { - return s; +DefaultTypeGenerator::DefaultTypeGenerator(Catalog &catalog, SchemaCatalogEntry *schema) + : DefaultGenerator(catalog), schema(schema) { } -template -inline basic_string_view to_string_view(basic_string_view s) { - return s; +unique_ptr DefaultTypeGenerator::CreateDefaultEntry(ClientContext &context, const string &entry_name) { + if (schema->name != DEFAULT_SCHEMA) { + return nullptr; + } + auto type_id = GetDefaultType(entry_name); + if (type_id == LogicalTypeId::INVALID) { + return nullptr; + } + CreateTypeInfo info; + info.name = entry_name; + info.type = LogicalType(type_id); + info.internal = true; + info.temporary = true; + return make_unique_base(&catalog, schema, &info); } -template >::value)> -inline basic_string_view to_string_view( - internal::std_string_view s) { - return s; +vector DefaultTypeGenerator::GetDefaultEntries() { + vector result; + if (schema->name != DEFAULT_SCHEMA) { + return result; + } + for (idx_t index = 0; internal_types[index].name != nullptr; index++) { + result.emplace_back(internal_types[index].name); + } + return result; } -// A base class for compile-time strings. It is defined in the fmt namespace to -// make formatting functions visible via ADL, e.g. format(fmt("{}"), 42). -struct compile_string {}; - -template -struct is_compile_string : std::is_base_of {}; +} // namespace duckdb -template ::value)> -constexpr basic_string_view to_string_view(const S& s) { - return s; -} -namespace internal { -void to_string_view(...); -using duckdb_fmt::v6::to_string_view; -// Specifies whether S is a string type convertible to fmt::basic_string_view. -// It should be a constexpr function but MSVC 2017 fails to compile it in -// enable_if and MSVC 2015 fails to compile it as an alias template. 
-template -struct is_string : std::is_class()))> { -}; -template struct char_t_impl {}; -template struct char_t_impl::value>> { - using result = decltype(to_string_view(std::declval())); - using type = typename result::char_type; -}; -struct error_handler { - FMT_CONSTEXPR error_handler() = default; - FMT_CONSTEXPR error_handler(const error_handler&) = default; - // This function is intentionally not constexpr to give a compile-time error. - FMT_NORETURN FMT_API void on_error(const char* message); -}; -} // namespace internal -/** String's character type. */ -template using char_t = typename internal::char_t_impl::type; -/** - \rst - Parsing context consisting of a format string range being parsed and an - argument counter for automatic indexing. +namespace duckdb { - You can use one of the following type aliases for common character types: +struct DefaultView { + const char *schema; + const char *name; + const char *sql; +}; - +-----------------------+-------------------------------------+ - | Type | Definition | - +=======================+=====================================+ - | format_parse_context | basic_format_parse_context | - +-----------------------+-------------------------------------+ - | wformat_parse_context | basic_format_parse_context | - +-----------------------+-------------------------------------+ - \endrst - */ -template -class basic_format_parse_context : private ErrorHandler { - private: - basic_string_view format_str_; - int next_arg_id_; +static DefaultView internal_views[] = { + {DEFAULT_SCHEMA, "pragma_database_list", "SELECT * FROM pragma_database_list()"}, + {DEFAULT_SCHEMA, "sqlite_master", "select 'table' \"type\", table_name \"name\", table_name \"tbl_name\", 0 rootpage, sql from duckdb_tables union all select 'view' \"type\", view_name \"name\", view_name \"tbl_name\", 0 rootpage, sql from duckdb_views union all select 'index' \"type\", index_name \"name\", table_name \"tbl_name\", 0 rootpage, sql from duckdb_indexes;"}, + {DEFAULT_SCHEMA, "sqlite_schema", "SELECT * FROM sqlite_master"}, + {DEFAULT_SCHEMA, "sqlite_temp_master", "SELECT * FROM sqlite_master"}, + {DEFAULT_SCHEMA, "sqlite_temp_schema", "SELECT * FROM sqlite_master"}, + {DEFAULT_SCHEMA, "duckdb_constraints", "SELECT * FROM duckdb_constraints()"}, + {DEFAULT_SCHEMA, "duckdb_columns", "SELECT * FROM duckdb_columns() WHERE NOT internal"}, + {DEFAULT_SCHEMA, "duckdb_indexes", "SELECT * FROM duckdb_indexes()"}, + {DEFAULT_SCHEMA, "duckdb_schemas", "SELECT * FROM duckdb_schemas() WHERE NOT internal"}, + {DEFAULT_SCHEMA, "duckdb_tables", "SELECT * FROM duckdb_tables() WHERE NOT internal"}, + {DEFAULT_SCHEMA, "duckdb_types", "SELECT * FROM duckdb_types()"}, + {DEFAULT_SCHEMA, "duckdb_views", "SELECT * FROM duckdb_views() WHERE NOT internal"}, + {"pg_catalog", "pg_am", "SELECT 0 oid, 'art' amname, NULL amhandler, 'i' amtype"}, + {"pg_catalog", "pg_attribute", "SELECT table_oid attrelid, column_name attname, data_type_id atttypid, 0 attstattarget, NULL attlen, column_index attnum, 0 attndims, -1 attcacheoff, case when data_type ilike '%decimal%' then numeric_precision*1000+numeric_scale else -1 end atttypmod, false attbyval, NULL attstorage, NULL attalign, NOT is_nullable attnotnull, column_default IS NOT NULL atthasdef, false atthasmissing, '' attidentity, '' attgenerated, false attisdropped, true attislocal, 0 attinhcount, 0 attcollation, NULL attcompression, NULL attacl, NULL attoptions, NULL attfdwoptions, NULL attmissingval FROM duckdb_columns()"}, + {"pg_catalog", "pg_attrdef", "SELECT 
column_index oid, table_oid adrelid, column_index adnum, column_default adbin from duckdb_columns() where column_default is not null;"}, + {"pg_catalog", "pg_class", "SELECT table_oid oid, table_name relname, schema_oid relnamespace, 0 reltype, 0 reloftype, 0 relowner, 0 relam, 0 relfilenode, 0 reltablespace, 0 relpages, estimated_size::real reltuples, 0 relallvisible, 0 reltoastrelid, 0 reltoastidxid, index_count > 0 relhasindex, false relisshared, case when temporary then 't' else 'p' end relpersistence, 'r' relkind, column_count relnatts, check_constraint_count relchecks, false relhasoids, has_primary_key relhaspkey, false relhasrules, false relhastriggers, false relhassubclass, false relrowsecurity, true relispopulated, NULL relreplident, false relispartition, 0 relrewrite, 0 relfrozenxid, NULL relminmxid, NULL relacl, NULL reloptions, NULL relpartbound FROM duckdb_tables() UNION ALL SELECT view_oid oid, view_name relname, schema_oid relnamespace, 0 reltype, 0 reloftype, 0 relowner, 0 relam, 0 relfilenode, 0 reltablespace, 0 relpages, 0 reltuples, 0 relallvisible, 0 reltoastrelid, 0 reltoastidxid, false relhasindex, false relisshared, case when temporary then 't' else 'p' end relpersistence, 'v' relkind, column_count relnatts, 0 relchecks, false relhasoids, false relhaspkey, false relhasrules, false relhastriggers, false relhassubclass, false relrowsecurity, true relispopulated, NULL relreplident, false relispartition, 0 relrewrite, 0 relfrozenxid, NULL relminmxid, NULL relacl, NULL reloptions, NULL relpartbound FROM duckdb_views() UNION ALL SELECT sequence_oid oid, sequence_name relname, schema_oid relnamespace, 0 reltype, 0 reloftype, 0 relowner, 0 relam, 0 relfilenode, 0 reltablespace, 0 relpages, 0 reltuples, 0 relallvisible, 0 reltoastrelid, 0 reltoastidxid, false relhasindex, false relisshared, case when temporary then 't' else 'p' end relpersistence, 'S' relkind, 0 relnatts, 0 relchecks, false relhasoids, false relhaspkey, false relhasrules, false relhastriggers, false relhassubclass, false relrowsecurity, true relispopulated, NULL relreplident, false relispartition, 0 relrewrite, 0 relfrozenxid, NULL relminmxid, NULL relacl, NULL reloptions, NULL relpartbound FROM duckdb_sequences() UNION ALL SELECT index_oid oid, index_name relname, schema_oid relnamespace, 0 reltype, 0 reloftype, 0 relowner, 0 relam, 0 relfilenode, 0 reltablespace, 0 relpages, 0 reltuples, 0 relallvisible, 0 reltoastrelid, 0 reltoastidxid, false relhasindex, false relisshared, 't' relpersistence, 'i' relkind, NULL relnatts, 0 relchecks, false relhasoids, false relhaspkey, false relhasrules, false relhastriggers, false relhassubclass, false relrowsecurity, true relispopulated, NULL relreplident, false relispartition, 0 relrewrite, 0 relfrozenxid, NULL relminmxid, NULL relacl, NULL reloptions, NULL relpartbound FROM duckdb_indexes()"}, + {"pg_catalog", "pg_constraint", "SELECT table_oid*1000000+constraint_index oid, constraint_text conname, schema_oid connamespace, CASE constraint_type WHEN 'CHECK' then 'c' WHEN 'UNIQUE' then 'u' WHEN 'PRIMARY KEY' THEN 'p' WHEN 'FOREIGN KEY' THEN 'f' ELSE 'x' END contype, false condeferrable, false condeferred, true convalidated, table_oid conrelid, 0 contypid, 0 conindid, 0 conparentid, 0 confrelid, NULL confupdtype, NULL confdeltype, NULL confmatchtype, true conislocal, 0 coninhcount, false connoinherit, constraint_column_indexes conkey, NULL confkey, NULL conpfeqop, NULL conppeqop, NULL conffeqop, NULL conexclop, expression conbin FROM duckdb_constraints()"}, + 
{"pg_catalog", "pg_depend", "SELECT * FROM duckdb_dependencies()"}, + {"pg_catalog", "pg_description", "SELECT NULL objoid, NULL classoid, NULL objsubid, NULL description WHERE 1=0"}, + {"pg_catalog", "pg_enum", "SELECT NULL oid, NULL enumtypid, NULL enumsortorder, NULL enumlabel WHERE 1=0"}, + {"pg_catalog", "pg_index", "SELECT index_oid indexrelid, table_oid indrelid, 0 indnatts, 0 indnkeyatts, is_unique indisunique, is_primary indisprimary, false indisexclusion, true indimmediate, false indisclustered, true indisvalid, false indcheckxmin, true indisready, true indislive, false indisreplident, NULL::INT[] indkey, NULL::OID[] indcollation, NULL::OID[] indclass, NULL::INT[] indoption, expressions indexprs, NULL indpred FROM duckdb_indexes()"}, + {"pg_catalog", "pg_indexes", "SELECT schema_name schemaname, table_name tablename, index_name indexname, NULL \"tablespace\", sql indexdef FROM duckdb_indexes()"}, + {"pg_catalog", "pg_namespace", "SELECT oid, schema_name nspname, 0 nspowner, NULL nspacl FROM duckdb_schemas()"}, + {"pg_catalog", "pg_sequence", "SELECT sequence_oid seqrelid, 0 seqtypid, start_value seqstart, increment_by seqincrement, max_value seqmax, min_value seqmin, 0 seqcache, cycle seqcycle FROM duckdb_sequences()"}, + {"pg_catalog", "pg_sequences", "SELECT schema_name schemaname, sequence_name sequencename, 'duckdb' sequenceowner, 0 data_type, start_value, min_value, max_value, increment_by, cycle, 0 cache_size, last_value FROM duckdb_sequences()"}, + {"pg_catalog", "pg_tables", "SELECT schema_name schemaname, table_name tablename, 'duckdb' tableowner, NULL \"tablespace\", index_count > 0 hasindexes, false hasrules, false hastriggers FROM duckdb_tables()"}, + {"pg_catalog", "pg_tablespace", "SELECT 0 oid, 'pg_default' spcname, 0 spcowner, NULL spcacl, NULL spcoptions"}, + {"pg_catalog", "pg_type", "SELECT type_oid oid, format_pg_type(type_name) typname, schema_oid typnamespace, 0 typowner, type_size typlen, false typbyval, 'b' typtype, CASE WHEN type_category='NUMERIC' THEN 'N' WHEN type_category='STRING' THEN 'S' WHEN type_category='DATETIME' THEN 'D' WHEN type_category='BOOLEAN' THEN 'B' WHEN type_category='COMPOSITE' THEN 'C' WHEN type_category='USER' THEN 'U' ELSE 'X' END typcategory, false typispreferred, true typisdefined, NULL typdelim, NULL typrelid, NULL typsubscript, NULL typelem, NULL typarray, NULL typinput, NULL typoutput, NULL typreceive, NULL typsend, NULL typmodin, NULL typmodout, NULL typanalyze, 'd' typalign, 'p' typstorage, NULL typnotnull, NULL typbasetype, NULL typtypmod, NULL typndims, NULL typcollation, NULL typdefaultbin, NULL typdefault, NULL typacl FROM duckdb_types();"}, + {"pg_catalog", "pg_views", "SELECT schema_name schemaname, view_name viewname, 'duckdb' viewowner, sql definition FROM duckdb_views()"}, + {"information_schema", "columns", "SELECT NULL table_catalog, schema_name table_schema, table_name, column_name, column_index ordinal_position, column_default, CASE WHEN is_nullable THEN 'YES' ELSE 'NO' END is_nullable, data_type, character_maximum_length, NULL character_octet_length, numeric_precision, numeric_precision_radix, numeric_scale, NULL datetime_precision, NULL interval_type, NULL interval_precision, NULL character_set_catalog, NULL character_set_schema, NULL character_set_name, NULL collation_catalog, NULL collation_schema, NULL collation_name, NULL domain_catalog, NULL domain_schema, NULL domain_name, NULL udt_catalog, NULL udt_schema, NULL udt_name, NULL scope_catalog, NULL scope_schema, NULL scope_name, NULL maximum_cardinality, 
NULL dtd_identifier, NULL is_self_referencing, NULL is_identity, NULL identity_generation, NULL identity_start, NULL identity_increment, NULL identity_maximum, NULL identity_minimum, NULL identity_cycle, NULL is_generated, NULL generation_expression, NULL is_updatable FROM duckdb_columns;"}, + {"information_schema", "schemata", "SELECT NULL catalog_name, schema_name, 'duckdb' schema_owner, NULL default_character_set_catalog, NULL default_character_set_schema, NULL default_character_set_name, sql sql_path FROM duckdb_schemas()"}, + {"information_schema", "tables", "SELECT NULL table_catalog, schema_name table_schema, table_name, CASE WHEN temporary THEN 'LOCAL TEMPORARY' ELSE 'BASE TABLE' END table_type, NULL self_referencing_column_name, NULL reference_generation, NULL user_defined_type_catalog, NULL user_defined_type_schema, NULL user_defined_type_name, 'YES' is_insertable_into, 'NO' is_typed, CASE WHEN temporary THEN 'PRESERVE' ELSE NULL END commit_action FROM duckdb_tables() UNION ALL SELECT NULL table_catalog, schema_name table_schema, view_name table_name, 'VIEW' table_type, NULL self_referencing_column_name, NULL reference_generation, NULL user_defined_type_catalog, NULL user_defined_type_schema, NULL user_defined_type_name, 'NO' is_insertable_into, 'NO' is_typed, NULL commit_action FROM duckdb_views;"}, + {nullptr, nullptr, nullptr}}; - public: - using char_type = Char; - using iterator = typename basic_string_view::iterator; +static unique_ptr GetDefaultView(const string &input_schema, const string &input_name) { + auto schema = StringUtil::Lower(input_schema); + auto name = StringUtil::Lower(input_name); + for (idx_t index = 0; internal_views[index].name != nullptr; index++) { + if (internal_views[index].schema == schema && internal_views[index].name == name) { + auto result = make_unique(); + result->schema = schema; + result->sql = internal_views[index].sql; - explicit FMT_CONSTEXPR basic_format_parse_context( - basic_string_view format_str, ErrorHandler eh = ErrorHandler()) - : ErrorHandler(eh), format_str_(format_str), next_arg_id_(0) {} + Parser parser; + parser.ParseQuery(internal_views[index].sql); + D_ASSERT(parser.statements.size() == 1 && parser.statements[0]->type == StatementType::SELECT_STATEMENT); + result->query = unique_ptr_cast(move(parser.statements[0])); + result->temporary = true; + result->internal = true; + result->view_name = name; + return result; + } + } + return nullptr; +} - /** - Returns an iterator to the beginning of the format string range being - parsed. - */ - FMT_CONSTEXPR iterator begin() const FMT_NOEXCEPT { - return format_str_.begin(); - } +DefaultViewGenerator::DefaultViewGenerator(Catalog &catalog, SchemaCatalogEntry *schema) + : DefaultGenerator(catalog), schema(schema) { +} - /** - Returns an iterator past the end of the format string range being parsed. - */ - FMT_CONSTEXPR iterator end() const FMT_NOEXCEPT { return format_str_.end(); } +unique_ptr DefaultViewGenerator::CreateDefaultEntry(ClientContext &context, const string &entry_name) { + auto info = GetDefaultView(schema->name, entry_name); + if (info) { + auto binder = Binder::CreateBinder(context); + binder->BindCreateViewInfo(*info); - /** Advances the begin iterator to ``it``. 
*/ - FMT_CONSTEXPR void advance_to(iterator it) { - format_str_.remove_prefix(internal::to_unsigned(it - begin())); - } + return make_unique_base(&catalog, schema, info.get()); + } + return nullptr; +} - /** - Reports an error if using the manual argument indexing; otherwise returns - the next argument index and switches to the automatic indexing. - */ - FMT_CONSTEXPR int next_arg_id() { - if (next_arg_id_ >= 0) return next_arg_id_++; - on_error("cannot switch from manual to automatic argument indexing"); - return 0; - } +vector DefaultViewGenerator::GetDefaultEntries() { + vector result; + for (idx_t index = 0; internal_views[index].name != nullptr; index++) { + if (internal_views[index].schema == schema->name) { + result.emplace_back(internal_views[index].name); + } + } + return result; +} - /** - Reports an error if using the automatic argument indexing; otherwise - switches to the manual indexing. - */ - FMT_CONSTEXPR void check_arg_id(int) { - if (next_arg_id_ > 0) - on_error("cannot switch from automatic to manual argument indexing"); - else - next_arg_id_ = -1; - } +} // namespace duckdb - FMT_CONSTEXPR void check_arg_id(basic_string_view) {} - FMT_CONSTEXPR void on_error(const char* message) { - ErrorHandler::on_error(message); - } - FMT_CONSTEXPR ErrorHandler error_handler() const { return *this; } -}; -using format_parse_context = basic_format_parse_context; -using wformat_parse_context = basic_format_parse_context; -template -using basic_parse_context FMT_DEPRECATED_ALIAS = - basic_format_parse_context; -using parse_context FMT_DEPRECATED_ALIAS = basic_format_parse_context; -using wparse_context FMT_DEPRECATED_ALIAS = basic_format_parse_context; -template class basic_format_arg; -template class basic_format_args; -// A formatter for objects of type T. -template -struct formatter { - // A deleted default constructor indicates a disabled formatter. - formatter() = delete; -}; -template -struct FMT_DEPRECATED convert_to_int - : bool_constant::value && - std::is_convertible::value> {}; -// Specifies if T has an enabled formatter specialization. A type can be -// formattable even if it doesn't have a formatter e.g. via a conversion. -template -using has_formatter = - std::is_constructible>; +namespace duckdb { -namespace internal { +DependencyManager::DependencyManager(Catalog &catalog) : catalog(catalog) { +} -/** A contiguous memory buffer with an optional growing ability. */ -template class buffer { - private: - T* ptr_; - std::size_t size_; - std::size_t capacity_; +void DependencyManager::AddObject(ClientContext &context, CatalogEntry *object, + unordered_set &dependencies) { + // check for each object in the sources if they were not deleted yet + for (auto &dependency : dependencies) { + idx_t entry_index; + CatalogEntry *catalog_entry; + if (!dependency->set) { + throw InternalException("Dependency has no set"); + } + if (!dependency->set->GetEntryInternal(context, dependency->name, entry_index, catalog_entry)) { + throw InternalException("Dependency has already been deleted?"); + } + } + // indexes do not require CASCADE to be dropped, they are simply always dropped along with the table + auto dependency_type = object->type == CatalogType::INDEX_ENTRY ? 
DependencyType::DEPENDENCY_AUTOMATIC + : DependencyType::DEPENDENCY_REGULAR; + // add the object to the dependents_map of each object that it depends on + for (auto &dependency : dependencies) { + dependents_map[dependency].insert(Dependency(object, dependency_type)); + } + // create the dependents map for this object: it starts out empty + dependents_map[object] = dependency_set_t(); + dependencies_map[object] = dependencies; +} - protected: - // Don't initialize ptr_ since it is not accessed to save a few cycles. - buffer(std::size_t sz) FMT_NOEXCEPT : size_(sz), capacity_(sz) {} +void DependencyManager::DropObject(ClientContext &context, CatalogEntry *object, bool cascade) { + D_ASSERT(dependents_map.find(object) != dependents_map.end()); - buffer(T* p = nullptr, std::size_t sz = 0, std::size_t cap = 0) FMT_NOEXCEPT - : ptr_(p), - size_(sz), - capacity_(cap) {} + // first check the objects that depend on this object + auto &dependent_objects = dependents_map[object]; + for (auto &dep : dependent_objects) { + // look up the entry in the catalog set + auto &catalog_set = *dep.entry->set; + auto mapping_value = catalog_set.GetMapping(context, dep.entry->name, true /* get_latest */); + if (mapping_value == nullptr) { + continue; + } + idx_t entry_index = mapping_value->index; + CatalogEntry *dependency_entry; - /** Sets the buffer data and capacity. */ - void set(T* buf_data, std::size_t buf_capacity) FMT_NOEXCEPT { - ptr_ = buf_data; - capacity_ = buf_capacity; - } + if (!catalog_set.GetEntryInternal(context, entry_index, dependency_entry)) { + // the dependent object was already deleted, no conflict + continue; + } + // conflict: attempting to delete this object but the dependent object still exists + if (cascade || dep.dependency_type == DependencyType::DEPENDENCY_AUTOMATIC || + dep.dependency_type == DependencyType::DEPENDENCY_OWNS) { + // cascade: drop the dependent object + catalog_set.DropEntryInternal(context, entry_index, *dependency_entry, cascade); + } else { + // no cascade and there are objects that depend on this object: throw error + throw DependencyException("Cannot drop entry \"%s\" because there are entries that " + "depend on it. Use DROP...CASCADE to drop all dependents.", + object->name); + } + } +} - /** Increases the buffer capacity to hold at least *capacity* elements. 
*/ - virtual void grow(std::size_t capacity) = 0; +void DependencyManager::AlterObject(ClientContext &context, CatalogEntry *old_obj, CatalogEntry *new_obj) { + D_ASSERT(dependents_map.find(old_obj) != dependents_map.end()); + D_ASSERT(dependencies_map.find(old_obj) != dependencies_map.end()); - public: - using value_type = T; - using const_reference = const T&; + // first check the objects that depend on this object + vector owned_objects_to_add; + auto &dependent_objects = dependents_map[old_obj]; + for (auto &dep : dependent_objects) { + // look up the entry in the catalog set + auto &catalog_set = *dep.entry->set; + idx_t entry_index; + CatalogEntry *dependency_entry; + if (!catalog_set.GetEntryInternal(context, dep.entry->name, entry_index, dependency_entry)) { + // the dependent object was already deleted, no conflict + continue; + } + if (dep.dependency_type == DependencyType::DEPENDENCY_OWNS) { + // the dependent object is owned by the current object + owned_objects_to_add.push_back(dep.entry); + continue; + } + // conflict: attempting to alter this object but the dependent object still exists + // no cascade and there are objects that depend on this object: throw error + throw DependencyException("Cannot alter entry \"%s\" because there are entries that " + "depend on it.", + old_obj->name); + } + // add the new object to the dependents_map of each object that it depends on + auto &old_dependencies = dependencies_map[old_obj]; + vector to_delete; + for (auto &dependency : old_dependencies) { + if (dependency->type == CatalogType::TYPE_ENTRY) { + auto user_type = (TypeCatalogEntry *)dependency; + auto table = (TableCatalogEntry *)new_obj; + bool deleted_dependency = true; + for (auto &column : table->columns.Logical()) { + if (column.Type() == user_type->user_type) { + deleted_dependency = false; + break; + } + } + if (deleted_dependency) { + to_delete.push_back(dependency); + continue; + } + } + dependents_map[dependency].insert(new_obj); + } + for (auto &dependency : to_delete) { + old_dependencies.erase(dependency); + dependents_map[dependency].erase(old_obj); + } - buffer(const buffer&) = delete; - void operator=(const buffer&) = delete; - virtual ~buffer() = default; + // We might have to add a type dependency + vector to_add; + if (new_obj->type == CatalogType::TABLE_ENTRY) { + auto table = (TableCatalogEntry *)new_obj; + for (auto &column : table->columns.Logical()) { + auto user_type_catalog = LogicalType::GetCatalog(column.Type()); + if (user_type_catalog) { + to_add.push_back(user_type_catalog); + } + } + } + // add the new object to the dependency manager + dependents_map[new_obj] = dependency_set_t(); + dependencies_map[new_obj] = old_dependencies; - T* begin() FMT_NOEXCEPT { return ptr_; } - T* end() FMT_NOEXCEPT { return ptr_ + size_; } + for (auto &dependency : to_add) { + dependencies_map[new_obj].insert(dependency); + dependents_map[dependency].insert(new_obj); + } - /** Returns the size of this buffer. */ - std::size_t size() const FMT_NOEXCEPT { return size_; } + for (auto &dependency : owned_objects_to_add) { + dependents_map[new_obj].insert(Dependency(dependency, DependencyType::DEPENDENCY_OWNS)); + dependents_map[dependency].insert(Dependency(new_obj, DependencyType::DEPENDENCY_OWNED_BY)); + dependencies_map[new_obj].insert(dependency); + } +} - /** Returns the capacity of this buffer. 
*/ - std::size_t capacity() const FMT_NOEXCEPT { return capacity_; } +void DependencyManager::EraseObject(CatalogEntry *object) { + // obtain the writing lock + EraseObjectInternal(object); +} - /** Returns a pointer to the buffer data. */ - T* data() FMT_NOEXCEPT { return ptr_; } +void DependencyManager::EraseObjectInternal(CatalogEntry *object) { + if (dependents_map.find(object) == dependents_map.end()) { + // dependencies already removed + return; + } + D_ASSERT(dependents_map.find(object) != dependents_map.end()); + D_ASSERT(dependencies_map.find(object) != dependencies_map.end()); + // now for each of the dependencies, erase the entries from the dependents_map + for (auto &dependency : dependencies_map[object]) { + auto entry = dependents_map.find(dependency); + if (entry != dependents_map.end()) { + D_ASSERT(entry->second.find(object) != entry->second.end()); + entry->second.erase(object); + } + } + // erase the dependents and dependencies for this object + dependents_map.erase(object); + dependencies_map.erase(object); +} - /** Returns a pointer to the buffer data. */ - const T* data() const FMT_NOEXCEPT { return ptr_; } +void DependencyManager::Scan(const std::function &callback) { + lock_guard write_lock(catalog.write_lock); + for (auto &entry : dependents_map) { + for (auto &dependent : entry.second) { + callback(entry.first, dependent.entry, dependent.dependency_type); + } + } +} - /** - Resizes the buffer. If T is a POD type new elements may not be initialized. - */ - void resize(std::size_t new_size) { - reserve(new_size); - size_ = new_size; - } +void DependencyManager::AddOwnership(ClientContext &context, CatalogEntry *owner, CatalogEntry *entry) { + // lock the catalog for writing + lock_guard write_lock(catalog.write_lock); - /** Clears this buffer. */ - void clear() { size_ = 0; } + // If the owner is already owned by something else, throw an error + for (auto &dep : dependents_map[owner]) { + if (dep.dependency_type == DependencyType::DEPENDENCY_OWNED_BY) { + throw DependencyException(owner->name + " already owned by " + dep.entry->name); + } + } - /** Reserves space to store at least *capacity* elements. */ - void reserve(std::size_t new_capacity) { - if (new_capacity > capacity_) grow(new_capacity); - } + // If the entry is already owned, throw an error + for (auto &dep : dependents_map[entry]) { + // if the entry is already owned, throw error + if (dep.entry != owner) { + throw DependencyException(entry->name + " already depends on " + dep.entry->name); + } + // if the entry owns the owner, throw error + if (dep.entry == owner && dep.dependency_type == DependencyType::DEPENDENCY_OWNS) { + throw DependencyException(entry->name + " already owns " + owner->name + + ". Cannot have circular dependencies"); + } + } - void push_back(const T& value) { - reserve(size_ + 1); - ptr_[size_++] = value; - } + // Emplace guarantees that the same object cannot be inserted twice in the unordered_set + // In the case AddOwnership is called twice, because of emplace, the object will not be repeated in the set. + // We use an automatic dependency because if the Owner gets deleted, then the owned objects are also deleted + dependents_map[owner].emplace(Dependency(entry, DependencyType::DEPENDENCY_OWNS)); + dependents_map[entry].emplace(Dependency(owner, DependencyType::DEPENDENCY_OWNED_BY)); + dependencies_map[owner].emplace(entry); +} - /** Appends data to the end of the buffer. 
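The DependencyManager code added above keeps two mirrored maps, dependents_map (who depends on this entry) and dependencies_map (what this entry depends on), and consults them to cascade a drop, refuse it, or transfer ownership links when an entry is altered. A minimal, self-contained sketch of that bookkeeping follows; it uses plain strings and a single dependency kind, so the dependency types (REGULAR, AUTOMATIC, OWNS, OWNED_BY) and the transaction-aware catalog lookups of the real code are deliberately left out, and all names in it are illustrative.

#include <iostream>
#include <map>
#include <set>
#include <stdexcept>
#include <string>

class DependencyGraph {
public:
    // Register `object`, recording that it depends on each entry of `deps`.
    void AddObject(const std::string &object, const std::set<std::string> &deps) {
        for (auto &d : deps) {
            dependents[d].insert(object); // d is depended on by object
        }
        dependents[object];        // starts out with no dependents
        dependencies[object] = deps;
    }

    // Drop `object`; with cascade, recursively drop everything that depends on it,
    // otherwise refuse if any dependent still exists.
    void DropObject(const std::string &object, bool cascade) {
        auto deps = dependents[object]; // copy: the maps are mutated while we iterate
        for (auto &dep : deps) {
            if (!cascade) {
                throw std::runtime_error("Cannot drop \"" + object + "\": \"" + dep +
                                         "\" depends on it (use CASCADE)");
            }
            DropObject(dep, cascade);
        }
        for (auto &d : dependencies[object]) {
            dependents[d].erase(object);
        }
        dependents.erase(object);
        dependencies.erase(object);
    }

private:
    std::map<std::string, std::set<std::string>> dependents;   // object -> objects depending on it
    std::map<std::string, std::set<std::string>> dependencies; // object -> objects it depends on
};

int main() {
    DependencyGraph g;
    g.AddObject("tbl", {});
    g.AddObject("idx", {"tbl"});
    try {
        g.DropObject("tbl", /*cascade=*/false); // throws: idx depends on tbl
    } catch (const std::exception &e) {
        std::cout << e.what() << "\n";
    }
    g.DropObject("tbl", /*cascade=*/true); // drops idx, then tbl
}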
*/ - template void append(const U* begin, const U* end); +} // namespace duckdb - T& operator[](std::size_t index) { return ptr_[index]; } - const T& operator[](std::size_t index) const { return ptr_[index]; } -}; -// A container-backed buffer. -template -class container_buffer : public buffer { - private: - Container& container_; - protected: - void grow(std::size_t capacity) FMT_OVERRIDE { - container_.resize(capacity); - this->set(&container_[0], capacity); - } - public: - explicit container_buffer(Container& c) - : buffer(c.size()), container_(c) {} -}; -// Extracts a reference to the container from back_insert_iterator. -template -inline Container& get_container(std::back_insert_iterator it) { - using bi_iterator = std::back_insert_iterator; - struct accessor : bi_iterator { - accessor(bi_iterator iter) : bi_iterator(iter) {} - using bi_iterator::container; - }; - return *accessor(it).container; -} -template -struct fallback_formatter { - fallback_formatter() = delete; -}; +#include -// Specifies if T has an enabled fallback_formatter specialization. -template -using has_fallback_formatter = - std::is_constructible>; +#ifdef DUCKDB_DEBUG_ALLOCATION -template struct named_arg_base; -template struct named_arg; -enum type { - none_type, - named_arg_type, - // Integer types should go first, - int_type, - uint_type, - long_long_type, - ulong_long_type, - int128_type, - uint128_type, - bool_type, - char_type, - last_integer_type = char_type, - // followed by floating-point types. - float_type, - double_type, - long_double_type, - last_numeric_type = long_double_type, - cstring_type, - string_type, - pointer_type, - custom_type -}; -// Maps core type T to the corresponding type enum constant. -template -struct type_constant : std::integral_constant {}; -#define FMT_TYPE_CONSTANT(Type, constant) \ - template \ - struct type_constant : std::integral_constant {} +#include +#endif -FMT_TYPE_CONSTANT(const named_arg_base&, named_arg_type); -FMT_TYPE_CONSTANT(int, int_type); -FMT_TYPE_CONSTANT(unsigned, uint_type); -FMT_TYPE_CONSTANT(long long, long_long_type); -FMT_TYPE_CONSTANT(unsigned long long, ulong_long_type); -FMT_TYPE_CONSTANT(int128_t, int128_type); -FMT_TYPE_CONSTANT(uint128_t, uint128_type); -FMT_TYPE_CONSTANT(bool, bool_type); -FMT_TYPE_CONSTANT(Char, char_type); -FMT_TYPE_CONSTANT(float, float_type); -FMT_TYPE_CONSTANT(double, double_type); -FMT_TYPE_CONSTANT(long double, long_double_type); -FMT_TYPE_CONSTANT(const Char*, cstring_type); -FMT_TYPE_CONSTANT(basic_string_view, string_type); -FMT_TYPE_CONSTANT(const void*, pointer_type); +#if defined(BUILD_JEMALLOC_EXTENSION) && !defined(WIN32) +#include "jemalloc-extension.hpp" +#endif -FMT_CONSTEXPR bool is_integral_type(type t) { - FMT_ASSERT(t != named_arg_type, "invalid argument type"); - return t > none_type && t <= last_integer_type; +namespace duckdb { + +AllocatedData::AllocatedData() : allocator(nullptr), pointer(nullptr), allocated_size(0) { } -FMT_CONSTEXPR bool is_arithmetic_type(type t) { - FMT_ASSERT(t != named_arg_type, "invalid argument type"); - return t > none_type && t <= last_numeric_type; +AllocatedData::AllocatedData(Allocator &allocator, data_ptr_t pointer, idx_t allocated_size) + : allocator(&allocator), pointer(pointer), allocated_size(allocated_size) { +} +AllocatedData::~AllocatedData() { + Reset(); } -template struct string_value { - const Char* data; - std::size_t size; -}; +AllocatedData::AllocatedData(AllocatedData &&other) noexcept + : allocator(other.allocator), pointer(nullptr), allocated_size(0) { + 
std::swap(pointer, other.pointer); + std::swap(allocated_size, other.allocated_size); +} -template struct custom_value { - using parse_context = basic_format_parse_context; - const void* value; - void (*format)(const void* arg, parse_context& parse_ctx, Context& ctx); +AllocatedData &AllocatedData::operator=(AllocatedData &&other) noexcept { + std::swap(allocator, other.allocator); + std::swap(pointer, other.pointer); + std::swap(allocated_size, other.allocated_size); + return *this; +} + +void AllocatedData::Reset() { + if (!pointer) { + return; + } + D_ASSERT(allocator); + allocator->FreeData(pointer, allocated_size); + allocated_size = 0; + pointer = nullptr; +} + +//===--------------------------------------------------------------------===// +// Debug Info +//===--------------------------------------------------------------------===// +struct AllocatorDebugInfo { +#ifdef DEBUG + AllocatorDebugInfo(); + ~AllocatorDebugInfo(); + + void AllocateData(data_ptr_t pointer, idx_t size); + void FreeData(data_ptr_t pointer, idx_t size); + void ReallocateData(data_ptr_t pointer, data_ptr_t new_pointer, idx_t old_size, idx_t new_size); + +private: + //! The number of bytes that are outstanding (i.e. that have been allocated - but not freed) + //! Used for debug purposes + atomic allocation_count; +#ifdef DUCKDB_DEBUG_ALLOCATION + mutex pointer_lock; + //! Set of active outstanding pointers together with stack traces + unordered_map> pointers; +#endif +#endif }; -// A formatting argument value. -template class value { - public: - using char_type = typename Context::char_type; +PrivateAllocatorData::PrivateAllocatorData() { +} - union { - int int_value; - unsigned uint_value; - long long long_long_value; - unsigned long long ulong_long_value; - int128_t int128_value; - uint128_t uint128_value; - bool bool_value; - char_type char_value; - float float_value; - double double_value; - long double long_double_value; - const void* pointer; - string_value string; - custom_value custom; - const named_arg_base* named_arg; - }; +PrivateAllocatorData::~PrivateAllocatorData() { +} - FMT_CONSTEXPR value(int val = 0) : int_value(val) {} - FMT_CONSTEXPR value(unsigned val) : uint_value(val) {} - value(long long val) : long_long_value(val) {} - value(unsigned long long val) : ulong_long_value(val) {} - value(int128_t val) : int128_value(val) {} - value(uint128_t val) : uint128_value(val) {} - value(float val) : float_value(val) {} - value(double val) : double_value(val) {} - value(long double val) : long_double_value(val) {} - value(bool val) : bool_value(val) {} - value(char_type val) : char_value(val) {} - value(const char_type* val) { string.data = val; } - value(basic_string_view val) { - string.data = val.data(); - string.size = val.size(); - } - value(const void* val) : pointer(val) {} +//===--------------------------------------------------------------------===// +// Allocator +//===--------------------------------------------------------------------===// +#if defined(BUILD_JEMALLOC_EXTENSION) && !defined(WIN32) +Allocator::Allocator() + : Allocator(JEMallocExtension::Allocate, JEMallocExtension::Free, JEMallocExtension::Reallocate, nullptr) { +} +#else +Allocator::Allocator() + : Allocator(Allocator::DefaultAllocate, Allocator::DefaultFree, Allocator::DefaultReallocate, nullptr) { +} +#endif - template value(const T& val) { - custom.value = &val; - // Get the formatter type through the context to allow different contexts - // have different extension points, e.g. 
`formatter` for `format` and - // `printf_formatter` for `printf`. - custom.format = format_custom_arg< - T, conditional_t::value, - typename Context::template formatter_type, - fallback_formatter>>; - } +Allocator::Allocator(allocate_function_ptr_t allocate_function_p, free_function_ptr_t free_function_p, + reallocate_function_ptr_t reallocate_function_p, unique_ptr private_data_p) + : allocate_function(allocate_function_p), free_function(free_function_p), + reallocate_function(reallocate_function_p), private_data(move(private_data_p)) { + D_ASSERT(allocate_function); + D_ASSERT(free_function); + D_ASSERT(reallocate_function); +#ifdef DEBUG + if (!private_data) { + private_data = make_unique(); + } + private_data->debug_info = make_unique(); +#endif +} - value(const named_arg_base& val) { named_arg = &val; } +Allocator::~Allocator() { +} - private: - // Formats an argument of a custom type, such as a user-defined class. - template - static void format_custom_arg( - const void* arg, basic_format_parse_context& parse_ctx, - Context& ctx) { - Formatter f; - parse_ctx.advance_to(f.parse(parse_ctx)); - ctx.advance_to(f.format(*static_cast(arg), ctx)); - } -}; +data_ptr_t Allocator::AllocateData(idx_t size) { + D_ASSERT(size > 0); + auto result = allocate_function(private_data.get(), size); +#ifdef DEBUG + D_ASSERT(private_data); + private_data->debug_info->AllocateData(result, size); +#endif + return result; +} -template -FMT_CONSTEXPR basic_format_arg make_arg(const T& value); +void Allocator::FreeData(data_ptr_t pointer, idx_t size) { + if (!pointer) { + return; + } + D_ASSERT(size > 0); +#ifdef DEBUG + D_ASSERT(private_data); + private_data->debug_info->FreeData(pointer, size); +#endif + free_function(private_data.get(), pointer, size); +} -// To minimize the number of types we need to deal with, long is translated -// either to int or to long long depending on its size. -enum { long_short = sizeof(long) == sizeof(int) }; -using long_type = conditional_t; -using ulong_type = conditional_t; +data_ptr_t Allocator::ReallocateData(data_ptr_t pointer, idx_t old_size, idx_t size) { + if (!pointer) { + return nullptr; + } + auto new_pointer = reallocate_function(private_data.get(), pointer, old_size, size); +#ifdef DEBUG + D_ASSERT(private_data); + private_data->debug_info->ReallocateData(pointer, new_pointer, old_size, size); +#endif + return new_pointer; +} -// Maps formatting arguments to core types. 
-template struct arg_mapper { - using char_type = typename Context::char_type; +shared_ptr &Allocator::DefaultAllocatorReference() { + static shared_ptr DEFAULT_ALLOCATOR = make_shared(); + return DEFAULT_ALLOCATOR; +} - FMT_CONSTEXPR int map(signed char val) { return val; } - FMT_CONSTEXPR unsigned map(unsigned char val) { return val; } - FMT_CONSTEXPR int map(short val) { return val; } - FMT_CONSTEXPR unsigned map(unsigned short val) { return val; } - FMT_CONSTEXPR int map(int val) { return val; } - FMT_CONSTEXPR unsigned map(unsigned val) { return val; } - FMT_CONSTEXPR long_type map(long val) { return val; } - FMT_CONSTEXPR ulong_type map(unsigned long val) { return val; } - FMT_CONSTEXPR long long map(long long val) { return val; } - FMT_CONSTEXPR unsigned long long map(unsigned long long val) { return val; } - FMT_CONSTEXPR int128_t map(int128_t val) { return val; } - FMT_CONSTEXPR uint128_t map(uint128_t val) { return val; } - FMT_CONSTEXPR bool map(bool val) { return val; } +Allocator &Allocator::DefaultAllocator() { + return *DefaultAllocatorReference(); +} - template ::value)> - FMT_CONSTEXPR char_type map(T val) { - static_assert( - std::is_same::value || std::is_same::value, - "mixing character types is disallowed"); - return val; - } +//===--------------------------------------------------------------------===// +// Debug Info (extended) +//===--------------------------------------------------------------------===// +#ifdef DEBUG +AllocatorDebugInfo::AllocatorDebugInfo() { + allocation_count = 0; +} +AllocatorDebugInfo::~AllocatorDebugInfo() { +#ifdef DUCKDB_DEBUG_ALLOCATION + if (allocation_count != 0) { + printf("Outstanding allocations found for Allocator\n"); + for (auto &entry : pointers) { + printf("Allocation of size %llu at address %p\n", entry.second.first, (void *)entry.first); + printf("Stack trace:\n%s\n", entry.second.second.c_str()); + printf("\n"); + } + } +#endif + //! Verify that there is no outstanding memory still associated with the batched allocator + //! Only works for access to the batched allocator through the batched allocator interface + //! 
If this assertion triggers, enable DUCKDB_DEBUG_ALLOCATION for more information about the allocations + D_ASSERT(allocation_count == 0); +} - FMT_CONSTEXPR float map(float val) { return val; } - FMT_CONSTEXPR double map(double val) { return val; } - FMT_CONSTEXPR long double map(long double val) { return val; } +void AllocatorDebugInfo::AllocateData(data_ptr_t pointer, idx_t size) { + allocation_count += size; +#ifdef DUCKDB_DEBUG_ALLOCATION + lock_guard l(pointer_lock); + pointers[pointer] = make_pair(size, Exception::GetStackTrace()); +#endif +} - FMT_CONSTEXPR const char_type* map(char_type* val) { return val; } - FMT_CONSTEXPR const char_type* map(const char_type* val) { return val; } - template ::value)> - FMT_CONSTEXPR basic_string_view map(const T& val) { - static_assert(std::is_same>::value, - "mixing character types is disallowed"); - return to_string_view(val); - } - template , T>::value && - !is_string::value)> - FMT_CONSTEXPR basic_string_view map(const T& val) { - return basic_string_view(val); - } - template < - typename T, - FMT_ENABLE_IF( - std::is_constructible, T>::value && - !std::is_constructible, T>::value && - !is_string::value && !has_formatter::value)> - FMT_CONSTEXPR basic_string_view map(const T& val) { - return std_string_view(val); - } - FMT_CONSTEXPR const char* map(const signed char* val) { - static_assert(std::is_same::value, "invalid string type"); - return reinterpret_cast(val); - } - FMT_CONSTEXPR const char* map(const unsigned char* val) { - static_assert(std::is_same::value, "invalid string type"); - return reinterpret_cast(val); - } +void AllocatorDebugInfo::FreeData(data_ptr_t pointer, idx_t size) { + D_ASSERT(allocation_count >= size); + allocation_count -= size; +#ifdef DUCKDB_DEBUG_ALLOCATION + lock_guard l(pointer_lock); + // verify that the pointer exists + D_ASSERT(pointers.find(pointer) != pointers.end()); + // verify that the stored size matches the passed in size + D_ASSERT(pointers[pointer].first == size); + // erase the pointer + pointers.erase(pointer); +#endif +} - FMT_CONSTEXPR const void* map(void* val) { return val; } - FMT_CONSTEXPR const void* map(const void* val) { return val; } - FMT_CONSTEXPR const void* map(std::nullptr_t val) { return val; } - template FMT_CONSTEXPR int map(const T*) { - // Formatting of arbitrary pointers is disallowed. If you want to output - // a pointer cast it to "void *" or "const void *". In particular, this - // forbids formatting of "[const] volatile char *" which is printed as bool - // by iostreams. 
- static_assert(!sizeof(T), "formatting of non-void pointers is disallowed"); - return 0; - } +void AllocatorDebugInfo::ReallocateData(data_ptr_t pointer, data_ptr_t new_pointer, idx_t old_size, idx_t new_size) { + FreeData(pointer, old_size); + AllocateData(new_pointer, new_size); +} - template ::value && - !has_formatter::value && - !has_fallback_formatter::value)> - FMT_CONSTEXPR auto map(const T& val) -> decltype( - map(static_cast::type>(val))) { - return map(static_cast::type>(val)); - } - template < - typename T, - FMT_ENABLE_IF( - !is_string::value && !is_char::value && - !std::is_constructible, T>::value && - (has_formatter::value || - (has_fallback_formatter::value && - !std::is_constructible, T>::value)))> - FMT_CONSTEXPR const T& map(const T& val) { - return val; - } +#endif - template - FMT_CONSTEXPR const named_arg_base& map( - const named_arg& val) { - auto arg = make_arg(val.value); - std::memcpy(val.data, &arg, sizeof(arg)); - return val; - } -}; +} // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/arrow/arrow_appender.hpp +// +// +//===----------------------------------------------------------------------===// -// A type constant after applying arg_mapper. -template -using mapped_type_constant = - type_constant().map(std::declval())), - typename Context::char_type>; -enum { packed_arg_bits = 5 }; -// Maximum number of arguments with packed types. -enum { max_packed_args = 63 / packed_arg_bits }; -enum : unsigned long long { is_unpacked_bit = 1ULL << 63 }; -template class arg_map; -} // namespace internal -// A formatting argument. It is a trivially copyable/constructible type to -// allow storage in basic_memory_buffer. -template class basic_format_arg { - private: - internal::value value_; - internal::type type_; - template - friend FMT_CONSTEXPR basic_format_arg internal::make_arg( - const T& value); +struct ArrowSchema; - template - friend FMT_CONSTEXPR auto visit_format_arg(Visitor&& vis, - const basic_format_arg& arg) - -> decltype(vis(0)); +namespace duckdb { - friend class basic_format_args; - friend class internal::arg_map; +struct ArrowAppendData; - using char_type = typename Context::char_type; +//! The ArrowAppender class can be used to incrementally construct an arrow array by appending data chunks into it +class ArrowAppender { +public: + DUCKDB_API ArrowAppender(vector types, idx_t initial_capacity); + DUCKDB_API ~ArrowAppender(); - public: - class handle { - public: - explicit handle(internal::custom_value custom) : custom_(custom) {} + //! Append a data chunk to the underlying arrow array + DUCKDB_API void Append(DataChunk &input); + //! Returns the underlying arrow array + DUCKDB_API ArrowArray Finalize(); - void format(basic_format_parse_context& parse_ctx, - Context& ctx) const { - custom_.format(custom_.value, parse_ctx, ctx); - } +private: + //! The types of the chunks that will be appended in + vector types; + //! The root arrow append data + vector> root_data; + //! 
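The Allocator added above routes every allocation through swappable allocate/free/reallocate function pointers (which is how the jemalloc extension can replace the defaults) and, in debug builds, keeps an atomic count of outstanding bytes that must be back at zero when the allocator is destroyed. A compact sketch of those two ideas is shown below, with illustrative names and malloc/free as the default backends; the optional stack-trace tracking behind DUCKDB_DEBUG_ALLOCATION is omitted.

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Function-pointer indirection, as in the Allocator above: the backing
// allocate/free routines are swappable at construction time.
using allocate_fn = void *(*)(std::size_t size);
using free_fn = void (*)(void *ptr);

class CountingAllocator {
public:
    CountingAllocator(allocate_fn alloc = std::malloc, free_fn dealloc = std::free)
        : alloc_(alloc), dealloc_(dealloc) {
    }
    ~CountingAllocator() {
        // every byte handed out must have been returned; this mirrors the
        // D_ASSERT(allocation_count == 0) in the debug-info destructor above
        assert(outstanding_.load() == 0 && "allocator destroyed with live allocations");
    }
    void *AllocateData(std::size_t size) {
        outstanding_ += size;
        return alloc_(size);
    }
    void FreeData(void *ptr, std::size_t size) {
        assert(outstanding_.load() >= size);
        outstanding_ -= size;
        dealloc_(ptr);
    }

private:
    allocate_fn alloc_;
    free_fn dealloc_;
    std::atomic<std::size_t> outstanding_ {0}; // bytes allocated but not yet freed
};

int main() {
    CountingAllocator allocator; // defaults to malloc/free
    void *p = allocator.AllocateData(128);
    std::printf("allocated 128 bytes at %p\n", p);
    allocator.FreeData(p, 128); // omitting this trips the assert at destruction
}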
The total row count that has been appended + idx_t row_count = 0; +}; - private: - internal::custom_value custom_; - }; +} // namespace duckdb - FMT_CONSTEXPR basic_format_arg() : type_(internal::none_type) {} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/arrow/arrow_buffer.hpp +// +// +//===----------------------------------------------------------------------===// - FMT_CONSTEXPR explicit operator bool() const FMT_NOEXCEPT { - return type_ != internal::none_type; - } - internal::type type() const { return type_; } - bool is_integral() const { return internal::is_integral_type(type_); } - bool is_arithmetic() const { return internal::is_arithmetic_type(type_); } -}; -/** - \rst - Visits an argument dispatching to the appropriate visit method based on - the argument type. For example, if the argument type is ``double`` then - ``vis(value)`` will be called with the value of type ``double``. - \endrst - */ -template -FMT_CONSTEXPR auto visit_format_arg(Visitor&& vis, - const basic_format_arg& arg) - -> decltype(vis(0)) { - using char_type = typename Context::char_type; - switch (arg.type_) { - case internal::none_type: - break; - case internal::named_arg_type: - FMT_ASSERT(false, "invalid argument type"); - break; - case internal::int_type: - return vis(arg.value_.int_value); - case internal::uint_type: - return vis(arg.value_.uint_value); - case internal::long_long_type: - return vis(arg.value_.long_long_value); - case internal::ulong_long_type: - return vis(arg.value_.ulong_long_value); -#if FMT_USE_INT128 - case internal::int128_type: - return vis(arg.value_.int128_value); - case internal::uint128_type: - return vis(arg.value_.uint128_value); -#else - case internal::int128_type: - case internal::uint128_type: - break; -#endif - case internal::bool_type: - return vis(arg.value_.bool_value); - case internal::char_type: - return vis(arg.value_.char_value); - case internal::float_type: - return vis(arg.value_.float_value); - case internal::double_type: - return vis(arg.value_.double_value); - case internal::long_double_type: - return vis(arg.value_.long_double_value); - case internal::cstring_type: - return vis(arg.value_.string.data); - case internal::string_type: - return vis(basic_string_view(arg.value_.string.data, - arg.value_.string.size)); - case internal::pointer_type: - return vis(arg.value_.pointer); - case internal::custom_type: - return vis(typename basic_format_arg::handle(arg.value_.custom)); - } - return vis(monostate()); -} -namespace internal { -// A map from argument names to their values for named arguments. -template class arg_map { - private: - using char_type = typename Context::char_type; +struct ArrowSchema; - struct entry { - basic_string_view name; - basic_format_arg arg; - }; +namespace duckdb { - entry* map_; - unsigned size_; +struct ArrowBuffer { + static constexpr const idx_t MINIMUM_SHRINK_SIZE = 4096; - void push_back(value val) { - const auto& named = *val.named_arg; - map_[size_] = {named.name, named.template deserialize()}; - ++size_; - } + ArrowBuffer() : dataptr(nullptr), count(0), capacity(0) { + } + ~ArrowBuffer() { + if (!dataptr) { + return; + } + free(dataptr); + dataptr = nullptr; + count = 0; + capacity = 0; + } + // disable copy constructors + ArrowBuffer(const ArrowBuffer &other) = delete; + ArrowBuffer &operator=(const ArrowBuffer &) = delete; + //! 
enable move constructors + ArrowBuffer(ArrowBuffer &&other) noexcept { + std::swap(dataptr, other.dataptr); + std::swap(count, other.count); + std::swap(capacity, other.capacity); + } + ArrowBuffer &operator=(ArrowBuffer &&other) noexcept { + std::swap(dataptr, other.dataptr); + std::swap(count, other.count); + std::swap(capacity, other.capacity); + return *this; + } - public: - arg_map(const arg_map&) = delete; - void operator=(const arg_map&) = delete; - arg_map() : map_(nullptr), size_(0) {} - void init(const basic_format_args& args); - ~arg_map() { delete[] map_; } + void reserve(idx_t bytes) { // NOLINT + auto new_capacity = NextPowerOfTwo(bytes); + if (new_capacity <= capacity) { + return; + } + ReserveInternal(new_capacity); + } - basic_format_arg find(basic_string_view name) const { - // The list is unsorted, so just return the first matching name. - for (entry *it = map_, *end = map_ + size_; it != end; ++it) { - if (it->name == name) return it->arg; - } - return {}; - } -}; + void resize(idx_t bytes) { // NOLINT + reserve(bytes); + count = bytes; + } -// A type-erased reference to an std::locale to avoid heavy include. -class locale_ref { - private: - const void* locale_; // A type-erased pointer to std::locale. + void resize(idx_t bytes, data_t value) { // NOLINT + reserve(bytes); + for (idx_t i = count; i < bytes; i++) { + dataptr[i] = value; + } + count = bytes; + } - public: - locale_ref() : locale_(nullptr) {} - template explicit locale_ref(const Locale& loc); + idx_t size() { // NOLINT + return count; + } - explicit operator bool() const FMT_NOEXCEPT { return locale_ != nullptr; } + data_ptr_t data() { // NOLINT + return dataptr; + } - template Locale get() const; +private: + void ReserveInternal(idx_t bytes) { + if (dataptr) { + dataptr = (data_ptr_t)realloc(dataptr, bytes); + } else { + dataptr = (data_ptr_t)malloc(bytes); + } + capacity = bytes; + } + +private: + data_ptr_t dataptr = nullptr; + idx_t count = 0; + idx_t capacity = 0; }; -template constexpr unsigned long long encode_types() { return 0; } +} // namespace duckdb -template -constexpr unsigned long long encode_types() { - return mapped_type_constant::value | - (encode_types() << packed_arg_bits); -} -template -FMT_CONSTEXPR basic_format_arg make_arg(const T& value) { - basic_format_arg arg; - arg.type_ = mapped_type_constant::value; - arg.value_ = arg_mapper().map(value); - return arg; -} +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/array.hpp +// +// +//===----------------------------------------------------------------------===// -template -inline value make_arg(const T& val) { - return arg_mapper().map(val); -} -template -inline basic_format_arg make_arg(const T& value) { - return make_arg(value); + +#include + +namespace duckdb { +using std::array; } -} // namespace internal -// Formatting context. -template class basic_format_context { - public: - /** The character type for the output. */ - using char_type = Char; - private: - OutputIt out_; - basic_format_args args_; - internal::arg_map map_; - internal::locale_ref loc_; - public: - using iterator = OutputIt; - using format_arg = basic_format_arg; - template using formatter_type = formatter; - basic_format_context(const basic_format_context&) = delete; - void operator=(const basic_format_context&) = delete; - /** - Constructs a ``basic_format_context`` object. References to the arguments are - stored in the object so make sure they have appropriate lifetimes. 
- */ - basic_format_context(OutputIt out, - basic_format_args ctx_args, - internal::locale_ref loc = internal::locale_ref()) - : out_(out), args_(ctx_args), loc_(loc) {} +namespace duckdb { - format_arg arg(int id) const { return args_.get(id); } +//===--------------------------------------------------------------------===// +// Arrow append data +//===--------------------------------------------------------------------===// +typedef void (*initialize_t)(ArrowAppendData &result, const LogicalType &type, idx_t capacity); +typedef void (*append_vector_t)(ArrowAppendData &append_data, Vector &input, idx_t size); +typedef void (*finalize_t)(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result); - // Checks if manual indexing is used and returns the argument with the - // specified name. - format_arg arg(basic_string_view name); +struct ArrowAppendData { + // the buffers of the arrow vector + ArrowBuffer validity; + ArrowBuffer main_buffer; + ArrowBuffer aux_buffer; - internal::error_handler error_handler() { return {}; } - void on_error(const char* message) { error_handler().on_error(message); } + idx_t row_count = 0; + idx_t null_count = 0; - // Returns an iterator to the beginning of the output range. - iterator out() { return out_; } + // function pointers for construction + initialize_t initialize = nullptr; + append_vector_t append_vector = nullptr; + finalize_t finalize = nullptr; - // Advances the begin iterator to ``it``. - void advance_to(iterator it) { out_ = it; } + // child data (if any) + vector> child_data; - internal::locale_ref locale() { return loc_; } + //! the arrow array C API data, only set after Finalize + unique_ptr array; + duckdb::array buffers = {{nullptr, nullptr, nullptr}}; + vector child_pointers; }; -template -using buffer_context = - basic_format_context>, - Char>; -using format_context = buffer_context; -using wformat_context = buffer_context; - -/** - \rst - An array of references to arguments. It can be implicitly converted into - `~fmt::basic_format_args` for passing into type-erased formatting functions - such as `~fmt::vformat`. - \endrst - */ -template class format_arg_store { - private: - static const size_t num_args = sizeof...(Args); - static const bool is_packed = num_args < internal::max_packed_args; +//===--------------------------------------------------------------------===// +// ArrowAppender +//===--------------------------------------------------------------------===// +static unique_ptr InitializeArrowChild(const LogicalType &type, idx_t capacity); +static ArrowArray *FinalizeArrowChild(const LogicalType &type, ArrowAppendData &append_data); - using value_type = conditional_t, - basic_format_arg>; +ArrowAppender::ArrowAppender(vector types_p, idx_t initial_capacity) : types(move(types_p)) { + for (auto &type : types) { + auto entry = InitializeArrowChild(type, initial_capacity); + root_data.push_back(move(entry)); + } +} - // If the arguments are not packed, add one more element to mark the end. - value_type data_[num_args + (num_args == 0 ? 1 : 0)]; +ArrowAppender::~ArrowAppender() { +} - friend class basic_format_args; +//===--------------------------------------------------------------------===// +// Append Helper Functions +//===--------------------------------------------------------------------===// +static void GetBitPosition(idx_t row_idx, idx_t ¤t_byte, uint8_t ¤t_bit) { + current_byte = row_idx / 8; + current_bit = row_idx % 8; +} - public: - static constexpr unsigned long long types = - is_packed ? 
internal::encode_types() - : internal::is_unpacked_bit | num_args; +static void UnsetBit(uint8_t *data, idx_t current_byte, uint8_t current_bit) { + data[current_byte] &= ~((uint64_t)1 << current_bit); +} - format_arg_store(const Args&... args) - : data_{internal::make_arg(args)...} {} -}; +static void NextBit(idx_t ¤t_byte, uint8_t ¤t_bit) { + current_bit++; + if (current_bit == 8) { + current_byte++; + current_bit = 0; + } +} -/** - \rst - Constructs an `~fmt::format_arg_store` object that contains references to - arguments and can be implicitly converted to `~fmt::format_args`. `Context` - can be omitted in which case it defaults to `~fmt::context`. - See `~fmt::arg` for lifetime considerations. - \endrst - */ -template -inline format_arg_store make_format_args( - const Args&... args) { - return {args...}; +static void ResizeValidity(ArrowBuffer &buffer, idx_t row_count) { + auto byte_count = (row_count + 7) / 8; + buffer.resize(byte_count, 0xFF); } -/** Formatting arguments. */ -template class basic_format_args { - public: - using size_type = int; - using format_arg = basic_format_arg; +static void SetNull(ArrowAppendData &append_data, uint8_t *validity_data, idx_t current_byte, uint8_t current_bit) { + UnsetBit(validity_data, current_byte, current_bit); + append_data.null_count++; +} - private: - // To reduce compiled code size per formatting function call, types of first - // max_packed_args arguments are passed in the types_ field. - unsigned long long types_; - union { - // If the number of arguments is less than max_packed_args, the argument - // values are stored in values_, otherwise they are stored in args_. - // This is done to reduce compiled code size as storing larger objects - // may require more code (at least on x86-64) even if the same amount of - // data is actually copied to stack. It saves ~10% on the bloat test. 
- const internal::value* values_; - const format_arg* args_; - }; +static void AppendValidity(ArrowAppendData &append_data, UnifiedVectorFormat &format, idx_t size) { + // resize the buffer, filling the validity buffer with all valid values + ResizeValidity(append_data.validity, append_data.row_count + size); + if (format.validity.AllValid()) { + // if all values are valid we don't need to do anything else + return; + } - bool is_packed() const { return (types_ & internal::is_unpacked_bit) == 0; } + // otherwise we iterate through the validity mask + auto validity_data = (uint8_t *)append_data.validity.data(); + uint8_t current_bit; + idx_t current_byte; + GetBitPosition(append_data.row_count, current_byte, current_bit); + for (idx_t i = 0; i < size; i++) { + auto source_idx = format.sel->get_index(i); + // append the validity mask + if (!format.validity.RowIsValid(source_idx)) { + SetNull(append_data, validity_data, current_byte, current_bit); + } + NextBit(current_byte, current_bit); + } +} - internal::type type(int index) const { - int shift = index * internal::packed_arg_bits; - unsigned int mask = (1 << internal::packed_arg_bits) - 1; - return static_cast((types_ >> shift) & mask); - } +//===--------------------------------------------------------------------===// +// Scalar Types +//===--------------------------------------------------------------------===// +struct ArrowScalarConverter { + template + static TGT Operation(SRC input) { + return input; + } - friend class internal::arg_map; + static bool SkipNulls() { + return false; + } - void set_data(const internal::value* values) { values_ = values; } - void set_data(const format_arg* args) { args_ = args; } + template + static void SetNull(TGT &value) { + } +}; - format_arg do_get(int index) const { - format_arg arg; - if (!is_packed()) { - auto num_args = max_size(); - if (index < num_args) arg = args_[index]; - return arg; - } - if (index > internal::max_packed_args) return arg; - arg.type_ = type(index); - if (arg.type_ == internal::none_type) return arg; - internal::value& val = arg.value_; - val = values_[index]; - return arg; - } +struct ArrowIntervalConverter { + template + static TGT Operation(SRC input) { + return Interval::GetMilli(input); + } - public: - basic_format_args() : types_(0) {} + static bool SkipNulls() { + return true; + } - /** - \rst - Constructs a `basic_format_args` object from `~fmt::format_arg_store`. - \endrst - */ - template - basic_format_args(const format_arg_store& store) - : types_(store.types) { - set_data(store.data_); - } + template + static void SetNull(TGT &value) { + value = 0; + } +}; - /** - \rst - Constructs a `basic_format_args` object from a dynamic set of arguments. - \endrst - */ - basic_format_args(const format_arg* args, int count) - : types_(internal::is_unpacked_bit | internal::to_unsigned(count)) { - set_data(args); - } +template +struct ArrowScalarBaseData { + static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { + UnifiedVectorFormat format; + input.ToUnifiedFormat(size, format); - /** Returns the argument at specified index. */ - format_arg get(int index) const { - format_arg arg = do_get(index); - if (arg.type_ == internal::named_arg_type) - arg = arg.value_.named_arg->template deserialize(); - return arg; - } + // append the validity mask + AppendValidity(append_data, format, size); - int max_size() const { - unsigned long long max_packed = internal::max_packed_args; - return static_cast(is_packed() ? 
max_packed - : types_ & ~internal::is_unpacked_bit); - } -}; + // append the main data + append_data.main_buffer.resize(append_data.main_buffer.size() + sizeof(TGT) * size); + auto data = (SRC *)format.data; + auto result_data = (TGT *)append_data.main_buffer.data(); -/** An alias to ``basic_format_args``. */ -// It is a separate type rather than an alias to make symbols readable. -struct format_args : basic_format_args { - template - format_args(Args&&... args) - : basic_format_args(std::forward(args)...) {} -}; -struct wformat_args : basic_format_args { - template - wformat_args(Args&&... args) - : basic_format_args(std::forward(args)...) {} + for (idx_t i = 0; i < size; i++) { + auto source_idx = format.sel->get_index(i); + auto result_idx = append_data.row_count + i; + + if (OP::SkipNulls() && !format.validity.RowIsValid(source_idx)) { + OP::template SetNull(result_data[result_idx]); + continue; + } + result_data[result_idx] = OP::template Operation(data[source_idx]); + } + append_data.row_count += size; + } }; -template struct is_contiguous : std::false_type {}; +template +struct ArrowScalarData : public ArrowScalarBaseData { + static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { + result.main_buffer.reserve(capacity * sizeof(TGT)); + } -template -struct is_contiguous> : std::true_type {}; + static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { + result->n_buffers = 2; + result->buffers[1] = append_data.main_buffer.data(); + } +}; -template -struct is_contiguous> : std::true_type {}; +//===--------------------------------------------------------------------===// +// Enums +//===--------------------------------------------------------------------===// +template +struct ArrowEnumData : public ArrowScalarBaseData { + static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { + result.main_buffer.reserve(capacity * sizeof(TGT)); + // construct the enum child data + auto enum_data = InitializeArrowChild(LogicalType::VARCHAR, EnumType::GetSize(type)); + enum_data->append_vector(*enum_data, EnumType::GetValuesInsertOrder(type), EnumType::GetSize(type)); + result.child_data.push_back(move(enum_data)); + } -namespace internal { + static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { + result->n_buffers = 2; + result->buffers[1] = append_data.main_buffer.data(); + // finalize the enum child data, and assign it to the dictionary + result->dictionary = FinalizeArrowChild(LogicalType::VARCHAR, *append_data.child_data[0]); + } +}; -template -struct is_contiguous_back_insert_iterator : std::false_type {}; -template -struct is_contiguous_back_insert_iterator> - : is_contiguous {}; +//===--------------------------------------------------------------------===// +// Boolean +//===--------------------------------------------------------------------===// +struct ArrowBoolData { + static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { + auto byte_count = (capacity + 7) / 8; + result.main_buffer.reserve(byte_count); + } -template struct named_arg_base { - basic_string_view name; + static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { + UnifiedVectorFormat format; + input.ToUnifiedFormat(size, format); - // Serialized value. 
- mutable char data[sizeof(basic_format_arg>)]; + // we initialize both the validity and the bit set to 1's + ResizeValidity(append_data.validity, append_data.row_count + size); + ResizeValidity(append_data.main_buffer, append_data.row_count + size); + auto data = (bool *)format.data; - named_arg_base(basic_string_view nm) : name(nm) {} + auto result_data = (uint8_t *)append_data.main_buffer.data(); + auto validity_data = (uint8_t *)append_data.validity.data(); + uint8_t current_bit; + idx_t current_byte; + GetBitPosition(append_data.row_count, current_byte, current_bit); + for (idx_t i = 0; i < size; i++) { + auto source_idx = format.sel->get_index(i); + // append the validity mask + if (!format.validity.RowIsValid(source_idx)) { + SetNull(append_data, validity_data, current_byte, current_bit); + } else if (!data[source_idx]) { + UnsetBit(result_data, current_byte, current_bit); + } + NextBit(current_byte, current_bit); + } + append_data.row_count += size; + } - template basic_format_arg deserialize() const { - basic_format_arg arg; - std::memcpy(&arg, data, sizeof(basic_format_arg)); - return arg; - } + static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { + result->n_buffers = 2; + result->buffers[1] = append_data.main_buffer.data(); + } }; -template struct named_arg : named_arg_base { - const T& value; +//===--------------------------------------------------------------------===// +// Varchar +//===--------------------------------------------------------------------===// +struct ArrowVarcharConverter { + template + static idx_t GetLength(SRC input) { + return input.GetSize(); + } - named_arg(basic_string_view name, const T& val) - : named_arg_base(name), value(val) {} + template + static void WriteData(data_ptr_t target, SRC input) { + memcpy(target, input.GetDataUnsafe(), input.GetSize()); + } }; -template ::value)> -inline void check_format_string(const S&) { -#if defined(FMT_ENFORCE_COMPILE_STRING) - static_assert(is_compile_string::value, - "FMT_ENFORCE_COMPILE_STRING requires all format strings to " - "utilize FMT_STRING() or fmt()."); -#endif -} -template ::value)> -void check_format_string(S); +struct ArrowUUIDConverter { + template + static idx_t GetLength(SRC input) { + return UUID::STRING_SIZE; + } -struct view {}; -template struct bool_pack; -template -using all_true = - std::is_same, bool_pack>; + template + static void WriteData(data_ptr_t target, SRC input) { + UUID::ToString(input, (char *)target); + } +}; -template > -inline format_arg_store, remove_reference_t...> -make_args_checked(const S& format_str, - const remove_reference_t&... 
args) { - static_assert(all_true<(!std::is_base_of>() || - !std::is_reference())...>::value, - "passing views as lvalues is disallowed"); - check_format_string>...>(format_str); - return {args...}; -} +template +struct ArrowVarcharData { + static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { + result.main_buffer.reserve((capacity + 1) * sizeof(uint32_t)); + result.aux_buffer.reserve(capacity); + } -template -std::basic_string vformat(basic_string_view format_str, - basic_format_args> args); + static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { + UnifiedVectorFormat format; + input.ToUnifiedFormat(size, format); -template -typename buffer_context::iterator vformat_to( - buffer& buf, basic_string_view format_str, - basic_format_args> args); -} // namespace internal + // resize the validity mask and set up the validity buffer for iteration + ResizeValidity(append_data.validity, append_data.row_count + size); + auto validity_data = (uint8_t *)append_data.validity.data(); -/** - \rst - Returns a named argument to be used in a formatting function. + // resize the offset buffer - the offset buffer holds the offsets into the child array + append_data.main_buffer.resize(append_data.main_buffer.size() + sizeof(uint32_t) * (size + 1)); + auto data = (SRC *)format.data; + auto offset_data = (uint32_t *)append_data.main_buffer.data(); + if (append_data.row_count == 0) { + // first entry + offset_data[0] = 0; + } + // now append the string data to the auxiliary buffer + // the auxiliary buffer's length depends on the string lengths, so we resize as required + auto last_offset = offset_data[append_data.row_count]; + for (idx_t i = 0; i < size; i++) { + auto source_idx = format.sel->get_index(i); + auto offset_idx = append_data.row_count + i + 1; - The named argument holds a reference and does not extend the lifetime - of its arguments. - Consequently, a dangling reference can accidentally be created. - The user should take care to only pass this function temporaries when - the named argument is itself a temporary, as per the following example. + if (!format.validity.RowIsValid(source_idx)) { + uint8_t current_bit; + idx_t current_byte; + GetBitPosition(append_data.row_count + i, current_byte, current_bit); + SetNull(append_data, validity_data, current_byte, current_bit); + offset_data[offset_idx] = last_offset; + continue; + } - **Example**:: + auto string_length = OP::GetLength(data[source_idx]); - fmt::print("Elapsed time: {s:.2f} seconds", fmt::arg("s", 1.23)); - \endrst - */ -template > -inline internal::named_arg arg(const S& name, const T& arg) { - static_assert(internal::is_string::value, ""); - return {name, arg}; -} + // append the offset data + auto current_offset = last_offset + string_length; + offset_data[offset_idx] = current_offset; -// Disable nested named arguments, e.g. ``arg("a", arg("b", 42))``. -template -void arg(S, internal::named_arg) = delete; + // resize the string buffer if required, and write the string data + append_data.aux_buffer.resize(current_offset); + OP::WriteData(append_data.aux_buffer.data() + last_offset, data[source_idx]); -/** Formats a string and writes the output to ``out``. */ -// GCC 8 and earlier cannot handle std::back_insert_iterator with -// vformat_to(...) overload, so SFINAE on iterator type instead. 
-template , - FMT_ENABLE_IF( - internal::is_contiguous_back_insert_iterator::value)> -OutputIt vformat_to(OutputIt out, const S& format_str, - basic_format_args> args) { - using container = remove_reference_t; - internal::container_buffer buf((internal::get_container(out))); - internal::vformat_to(buf, to_string_view(format_str), args); - return out; -} + last_offset = current_offset; + } + append_data.row_count += size; + } -template ::value&& internal::is_string::value)> -inline std::back_insert_iterator format_to( - std::back_insert_iterator out, const S& format_str, - Args&&... args) { - return vformat_to( - out, to_string_view(format_str), - {internal::make_args_checked(format_str, args...)}); -} + static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { + result->n_buffers = 3; + result->buffers[1] = append_data.main_buffer.data(); + result->buffers[2] = append_data.aux_buffer.data(); + } +}; -template > -inline std::basic_string vformat( - const S& format_str, basic_format_args> args) { - return internal::vformat(to_string_view(format_str), args); -} +//===--------------------------------------------------------------------===// +// Structs +//===--------------------------------------------------------------------===// +struct ArrowStructData { + static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { + auto &children = StructType::GetChildTypes(type); + for (auto &child : children) { + auto child_buffer = InitializeArrowChild(child.second, capacity); + result.child_data.push_back(move(child_buffer)); + } + } -/** - \rst - Formats arguments and returns the result as a string. + static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { + UnifiedVectorFormat format; + input.ToUnifiedFormat(size, format); - **Example**:: + AppendValidity(append_data, format, size); + // append the children of the struct + auto &children = StructVector::GetEntries(input); + for (idx_t child_idx = 0; child_idx < children.size(); child_idx++) { + auto &child = children[child_idx]; + auto &child_data = *append_data.child_data[child_idx]; + child_data.append_vector(child_data, *child, size); + } + append_data.row_count += size; + } - #include - std::string message = fmt::format("The answer is {}", 42); - \endrst -*/ -// Pass char_t as a default template parameter instead of using -// std::basic_string> to reduce the symbol size. -template > -inline std::basic_string format(const S& format_str, Args&&... 
args) { - return internal::vformat( - to_string_view(format_str), - {internal::make_args_checked(format_str, args...)}); -} + static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { + result->n_buffers = 1; -FMT_END_NAMESPACE + auto &child_types = StructType::GetChildTypes(type); + append_data.child_pointers.resize(child_types.size()); + result->children = append_data.child_pointers.data(); + result->n_children = child_types.size(); + for (idx_t i = 0; i < child_types.size(); i++) { + auto &child_type = child_types[i].second; + append_data.child_pointers[i] = FinalizeArrowChild(child_type, *append_data.child_data[i]); + } + } +}; -#endif // FMT_CORE_H_ +//===--------------------------------------------------------------------===// +// Lists +//===--------------------------------------------------------------------===// +void AppendListOffsets(ArrowAppendData &append_data, UnifiedVectorFormat &format, idx_t size, + vector &child_sel) { + // resize the offset buffer - the offset buffer holds the offsets into the child array + append_data.main_buffer.resize(append_data.main_buffer.size() + sizeof(uint32_t) * (size + 1)); + auto data = (list_entry_t *)format.data; + auto offset_data = (uint32_t *)append_data.main_buffer.data(); + if (append_data.row_count == 0) { + // first entry + offset_data[0] = 0; + } + // set up the offsets using the list entries + auto last_offset = offset_data[append_data.row_count]; + for (idx_t i = 0; i < size; i++) { + auto source_idx = format.sel->get_index(i); + auto offset_idx = append_data.row_count + i + 1; + if (!format.validity.RowIsValid(source_idx)) { + offset_data[offset_idx] = last_offset; + continue; + } -// LICENSE_CHANGE_END + // append the offset data + auto list_length = data[source_idx].length; + last_offset += list_length; + offset_data[offset_idx] = last_offset; + for (idx_t k = 0; k < list_length; k++) { + child_sel.push_back(data[source_idx].offset + k); + } + } +} -#include -#include -#include -#include -#include -#include -#include +struct ArrowListData { + static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { + auto &child_type = ListType::GetChildType(type); + result.main_buffer.reserve((capacity + 1) * sizeof(uint32_t)); + auto child_buffer = InitializeArrowChild(child_type, capacity); + result.child_data.push_back(move(child_buffer)); + } -#ifdef __clang__ -# define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__) -#else -# define FMT_CLANG_VERSION 0 -#endif + static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { + UnifiedVectorFormat format; + input.ToUnifiedFormat(size, format); -#ifdef __INTEL_COMPILER -# define FMT_ICC_VERSION __INTEL_COMPILER -#elif defined(__ICL) -# define FMT_ICC_VERSION __ICL -#else -# define FMT_ICC_VERSION 0 -#endif + vector child_indices; + AppendValidity(append_data, format, size); + AppendListOffsets(append_data, format, size, child_indices); -#ifdef __NVCC__ -# define FMT_CUDA_VERSION (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__) -#else -# define FMT_CUDA_VERSION 0 -#endif + // append the child vector of the list + SelectionVector child_sel(child_indices.data()); + auto &child = ListVector::GetEntry(input); + auto child_size = child_indices.size(); + child.Slice(child_sel, child_size); -#ifdef __has_builtin -# define FMT_HAS_BUILTIN(x) __has_builtin(x) -#else -# define FMT_HAS_BUILTIN(x) 0 -#endif + append_data.child_data[0]->append_vector(*append_data.child_data[0], child, child_size); + 
append_data.row_count += size; + } -#if FMT_HAS_CPP_ATTRIBUTE(fallthrough) && \ - (__cplusplus >= 201703 || FMT_GCC_VERSION != 0) -# define FMT_FALLTHROUGH [[fallthrough]] -#else -# define FMT_FALLTHROUGH -#endif + static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { + result->n_buffers = 2; + result->buffers[1] = append_data.main_buffer.data(); -#ifndef FMT_THROW -# if FMT_EXCEPTIONS -# if FMT_MSC_VER -FMT_BEGIN_NAMESPACE -namespace internal { -template inline void do_throw(const Exception& x) { - // Silence unreachable code warnings in MSVC because these are nearly - // impossible to fix in a generic code. - volatile bool b = true; - if (b) throw x; -} -} // namespace internal -FMT_END_NAMESPACE -# define FMT_THROW(x) internal::do_throw(x) -# else -# define FMT_THROW(x) throw x -# endif -# else -# define FMT_THROW(x) \ - do { \ - static_cast(sizeof(x)); \ - FMT_ASSERT(false, ""); \ - } while (false) -# endif -#endif + auto &child_type = ListType::GetChildType(type); + append_data.child_pointers.resize(1); + result->children = append_data.child_pointers.data(); + result->n_children = 1; + append_data.child_pointers[0] = FinalizeArrowChild(child_type, *append_data.child_data[0]); + } +}; -#ifndef FMT_USE_USER_DEFINED_LITERALS -// For Intel and NVIDIA compilers both they and the system gcc/msc support UDLs. -# if (FMT_HAS_FEATURE(cxx_user_literals) || FMT_GCC_VERSION >= 407 || \ - FMT_MSC_VER >= 1900) && \ - (!(FMT_ICC_VERSION || FMT_CUDA_VERSION) || FMT_ICC_VERSION >= 1500 || \ - FMT_CUDA_VERSION >= 700) -# define FMT_USE_USER_DEFINED_LITERALS 1 -# else -# define FMT_USE_USER_DEFINED_LITERALS 0 -# endif -#endif +//===--------------------------------------------------------------------===// +// Maps +//===--------------------------------------------------------------------===// +struct ArrowMapData { + static void Initialize(ArrowAppendData &result, const LogicalType &type, idx_t capacity) { + // map types are stored in a (too) clever way + // the main buffer holds the null values and the offsets + // then we have a single child, which is a struct of the map_type, and the key_type + result.main_buffer.reserve((capacity + 1) * sizeof(uint32_t)); -#ifndef FMT_USE_UDL_TEMPLATE -#define FMT_USE_UDL_TEMPLATE 0 -#endif + auto &key_type = MapType::KeyType(type); + auto &value_type = MapType::ValueType(type); + auto internal_struct = make_unique(); + internal_struct->child_data.push_back(InitializeArrowChild(key_type, capacity)); + internal_struct->child_data.push_back(InitializeArrowChild(value_type, capacity)); -// __builtin_clz is broken in clang with Microsoft CodeGen: -// https://github.com/fmtlib/fmt/issues/519 -#if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_clz)) && !FMT_MSC_VER -# define FMT_BUILTIN_CLZ(n) __builtin_clz(n) -#endif -#if (FMT_GCC_VERSION || FMT_HAS_BUILTIN(__builtin_clzll)) && !FMT_MSC_VER -# define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n) -#endif + result.child_data.push_back(move(internal_struct)); + } -// Some compilers masquerade as both MSVC and GCC-likes or otherwise support -// __builtin_clz and __builtin_clzll, so only define FMT_BUILTIN_CLZ using the -// MSVC intrinsics if the clz and clzll builtins are not available. 
-#if FMT_MSC_VER && !defined(FMT_BUILTIN_CLZLL) && !defined(_MANAGED) -# include // _BitScanReverse, _BitScanReverse64 + static void Append(ArrowAppendData &append_data, Vector &input, idx_t size) { + UnifiedVectorFormat format; + input.ToUnifiedFormat(size, format); -FMT_BEGIN_NAMESPACE -namespace internal { -// Avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning. -# ifndef __clang__ -# pragma intrinsic(_BitScanReverse) -# endif -inline uint32_t clz(uint32_t x) { - unsigned long r = 0; - _BitScanReverse(&r, x); + AppendValidity(append_data, format, size); + // maps exist as a struct of two lists, e.g. STRUCT(key VARCHAR[], value VARCHAR[]) + // since both lists are the same, arrow tries to be smart by storing the offsets only once + // we can append the offsets from any of the two children + auto &children = StructVector::GetEntries(input); - FMT_ASSERT(x != 0, ""); - // Static analysis complains about using uninitialized data - // "r", but the only way that can happen is if "x" is 0, - // which the callers guarantee to not happen. -# pragma warning(suppress : 6102) - return 31 - r; -} -# define FMT_BUILTIN_CLZ(n) internal::clz(n) + UnifiedVectorFormat child_format; + children[0]->ToUnifiedFormat(size, child_format); + vector child_indices; + AppendListOffsets(append_data, child_format, size, child_indices); -# if defined(_WIN64) && !defined(__clang__) -# pragma intrinsic(_BitScanReverse64) -# endif + // now we can append the children to the lists + auto &struct_entries = StructVector::GetEntries(input); + D_ASSERT(struct_entries.size() == 2); + SelectionVector child_sel(child_indices.data()); + auto &key_vector = ListVector::GetEntry(*struct_entries[0]); + auto &value_vector = ListVector::GetEntry(*struct_entries[1]); + auto list_size = child_indices.size(); + key_vector.Slice(child_sel, list_size); + value_vector.Slice(child_sel, list_size); -inline uint32_t clzll(uint64_t x) { - unsigned long r = 0; -# ifdef _WIN64 - _BitScanReverse64(&r, x); -# else - // Scan the high 32 bits. - if (_BitScanReverse(&r, static_cast(x >> 32))) return 63 - (r + 32); + // perform the append + auto &struct_data = *append_data.child_data[0]; + auto &key_data = *struct_data.child_data[0]; + auto &value_data = *struct_data.child_data[1]; + key_data.append_vector(key_data, key_vector, list_size); + value_data.append_vector(value_data, value_vector, list_size); - // Scan the low 32 bits. - _BitScanReverse(&r, static_cast(x)); -# endif + append_data.row_count += size; + struct_data.row_count += size; + } - FMT_ASSERT(x != 0, ""); - // Static analysis complains about using uninitialized data - // "r", but the only way that can happen is if "x" is 0, - // which the callers guarantee to not happen. -# pragma warning(suppress : 6102) - return 63 - r; -} -# define FMT_BUILTIN_CLZLL(n) internal::clzll(n) -} // namespace internal -FMT_END_NAMESPACE -#endif + static void Finalize(ArrowAppendData &append_data, const LogicalType &type, ArrowArray *result) { + // set up the main map buffer + result->n_buffers = 2; + result->buffers[1] = append_data.main_buffer.data(); -// Enable the deprecated numeric alignment. -#ifndef FMT_NUMERIC_ALIGN -# define FMT_NUMERIC_ALIGN 1 -#endif + // the main map buffer has a single child: a struct + append_data.child_pointers.resize(1); + result->children = append_data.child_pointers.data(); + result->n_children = 1; + append_data.child_pointers[0] = FinalizeArrowChild(type, *append_data.child_data[0]); -// Enable the deprecated percent specifier. 
-#ifndef FMT_DEPRECATED_PERCENT -# define FMT_DEPRECATED_PERCENT 0 -#endif + // now that struct has two children: the key and the value type + auto &struct_data = *append_data.child_data[0]; + auto &struct_result = append_data.child_pointers[0]; + struct_data.child_pointers.resize(2); + struct_result->n_buffers = 1; + struct_result->n_children = 2; + struct_result->length = struct_data.child_data[0]->row_count; + struct_result->children = struct_data.child_pointers.data(); -FMT_BEGIN_NAMESPACE -namespace internal { + D_ASSERT(struct_data.child_data[0]->row_count == struct_data.child_data[1]->row_count); -// A helper function to suppress bogus "conditional expression is constant" -// warnings. -template inline T const_check(T value) { return value; } + auto &key_type = MapType::KeyType(type); + auto &value_type = MapType::ValueType(type); + struct_data.child_pointers[0] = FinalizeArrowChild(key_type, *struct_data.child_data[0]); + struct_data.child_pointers[1] = FinalizeArrowChild(value_type, *struct_data.child_data[1]); -// An equivalent of `*reinterpret_cast(&source)` that doesn't have -// undefined behavior (e.g. due to type aliasing). -// Example: uint64_t d = bit_cast(2.718); -template -inline Dest bit_cast(const Source& source) { - static_assert(sizeof(Dest) == sizeof(Source), "size mismatch"); - Dest dest; - std::memcpy(&dest, &source, sizeof(dest)); - return dest; + // keys cannot have null values + if (struct_data.child_pointers[0]->null_count > 0) { + throw std::runtime_error("Arrow doesn't accept NULL keys on Maps"); + } + } +}; + +//! Append a data chunk to the underlying arrow array +void ArrowAppender::Append(DataChunk &input) { + D_ASSERT(types == input.GetTypes()); + for (idx_t i = 0; i < input.ColumnCount(); i++) { + root_data[i]->append_vector(*root_data[i], input.data[i], input.size()); + } + row_count += input.size(); +} +//===--------------------------------------------------------------------===// +// Initialize Arrow Child +//===--------------------------------------------------------------------===// +template +static void InitializeFunctionPointers(ArrowAppendData &append_data) { + append_data.initialize = OP::Initialize; + append_data.append_vector = OP::Append; + append_data.finalize = OP::Finalize; } -inline bool is_big_endian() { - auto u = 1u; - struct bytes { - char data[sizeof(u)]; - }; - return bit_cast(u).data[0] == 0; +static void InitializeFunctionPointers(ArrowAppendData &append_data, const LogicalType &type) { + // handle special logical types + switch (type.id()) { + case LogicalTypeId::BOOLEAN: + InitializeFunctionPointers(append_data); + break; + case LogicalTypeId::TINYINT: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::SMALLINT: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::DATE: + case LogicalTypeId::INTEGER: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::TIME: + case LogicalTypeId::TIMESTAMP_SEC: + case LogicalTypeId::TIMESTAMP_MS: + case LogicalTypeId::TIMESTAMP: + case LogicalTypeId::TIMESTAMP_NS: + case LogicalTypeId::TIMESTAMP_TZ: + case LogicalTypeId::TIME_TZ: + case LogicalTypeId::BIGINT: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::HUGEINT: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::UTINYINT: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::USMALLINT: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::UINTEGER: + 
InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::UBIGINT: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::FLOAT: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::DOUBLE: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::DECIMAL: + switch (type.InternalType()) { + case PhysicalType::INT16: + InitializeFunctionPointers>(append_data); + break; + case PhysicalType::INT32: + InitializeFunctionPointers>(append_data); + break; + case PhysicalType::INT64: + InitializeFunctionPointers>(append_data); + break; + case PhysicalType::INT128: + InitializeFunctionPointers>(append_data); + break; + default: + throw InternalException("Unsupported internal decimal type"); + } + break; + case LogicalTypeId::VARCHAR: + case LogicalTypeId::BLOB: + case LogicalTypeId::JSON: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::UUID: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::ENUM: + switch (type.InternalType()) { + case PhysicalType::UINT8: + InitializeFunctionPointers>(append_data); + break; + case PhysicalType::UINT16: + InitializeFunctionPointers>(append_data); + break; + case PhysicalType::UINT32: + InitializeFunctionPointers>(append_data); + break; + default: + throw InternalException("Unsupported internal enum type"); + } + break; + case LogicalTypeId::INTERVAL: + InitializeFunctionPointers>(append_data); + break; + case LogicalTypeId::STRUCT: + InitializeFunctionPointers(append_data); + break; + case LogicalTypeId::LIST: + InitializeFunctionPointers(append_data); + break; + case LogicalTypeId::MAP: + InitializeFunctionPointers(append_data); + break; + default: + throw InternalException("Unsupported type in DuckDB -> Arrow Conversion: %s\n", type.ToString()); + } } -// A fallback implementation of uintptr_t for systems that lack it. -struct fallback_uintptr { - unsigned char value[sizeof(void*)]; +unique_ptr InitializeArrowChild(const LogicalType &type, idx_t capacity) { + auto result = make_unique(); + InitializeFunctionPointers(*result, type); - fallback_uintptr() = default; - explicit fallback_uintptr(const void* p) { - *this = bit_cast(p); - if (is_big_endian()) { - for (size_t i = 0, j = sizeof(void*) - 1; i < j; ++i, --j) - std::swap(value[i], value[j]); - } - } -}; -#ifdef UINTPTR_MAX -using uintptr_t = ::uintptr_t; -inline uintptr_t to_uintptr(const void* p) { return bit_cast(p); } -#else -using uintptr_t = fallback_uintptr; -inline fallback_uintptr to_uintptr(const void* p) { - return fallback_uintptr(p); + auto byte_count = (capacity + 7) / 8; + result->validity.reserve(byte_count); + result->initialize(*result, type, capacity); + return result; } -#endif -// Returns the largest possible value for type T. Same as -// std::numeric_limits::max() but shorter and not affected by the max macro. -template constexpr T max_value() { - return (std::numeric_limits::max)(); -} -template constexpr int num_bits() { - return std::numeric_limits::digits; -} -template <> constexpr int num_bits() { - return static_cast(sizeof(void*) * - std::numeric_limits::digits); +static void ReleaseDuckDBArrowAppendArray(ArrowArray *array) { + if (!array || !array->release) { + return; + } + array->release = nullptr; + auto holder = static_cast(array->private_data); + delete holder; } -// An approximation of iterator_t for pre-C++20 systems. 
-template -using iterator_t = decltype(std::begin(std::declval())); +//===--------------------------------------------------------------------===// +// Finalize Arrow Child +//===--------------------------------------------------------------------===// +ArrowArray *FinalizeArrowChild(const LogicalType &type, ArrowAppendData &append_data) { + auto result = make_unique(); -// Detect the iterator category of *any* given type in a SFINAE-friendly way. -// Unfortunately, older implementations of std::iterator_traits are not safe -// for use in a SFINAE-context. -template -struct iterator_category : std::false_type {}; + result->private_data = nullptr; + result->release = ReleaseDuckDBArrowAppendArray; + result->n_children = 0; + result->null_count = 0; + result->offset = 0; + result->dictionary = nullptr; + result->buffers = append_data.buffers.data(); + result->null_count = append_data.null_count; + result->length = append_data.row_count; + result->buffers[0] = append_data.validity.data(); -template struct iterator_category { - using type = std::random_access_iterator_tag; -}; + if (append_data.finalize) { + append_data.finalize(append_data, type, result.get()); + } -template -struct iterator_category> { - using type = typename It::iterator_category; -}; + append_data.array = move(result); + return append_data.array.get(); +} -// Detect if *any* given type models the OutputIterator concept. -template class is_output_iterator { - // Check for mutability because all iterator categories derived from - // std::input_iterator_tag *may* also meet the requirements of an - // OutputIterator, thereby falling into the category of 'mutable iterators' - // [iterator.requirements.general] clause 4. The compiler reveals this - // property only at the point of *actually dereferencing* the iterator! - template - static decltype(*(std::declval())) test(std::input_iterator_tag); - template static char& test(std::output_iterator_tag); - template static const char& test(...); +//! Returns the underlying arrow array +ArrowArray ArrowAppender::Finalize() { + D_ASSERT(root_data.size() == types.size()); + auto root_holder = make_unique(); - using type = decltype(test(typename iterator_category::type{})); + ArrowArray result; + root_holder->child_pointers.resize(types.size()); + result.children = root_holder->child_pointers.data(); + result.n_children = types.size(); - public: - static const bool value = !std::is_const>::value; -}; + // Configure root array + result.length = row_count; + result.n_children = types.size(); + result.n_buffers = 1; + result.buffers = root_holder->buffers.data(); // there is no actual buffer there since we don't have NULLs + result.offset = 0; + result.null_count = 0; // needs to be 0 + result.dictionary = nullptr; + root_holder->child_data = move(root_data); -// A workaround for std::string not having mutable data() until C++17. -template inline Char* get_data(std::basic_string& s) { - return &s[0]; -} -template -inline typename Container::value_type* get_data(Container& c) { - return c.data(); -} + for (idx_t i = 0; i < root_holder->child_data.size(); i++) { + root_holder->child_pointers[i] = FinalizeArrowChild(types[i], *root_holder->child_data[i]); + } -#ifdef _SECURE_SCL -// Make a checked iterator to avoid MSVC warnings. 
-template using checked_ptr = stdext::checked_array_iterator; -template checked_ptr make_checked(T* p, std::size_t size) { - return {p, size}; + // Release ownership to caller + result.private_data = root_holder.release(); + result.release = ReleaseDuckDBArrowAppendArray; + return result; } -#else -template using checked_ptr = T*; -template inline T* make_checked(T* p, std::size_t) { return p; } -#endif -template ::value)> -inline checked_ptr reserve( - std::back_insert_iterator& it, std::size_t n) { - Container& c = get_container(it); - std::size_t size = c.size(); - c.resize(size + n); - return make_checked(get_data(c) + size, n); -} +} // namespace duckdb -template -inline Iterator& reserve(Iterator& it, std::size_t) { - return it; -} -// An output iterator that counts the number of objects written to it and -// discards them. -class counting_iterator { - private: - std::size_t count_; - public: - using iterator_category = std::output_iterator_tag; - using difference_type = std::ptrdiff_t; - using pointer = void; - using reference = void; - using _Unchecked_type = counting_iterator; // Mark iterator as checked. - struct value_type { - template void operator=(const T&) {} - }; - counting_iterator() : count_(0) {} - std::size_t count() const { return count_; } - counting_iterator& operator++() { - ++count_; - return *this; - } - counting_iterator operator++(int) { - auto it = *this; - ++*this; - return it; - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/types/sel_cache.hpp +// +// +//===----------------------------------------------------------------------===// - value_type operator*() const { return {}; } -}; -template class truncating_iterator_base { - protected: - OutputIt out_; - std::size_t limit_; - std::size_t count_; - truncating_iterator_base(OutputIt out, std::size_t limit) - : out_(out), limit_(limit), count_(0) {} - public: - using iterator_category = std::output_iterator_tag; - using difference_type = void; - using pointer = void; - using reference = void; - using _Unchecked_type = - truncating_iterator_base; // Mark iterator as checked. - OutputIt base() const { return out_; } - std::size_t count() const { return count_; } -}; -// An output iterator that truncates the output and counts the number of objects -// written to it. -template ::value_type>::type> -class truncating_iterator; +namespace duckdb { -template -class truncating_iterator - : public truncating_iterator_base { - using traits = std::iterator_traits; +//! Selection vector cache used for caching vector slices +struct SelCache { + unordered_map> cache; +}; - mutable typename traits::value_type blackhole_; +} // namespace duckdb - public: - using value_type = typename traits::value_type; +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/types/vector_cache.hpp +// +// +//===----------------------------------------------------------------------===// - truncating_iterator(OutputIt out, std::size_t limit) - : truncating_iterator_base(out, limit) {} - truncating_iterator& operator++() { - if (this->count_++ < this->limit_) ++this->out_; - return *this; - } - truncating_iterator operator++(int) { - auto it = *this; - ++*this; - return it; - } - value_type& operator*() const { - return this->count_ < this->limit_ ? 
*this->out_ : blackhole_; - } -}; -template -class truncating_iterator - : public truncating_iterator_base { - public: - using value_type = typename OutputIt::container_type::value_type; - truncating_iterator(OutputIt out, std::size_t limit) - : truncating_iterator_base(out, limit) {} - truncating_iterator& operator=(value_type val) { - if (this->count_++ < this->limit_) this->out_ = val; - return *this; - } +namespace duckdb { +class Allocator; +class Vector; - truncating_iterator& operator++() { return *this; } - truncating_iterator& operator++(int) { return *this; } - truncating_iterator& operator*() { return *this; } -}; +//! The VectorCache holds cached data that allows for re-use of the same memory by vectors +class VectorCache { +public: + //! Instantiate a vector cache with the given type and capacity + DUCKDB_API explicit VectorCache(Allocator &allocator, const LogicalType &type, + idx_t capacity = STANDARD_VECTOR_SIZE); -// A range with the specified output iterator and value type. -template -class output_range { - private: - OutputIt it_; + buffer_ptr buffer; - public: - using value_type = T; - using iterator = OutputIt; - struct sentinel {}; +public: + void ResetFromCache(Vector &result) const; - explicit output_range(OutputIt it) : it_(it) {} - OutputIt begin() const { return it_; } - sentinel end() const { return {}; } // Sentinel is not used yet. + const LogicalType &GetType() const; }; -template -inline size_t count_code_points(basic_string_view s) { - return s.size(); -} +} // namespace duckdb -// Counts the number of code points in a UTF-8 string. -inline size_t count_code_points(basic_string_view s) { - const char8_t* data = s.data(); - size_t num_code_points = 0; - for (size_t i = 0, size = s.size(); i != size; ++i) { - if ((data[i] & 0xc0) != 0x80) ++num_code_points; - } - return num_code_points; -} -template -inline size_t code_point_index(basic_string_view s, size_t n) { - size_t size = s.size(); - return n < size ? n : size; -} -// Calculates the index of the nth code point in a UTF-8 string. -inline size_t code_point_index(basic_string_view s, size_t n) { - const char8_t* data = s.data(); - size_t num_code_points = 0; - for (size_t i = 0, size = s.size(); i != size; ++i) { - if ((data[i] & 0xc0) != 0x80 && ++num_code_points > n) { - return i; - } - } - return s.size(); -} +#include -inline char8_t to_char8_t(char c) { return static_cast(c); } -template -using needs_conversion = bool_constant< - std::is_same::value_type, - char>::value && - std::is_same::value>; +namespace duckdb { -template ::value)> -OutputIt copy_str(InputIt begin, InputIt end, OutputIt it) { - return std::copy(begin, end, it); +void ArrowConverter::ToArrowArray(DataChunk &input, ArrowArray *out_array) { + ArrowAppender appender(input.GetTypes(), input.size()); + appender.Append(input); + *out_array = appender.Finalize(); } -template ::value)> -OutputIt copy_str(InputIt begin, InputIt end, OutputIt it) { - return std::transform(begin, end, it, to_char8_t); +//===--------------------------------------------------------------------===// +// Arrow Schema +//===--------------------------------------------------------------------===// +struct DuckDBArrowSchemaHolder { + // unused in children + vector children; + // unused in children + vector children_ptrs; + //! used for nested structures + std::list> nested_children; + std::list> nested_children_ptr; + //! 
This holds strings created to represent decimal types + vector> owned_type_names; +}; + +static void ReleaseDuckDBArrowSchema(ArrowSchema *schema) { + if (!schema || !schema->release) { + return; + } + schema->release = nullptr; + auto holder = static_cast(schema->private_data); + delete holder; } -#ifndef FMT_USE_GRISU -# define FMT_USE_GRISU 1 -#endif +void InitializeChild(ArrowSchema &child, const string &name = "") { + //! Child is cleaned up by parent + child.private_data = nullptr; + child.release = ReleaseDuckDBArrowSchema; -template constexpr bool use_grisu() { - return FMT_USE_GRISU && std::numeric_limits::is_iec559 && - sizeof(T) <= sizeof(double); + //! Store the child schema + child.flags = ARROW_FLAG_NULLABLE; + child.name = name.c_str(); + child.n_children = 0; + child.children = nullptr; + child.metadata = nullptr; + child.dictionary = nullptr; } +void SetArrowFormat(DuckDBArrowSchemaHolder &root_holder, ArrowSchema &child, const LogicalType &type, + string &config_timezone); -template -template -void buffer::append(const U* begin, const U* end) { - std::size_t new_size = size_ + to_unsigned(end - begin); - reserve(new_size); - std::uninitialized_copy(begin, end, make_checked(ptr_, capacity_) + size_); - size_ = new_size; +void SetArrowMapFormat(DuckDBArrowSchemaHolder &root_holder, ArrowSchema &child, const LogicalType &type, + string &config_timezone) { + child.format = "+m"; + //! Map has one child which is a struct + child.n_children = 1; + root_holder.nested_children.emplace_back(); + root_holder.nested_children.back().resize(1); + root_holder.nested_children_ptr.emplace_back(); + root_holder.nested_children_ptr.back().push_back(&root_holder.nested_children.back()[0]); + InitializeChild(root_holder.nested_children.back()[0]); + child.children = &root_holder.nested_children_ptr.back()[0]; + child.children[0]->name = "entries"; + child_list_t struct_child_types; + struct_child_types.push_back(std::make_pair("key", ListType::GetChildType(StructType::GetChildType(type, 0)))); + struct_child_types.push_back(std::make_pair("value", ListType::GetChildType(StructType::GetChildType(type, 1)))); + auto struct_type = LogicalType::STRUCT(move(struct_child_types)); + SetArrowFormat(root_holder, *child.children[0], struct_type, config_timezone); } -} // namespace internal -// A range with an iterator appending to a buffer. 
-template -class buffer_range : public internal::output_range< - std::back_insert_iterator>, T> { - public: - using iterator = std::back_insert_iterator>; - using internal::output_range::output_range; - buffer_range(internal::buffer& buf) - : internal::output_range(std::back_inserter(buf)) {} -}; +void SetArrowFormat(DuckDBArrowSchemaHolder &root_holder, ArrowSchema &child, const LogicalType &type, + string &config_timezone) { + switch (type.id()) { + case LogicalTypeId::BOOLEAN: + child.format = "b"; + break; + case LogicalTypeId::TINYINT: + child.format = "c"; + break; + case LogicalTypeId::SMALLINT: + child.format = "s"; + break; + case LogicalTypeId::INTEGER: + child.format = "i"; + break; + case LogicalTypeId::BIGINT: + child.format = "l"; + break; + case LogicalTypeId::UTINYINT: + child.format = "C"; + break; + case LogicalTypeId::USMALLINT: + child.format = "S"; + break; + case LogicalTypeId::UINTEGER: + child.format = "I"; + break; + case LogicalTypeId::UBIGINT: + child.format = "L"; + break; + case LogicalTypeId::FLOAT: + child.format = "f"; + break; + case LogicalTypeId::HUGEINT: + child.format = "d:38,0"; + break; + case LogicalTypeId::DOUBLE: + child.format = "g"; + break; + case LogicalTypeId::UUID: + case LogicalTypeId::JSON: + case LogicalTypeId::VARCHAR: + child.format = "u"; + break; + case LogicalTypeId::DATE: + child.format = "tdD"; + break; + case LogicalTypeId::TIME: + case LogicalTypeId::TIME_TZ: + child.format = "ttu"; + break; + case LogicalTypeId::TIMESTAMP: + child.format = "tsu:"; + break; + case LogicalTypeId::TIMESTAMP_TZ: { + string format = "tsu:" + config_timezone; + unique_ptr format_ptr = unique_ptr(new char[format.size() + 1]); + for (size_t i = 0; i < format.size(); i++) { + format_ptr[i] = format[i]; + } + format_ptr[format.size()] = '\0'; + root_holder.owned_type_names.push_back(move(format_ptr)); + child.format = root_holder.owned_type_names.back().get(); + break; + } + case LogicalTypeId::TIMESTAMP_SEC: + child.format = "tss:"; + break; + case LogicalTypeId::TIMESTAMP_NS: + child.format = "tsn:"; + break; + case LogicalTypeId::TIMESTAMP_MS: + child.format = "tsm:"; + break; + case LogicalTypeId::INTERVAL: + child.format = "tDm"; + break; + case LogicalTypeId::DECIMAL: { + uint8_t width, scale; + type.GetDecimalProperties(width, scale); + string format = "d:" + to_string(width) + "," + to_string(scale); + unique_ptr format_ptr = unique_ptr(new char[format.size() + 1]); + for (size_t i = 0; i < format.size(); i++) { + format_ptr[i] = format[i]; + } + format_ptr[format.size()] = '\0'; + root_holder.owned_type_names.push_back(move(format_ptr)); + child.format = root_holder.owned_type_names.back().get(); + break; + } + case LogicalTypeId::SQLNULL: { + child.format = "n"; + break; + } + case LogicalTypeId::BLOB: { + child.format = "z"; + break; + } + case LogicalTypeId::LIST: { + child.format = "+l"; + child.n_children = 1; + root_holder.nested_children.emplace_back(); + root_holder.nested_children.back().resize(1); + root_holder.nested_children_ptr.emplace_back(); + root_holder.nested_children_ptr.back().push_back(&root_holder.nested_children.back()[0]); + InitializeChild(root_holder.nested_children.back()[0]); + child.children = &root_holder.nested_children_ptr.back()[0]; + child.children[0]->name = "l"; + SetArrowFormat(root_holder, **child.children, ListType::GetChildType(type), config_timezone); + break; + } + case LogicalTypeId::STRUCT: { + child.format = "+s"; + auto &child_types = StructType::GetChildTypes(type); + child.n_children = 
child_types.size(); + root_holder.nested_children.emplace_back(); + root_holder.nested_children.back().resize(child_types.size()); + root_holder.nested_children_ptr.emplace_back(); + root_holder.nested_children_ptr.back().resize(child_types.size()); + for (idx_t type_idx = 0; type_idx < child_types.size(); type_idx++) { + root_holder.nested_children_ptr.back()[type_idx] = &root_holder.nested_children.back()[type_idx]; + } + child.children = &root_holder.nested_children_ptr.back()[0]; + for (size_t type_idx = 0; type_idx < child_types.size(); type_idx++) { -// A UTF-8 string view. -class u8string_view : public basic_string_view { - public: - u8string_view(const char* s) - : basic_string_view(reinterpret_cast(s)) {} - u8string_view(const char* s, size_t count) FMT_NOEXCEPT - : basic_string_view(reinterpret_cast(s), count) { - } -}; + InitializeChild(*child.children[type_idx]); -#if FMT_USE_USER_DEFINED_LITERALS -inline namespace literals { -inline u8string_view operator"" _u(const char* s, std::size_t n) { - return {s, n}; + auto &struct_col_name = child_types[type_idx].first; + unique_ptr name_ptr = unique_ptr(new char[struct_col_name.size() + 1]); + for (size_t i = 0; i < struct_col_name.size(); i++) { + name_ptr[i] = struct_col_name[i]; + } + name_ptr[struct_col_name.size()] = '\0'; + root_holder.owned_type_names.push_back(move(name_ptr)); + + child.children[type_idx]->name = root_holder.owned_type_names.back().get(); + SetArrowFormat(root_holder, *child.children[type_idx], child_types[type_idx].second, config_timezone); + } + break; + } + case LogicalTypeId::MAP: { + SetArrowMapFormat(root_holder, child, type, config_timezone); + break; + } + case LogicalTypeId::ENUM: { + // TODO what do we do with pointer enums here? + switch (EnumType::GetPhysicalType(type)) { + case PhysicalType::UINT8: + child.format = "C"; + break; + case PhysicalType::UINT16: + child.format = "S"; + break; + case PhysicalType::UINT32: + child.format = "I"; + break; + default: + throw InternalException("Unsupported Enum Internal Type"); + } + root_holder.nested_children.emplace_back(); + root_holder.nested_children.back().resize(1); + root_holder.nested_children_ptr.emplace_back(); + root_holder.nested_children_ptr.back().push_back(&root_holder.nested_children.back()[0]); + InitializeChild(root_holder.nested_children.back()[0]); + child.dictionary = root_holder.nested_children_ptr.back()[0]; + child.dictionary->format = "u"; + break; + } + default: + throw InternalException("Unsupported Arrow type " + type.ToString()); + } } -} // namespace literals -#endif -// The number of characters to store in the basic_memory_buffer object itself -// to avoid dynamic memory allocation. -enum { inline_buffer_size = 500 }; +void ArrowConverter::ToArrowSchema(ArrowSchema *out_schema, vector &types, vector &names, + string &config_timezone) { + D_ASSERT(out_schema); + D_ASSERT(types.size() == names.size()); + idx_t column_count = types.size(); + // Allocate as unique_ptr first to cleanup properly on error + auto root_holder = make_unique(); -/** - \rst - A dynamically growing memory buffer for trivially copyable/constructible types - with the first ``SIZE`` elements stored in the object itself. 
+ // Allocate the children + root_holder->children.resize(column_count); + root_holder->children_ptrs.resize(column_count, nullptr); + for (size_t i = 0; i < column_count; ++i) { + root_holder->children_ptrs[i] = &root_holder->children[i]; + } + out_schema->children = root_holder->children_ptrs.data(); + out_schema->n_children = column_count; - You can use one of the following type aliases for common character types: + // Store the schema + out_schema->format = "+s"; // struct apparently + out_schema->flags = 0; + out_schema->metadata = nullptr; + out_schema->name = "duckdb_query_result"; + out_schema->dictionary = nullptr; - +----------------+------------------------------+ - | Type | Definition | - +================+==============================+ - | memory_buffer | basic_memory_buffer | - +----------------+------------------------------+ - | wmemory_buffer | basic_memory_buffer | - +----------------+------------------------------+ + // Configure all child schemas + for (idx_t col_idx = 0; col_idx < column_count; col_idx++) { - **Example**:: + auto &child = root_holder->children[col_idx]; + InitializeChild(child, names[col_idx]); + SetArrowFormat(*root_holder, child, types[col_idx], config_timezone); + } - fmt::memory_buffer out; - format_to(out, "The answer is {}.", 42); + // Release ownership to caller + out_schema->private_data = root_holder.release(); + out_schema->release = ReleaseDuckDBArrowSchema; +} - This will append the following output to the ``out`` object: +} // namespace duckdb - .. code-block:: none - The answer is 42. - The output can be converted to an ``std::string`` with ``to_string(out)``. - \endrst - */ -template > -class basic_memory_buffer : private Allocator, public internal::buffer { - private: - T store_[SIZE]; - // Deallocate memory allocated by the buffer. - void deallocate() { - T* data = this->data(); - if (data != store_) Allocator::deallocate(data, this->capacity()); - } - protected: - void grow(std::size_t size) FMT_OVERRIDE; - public: - using value_type = T; - using const_reference = const T&; - explicit basic_memory_buffer(const Allocator& alloc = Allocator()) - : Allocator(alloc) { - this->set(store_, SIZE); - } - ~basic_memory_buffer() FMT_OVERRIDE { deallocate(); } - private: - // Move data from other to this buffer. - void move(basic_memory_buffer& other) { - Allocator &this_alloc = *this, &other_alloc = other; - this_alloc = std::move(other_alloc); - T* data = other.data(); - std::size_t size = other.size(), capacity = other.capacity(); - if (data == other.store_) { - this->set(store_, capacity); - std::uninitialized_copy(other.store_, other.store_ + size, - internal::make_checked(store_, capacity)); - } else { - this->set(data, capacity); - // Set pointer to the inline array so that delete is not called - // when deallocating. - other.set(other.store_, 0); - } - this->resize(size); - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/arrow/result_arrow_wrapper.hpp +// +// +//===----------------------------------------------------------------------===// - public: - /** - \rst - Constructs a :class:`fmt::basic_memory_buffer` object moving the content - of the other object to it. - \endrst - */ - basic_memory_buffer(basic_memory_buffer&& other) FMT_NOEXCEPT { move(other); } - /** - \rst - Moves the content of the other ``basic_memory_buffer`` object to this one. 
- \endrst - */ - basic_memory_buffer& operator=(basic_memory_buffer&& other) FMT_NOEXCEPT { - FMT_ASSERT(this != &other, ""); - deallocate(); - move(other); - return *this; - } - // Returns a copy of the allocator associated with this buffer. - Allocator get_allocator() const { return *this; } -}; -template -void basic_memory_buffer::grow(std::size_t size) { -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - if (size > 1000) throw std::runtime_error("fuzz mode - won't grow that much"); -#endif - std::size_t old_capacity = this->capacity(); - std::size_t new_capacity = old_capacity + old_capacity / 2; - if (size > new_capacity) new_capacity = size; - T* old_data = this->data(); - T* new_data = std::allocator_traits::allocate(*this, new_capacity); - // The following code doesn't throw, so the raw pointer above doesn't leak. - std::uninitialized_copy(old_data, old_data + this->size(), - internal::make_checked(new_data, new_capacity)); - this->set(new_data, new_capacity); - // deallocate must not throw according to the standard, but even if it does, - // the buffer already uses the new storage and will deallocate it in - // destructor. - if (old_data != store_) Allocator::deallocate(old_data, old_capacity); -} -using memory_buffer = basic_memory_buffer; -using wmemory_buffer = basic_memory_buffer; -namespace internal { +namespace duckdb { +class ResultArrowArrayStreamWrapper { +public: + explicit ResultArrowArrayStreamWrapper(unique_ptr result, idx_t batch_size); + ArrowArrayStream stream; + unique_ptr result; + PreservedError last_error; + idx_t batch_size; + vector column_types; + vector column_names; + string timezone_config; -// Returns true if value is negative, false otherwise. -// Same as `value < 0` but doesn't produce warnings if T is an unsigned type. -template ::is_signed)> -FMT_CONSTEXPR bool is_negative(T value) { - return value < 0; -} -template ::is_signed)> -FMT_CONSTEXPR bool is_negative(T) { - return false; -} +private: + static int MyStreamGetSchema(struct ArrowArrayStream *stream, struct ArrowSchema *out); + static int MyStreamGetNext(struct ArrowArrayStream *stream, struct ArrowArray *out); + static void MyStreamRelease(struct ArrowArrayStream *stream); + static const char *MyStreamGetLastError(struct ArrowArrayStream *stream); +}; +} // namespace duckdb -// Smallest of uint32_t, uint64_t, uint128_t that is large enough to -// represent all values of T. -template -using uint32_or_64_or_128_t = conditional_t< - std::numeric_limits::digits <= 32, uint32_t, - conditional_t::digits <= 64, uint64_t, uint128_t>>; -// Static data is placed in this class template for the header-only config. -template struct FMT_EXTERN_TEMPLATE_API basic_data { - static const uint64_t powers_of_10_64[]; - static const uint32_t zero_or_powers_of_10_32[]; - static const uint64_t zero_or_powers_of_10_64[]; - static const uint64_t pow10_significands[]; - static const int16_t pow10_exponents[]; - static const char digits[]; - static const char hex_digits[]; - static const char foreground_color[]; - static const char background_color[]; - static const char reset_color[5]; - static const wchar_t wreset_color[5]; - static const char signs[]; -}; -FMT_EXTERN template struct basic_data; -// This is a struct rather than an alias to avoid shadowing warnings in gcc. -struct data : basic_data<> {}; +namespace duckdb { -#ifdef FMT_BUILTIN_CLZLL -// Returns the number of decimal digits in n. Leading zeros are not counted -// except for n == 0 in which case count_digits returns 1. 
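For reference, the format strings assigned in SetArrowFormat above follow the Arrow C data interface encoding ("l" is int64, "u" is utf8, "d:38,0" maps HUGEINT onto a 128-bit decimal, and "+s", "+l", "+m" mark struct, list and map). A rough sketch of producing and releasing a schema with ToArrowSchema, assuming hypothetical types, names and tz variables describing a two-column result:

    // Sketch: build an ArrowSchema for an INTEGER and a VARCHAR column.
    vector<LogicalType> types {LogicalType::INTEGER, LogicalType::VARCHAR};
    vector<string> names {"id", "name"};
    string tz = "UTC"; // stands in for the configured time zone setting
    ArrowSchema schema;
    ArrowConverter::ToArrowSchema(&schema, types, names, tz);
    // ... inspect schema.format / schema.children ...
    if (schema.release) {
        schema.release(&schema); // deletes the DuckDBArrowSchemaHolder kept in private_data
    }
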
-inline int count_digits(uint64_t n) { - // Based on http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10 - // and the benchmark https://github.com/localvoid/cxx-benchmark-count-digits. - int t = (64 - FMT_BUILTIN_CLZLL(n | 1)) * 1233 >> 12; - return t - (n < data::zero_or_powers_of_10_64[t]) + 1; -} -#else -// Fallback version of count_digits used when __builtin_clz is not available. -inline int count_digits(uint64_t n) { - int count = 1; - for (;;) { - // Integer division is slow so do it for a group of four digits instead - // of for every digit. The idea comes from the talk by Alexandrescu - // "Three Optimization Tips for C++". See speed-test for a comparison. - if (n < 10) return count; - if (n < 100) return count + 1; - if (n < 1000) return count + 2; - if (n < 10000) return count + 3; - n /= 10000u; - count += 4; - } +ArrowSchemaWrapper::~ArrowSchemaWrapper() { + if (arrow_schema.release) { + for (int64_t child_idx = 0; child_idx < arrow_schema.n_children; child_idx++) { + auto &child = *arrow_schema.children[child_idx]; + if (child.release) { + child.release(&child); + } + } + arrow_schema.release(&arrow_schema); + arrow_schema.release = nullptr; + } } -#endif -#if FMT_USE_INT128 -inline int count_digits(uint128_t n) { - int count = 1; - for (;;) { - // Integer division is slow so do it for a group of four digits instead - // of for every digit. The idea comes from the talk by Alexandrescu - // "Three Optimization Tips for C++". See speed-test for a comparison. - if (n < 10) return count; - if (n < 100) return count + 1; - if (n < 1000) return count + 2; - if (n < 10000) return count + 3; - n /= 10000U; - count += 4; - } +ArrowArrayWrapper::~ArrowArrayWrapper() { + if (arrow_array.release) { + for (int64_t child_idx = 0; child_idx < arrow_array.n_children; child_idx++) { + auto &child = *arrow_array.children[child_idx]; + if (child.release) { + child.release(&child); + } + } + arrow_array.release(&arrow_array); + arrow_array.release = nullptr; + } } -#endif -// Counts the number of digits in n. BITS = log2(radix). 
-template inline int count_digits(UInt n) { - int num_digits = 0; - do { - ++num_digits; - } while ((n >>= BITS) != 0); - return num_digits; +ArrowArrayStreamWrapper::~ArrowArrayStreamWrapper() { + if (arrow_array_stream.release) { + arrow_array_stream.release(&arrow_array_stream); + arrow_array_stream.release = nullptr; + } } -template <> int count_digits<4>(internal::fallback_uintptr n); +void ArrowArrayStreamWrapper::GetSchema(ArrowSchemaWrapper &schema) { + D_ASSERT(arrow_array_stream.get_schema); + // LCOV_EXCL_START + if (arrow_array_stream.get_schema(&arrow_array_stream, &schema.arrow_schema)) { + throw InvalidInputException("arrow_scan: get_schema failed(): %s", string(GetError())); + } + if (!schema.arrow_schema.release) { + throw InvalidInputException("arrow_scan: released schema passed"); + } + if (schema.arrow_schema.n_children < 1) { + throw InvalidInputException("arrow_scan: empty schema passed"); + } + // LCOV_EXCL_STOP +} -#if FMT_GCC_VERSION || FMT_CLANG_VERSION -# define FMT_ALWAYS_INLINE inline __attribute__((always_inline)) -#else -# define FMT_ALWAYS_INLINE -#endif +shared_ptr ArrowArrayStreamWrapper::GetNextChunk() { + auto current_chunk = make_shared(); + if (arrow_array_stream.get_next(&arrow_array_stream, ¤t_chunk->arrow_array)) { // LCOV_EXCL_START + throw InvalidInputException("arrow_scan: get_next failed(): %s", string(GetError())); + } // LCOV_EXCL_STOP -#ifdef FMT_BUILTIN_CLZ -// Optional version of count_digits for better performance on 32-bit platforms. -inline int count_digits(uint32_t n) { - int t = (32 - FMT_BUILTIN_CLZ(n | 1)) * 1233 >> 12; - return t - (n < data::zero_or_powers_of_10_32[t]) + 1; + return current_chunk; } -#endif -template FMT_API std::string grouping_impl(locale_ref loc); -template inline std::string grouping(locale_ref loc) { - return grouping_impl(loc); -} -template <> inline std::string grouping(locale_ref loc) { - return grouping_impl(loc); -} +const char *ArrowArrayStreamWrapper::GetError() { // LCOV_EXCL_START + return arrow_array_stream.get_last_error(&arrow_array_stream); +} // LCOV_EXCL_STOP -template FMT_API Char thousands_sep_impl(locale_ref loc); -template inline Char thousands_sep(locale_ref loc) { - return Char(thousands_sep_impl(loc)); -} -template <> inline wchar_t thousands_sep(locale_ref loc) { - return thousands_sep_impl(loc); -} +int ResultArrowArrayStreamWrapper::MyStreamGetSchema(struct ArrowArrayStream *stream, struct ArrowSchema *out) { + if (!stream->release) { + return -1; + } + auto my_stream = (ResultArrowArrayStreamWrapper *)stream->private_data; + if (!my_stream->column_types.empty()) { + ArrowConverter::ToArrowSchema(out, my_stream->column_types, my_stream->column_names, + my_stream->timezone_config); + return 0; + } -template FMT_API Char decimal_point_impl(locale_ref loc); -template inline Char decimal_point(locale_ref loc) { - return Char(decimal_point_impl(loc)); -} -template <> inline wchar_t decimal_point(locale_ref loc) { - return decimal_point_impl(loc); + auto &result = *my_stream->result; + if (result.HasError()) { + my_stream->last_error = result.GetErrorObject(); + return -1; + } + if (result.type == QueryResultType::STREAM_RESULT) { + auto &stream_result = (StreamQueryResult &)result; + if (!stream_result.IsOpen()) { + my_stream->last_error = PreservedError("Query Stream is closed"); + return -1; + } + } + if (my_stream->column_types.empty()) { + my_stream->column_types = result.types; + my_stream->column_names = result.names; + } + ArrowConverter::ToArrowSchema(out, my_stream->column_types, 
my_stream->column_names, my_stream->timezone_config); + return 0; } -// Formats a decimal unsigned integer value writing into buffer. -// add_thousands_sep is called after writing each char to add a thousands -// separator if necessary. -template -inline Char* format_decimal(Char* buffer, UInt value, int num_digits, - F add_thousands_sep) { - FMT_ASSERT(num_digits >= 0, "invalid digit count"); - buffer += num_digits; - Char* end = buffer; - while (value >= 100) { - // Integer division is slow so do it for a group of two digits instead - // of for every digit. The idea comes from the talk by Alexandrescu - // "Three Optimization Tips for C++". See speed-test for a comparison. - auto index = static_cast((value % 100) * 2); - value /= 100; - *--buffer = static_cast(data::digits[index + 1]); - add_thousands_sep(buffer); - *--buffer = static_cast(data::digits[index]); - add_thousands_sep(buffer); - } - if (value < 10) { - *--buffer = static_cast('0' + value); - return end; - } - auto index = static_cast(value * 2); - *--buffer = static_cast(data::digits[index + 1]); - add_thousands_sep(buffer); - *--buffer = static_cast(data::digits[index]); - return end; +int ResultArrowArrayStreamWrapper::MyStreamGetNext(struct ArrowArrayStream *stream, struct ArrowArray *out) { + if (!stream->release) { + return -1; + } + auto my_stream = (ResultArrowArrayStreamWrapper *)stream->private_data; + auto &result = *my_stream->result; + if (result.HasError()) { + my_stream->last_error = result.GetErrorObject(); + return -1; + } + if (result.type == QueryResultType::STREAM_RESULT) { + auto &stream_result = (StreamQueryResult &)result; + if (!stream_result.IsOpen()) { + // Nothing to output + out->release = nullptr; + return 0; + } + } + if (my_stream->column_types.empty()) { + my_stream->column_types = result.types; + my_stream->column_names = result.names; + } + idx_t result_count; + PreservedError error; + if (!ArrowUtil::TryFetchChunk(&result, my_stream->batch_size, out, result_count, error)) { + D_ASSERT(error); + my_stream->last_error = error; + return -1; + } + if (result_count == 0) { + // Nothing to output + out->release = nullptr; + } + return 0; } -template constexpr int digits10() noexcept { - return std::numeric_limits::digits10; +void ResultArrowArrayStreamWrapper::MyStreamRelease(struct ArrowArrayStream *stream) { + if (!stream->release) { + return; + } + stream->release = nullptr; + delete (ResultArrowArrayStreamWrapper *)stream->private_data; } -template <> constexpr int digits10() noexcept { return 38; } -template <> constexpr int digits10() noexcept { return 38; } -template -inline Iterator format_decimal(Iterator out, UInt value, int num_digits, - F add_thousands_sep) { - FMT_ASSERT(num_digits >= 0, "invalid digit count"); - // Buffer should be large enough to hold all digits (<= digits10 + 1). 
- enum { max_size = digits10() + 1 }; - Char buffer[2 * max_size]; - auto end = format_decimal(buffer, value, num_digits, add_thousands_sep); - return internal::copy_str(buffer, end, out); +const char *ResultArrowArrayStreamWrapper::MyStreamGetLastError(struct ArrowArrayStream *stream) { + if (!stream->release) { + return "stream was released"; + } + D_ASSERT(stream->private_data); + auto my_stream = (ResultArrowArrayStreamWrapper *)stream->private_data; + return my_stream->last_error.Message().c_str(); } -template -inline It format_decimal(It out, UInt value, int num_digits) { - return format_decimal(out, value, num_digits, [](Char*) {}); +ResultArrowArrayStreamWrapper::ResultArrowArrayStreamWrapper(unique_ptr result_p, idx_t batch_size_p) + : result(move(result_p)) { + //! We first initialize the private data of the stream + stream.private_data = this; + //! Ceil Approx_Batch_Size/STANDARD_VECTOR_SIZE + if (batch_size_p == 0) { + throw std::runtime_error("Approximate Batch Size of Record Batch MUST be higher than 0"); + } + batch_size = batch_size_p; + //! We initialize the stream functions + stream.get_schema = ResultArrowArrayStreamWrapper::MyStreamGetSchema; + stream.get_next = ResultArrowArrayStreamWrapper::MyStreamGetNext; + stream.release = ResultArrowArrayStreamWrapper::MyStreamRelease; + stream.get_last_error = ResultArrowArrayStreamWrapper::MyStreamGetLastError; } -template -inline Char* format_uint(Char* buffer, UInt value, int num_digits, - bool upper = false) { - buffer += num_digits; - Char* end = buffer; - do { - const char* digits = upper ? "0123456789ABCDEF" : data::hex_digits; - unsigned digit = (value & ((1 << BASE_BITS) - 1)); - *--buffer = static_cast(BASE_BITS < 4 ? static_cast('0' + digit) - : digits[digit]); - } while ((value >>= BASE_BITS) != 0); - return end; +bool ArrowUtil::TryFetchNext(QueryResult &result, unique_ptr &chunk, PreservedError &error) { + if (result.type == QueryResultType::STREAM_RESULT) { + auto &stream_result = (StreamQueryResult &)result; + if (!stream_result.IsOpen()) { + return true; + } + } + return result.TryFetch(chunk, error); } -template -Char* format_uint(Char* buffer, internal::fallback_uintptr n, int num_digits, - bool = false) { - auto char_digits = std::numeric_limits::digits / 4; - int start = (num_digits + char_digits - 1) / char_digits - 1; - if (int start_digits = num_digits % char_digits) { - unsigned value = n.value[start--]; - buffer = format_uint(buffer, value, start_digits); - } - for (; start >= 0; --start) { - unsigned value = n.value[start]; - buffer += char_digits; - auto p = buffer; - for (int i = 0; i < char_digits; ++i) { - unsigned digit = (value & ((1 << BASE_BITS) - 1)); - *--p = static_cast(data::hex_digits[digit]); - value >>= BASE_BITS; - } - } - return buffer; +bool ArrowUtil::TryFetchChunk(QueryResult *result, idx_t chunk_size, ArrowArray *out, idx_t &count, + PreservedError &error) { + count = 0; + ArrowAppender appender(result->types, chunk_size); + while (count < chunk_size) { + unique_ptr data_chunk; + if (!TryFetchNext(*result, data_chunk, error)) { + if (result->HasError()) { + error = result->GetErrorObject(); + } + return false; + } + if (!data_chunk || data_chunk->size() == 0) { + break; + } + count += data_chunk->size(); + appender.Append(*data_chunk); + } + if (count > 0) { + *out = appender.Finalize(); + } + return true; } -template -inline It format_uint(It out, UInt value, int num_digits, bool upper = false) { - // Buffer should be large enough to hold all digits (digits / BASE_BITS + 1). 
- char buffer[num_bits() / BASE_BITS + 1]; - format_uint(buffer, value, num_digits, upper); - return internal::copy_str(buffer, buffer + num_digits, out); +idx_t ArrowUtil::FetchChunk(QueryResult *result, idx_t chunk_size, ArrowArray *out) { + PreservedError error; + idx_t result_count; + if (!TryFetchChunk(result, chunk_size, out, result_count, error)) { + error.Throw(); + } + return result_count; } -template struct null {}; +} // namespace duckdb -// Workaround an array initialization issue in gcc 4.8. -template struct fill_t { - private: - Char data_[6]; - public: - FMT_CONSTEXPR Char& operator[](size_t index) { return data_[index]; } - FMT_CONSTEXPR const Char& operator[](size_t index) const { - return data_[index]; - } - static FMT_CONSTEXPR fill_t make() { - auto fill = fill_t(); - fill[0] = Char(' '); - return fill; - } -}; -} // namespace internal +namespace duckdb { -// We cannot use enum classes as bit fields because of a gcc bug -// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414. -namespace align { -enum type { none, left, right, center, numeric }; +void DuckDBAssertInternal(bool condition, const char *condition_name, const char *file, int linenr) { + if (condition) { + return; + } + throw InternalException("Assertion triggered in file \"%s\" on line %d: %s%s", file, linenr, condition_name, + Exception::GetStackTrace()); } -using align_t = align::type; -namespace sign { -enum type { none, minus, plus, space }; -} -using sign_t = sign::type; +} // namespace duckdb +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/box_renderer.hpp +// +// +//===----------------------------------------------------------------------===// -// Format specifiers for built-in and string types. -template struct basic_format_specs { - int width; - int precision; - char type; - align_t align : 4; - sign_t sign : 3; - bool alt : 1; // Alternate form ('#'). - internal::fill_t fill; - constexpr basic_format_specs() - : width(0), - precision(-1), - type(0), - align(align::none), - sign(sign::none), - alt(false), - fill(internal::fill_t::make()) {} -}; -using format_specs = basic_format_specs; -namespace internal { -// A floating-point presentation format. -enum class float_format : unsigned char { - general, // General: exponent notation or fixed point based on magnitude. - exp, // Exponent notation with the default precision of 6, e.g. 1.2e-3. - fixed, // Fixed point with the default precision of 6, e.g. 0.0012. - hex -}; +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/main/query_profiler.hpp +// +// +//===----------------------------------------------------------------------===// -struct float_specs { - int precision; - float_format format : 8; - sign_t sign : 8; - bool upper : 1; - bool locale : 1; - bool percent : 1; - bool binary32 : 1; - bool use_grisu : 1; - bool trailing_zeros : 1; -}; -// Writes the exponent exp in the form "[+-]d{2,3}" to buffer. 
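Putting the stream pieces together: the constructor above registers the four static callbacks, so a consumer drives the result entirely through the ArrowArrayStream struct. A sketch of that consumer loop, assuming result is a unique_ptr to a finished QueryResult obtained elsewhere and an approximate batch size of 2048 rows:

    // Sketch: the wrapper is heap-allocated because MyStreamRelease deletes it
    // through the stream's private_data pointer.
    auto wrapper = new ResultArrowArrayStreamWrapper(move(result), 2048);
    auto &stream = wrapper->stream;

    ArrowSchema schema;
    if (stream.get_schema(&stream, &schema) != 0) {
        throw std::runtime_error(stream.get_last_error(&stream));
    }
    ArrowArray batch;
    while (stream.get_next(&stream, &batch) == 0 && batch.release) {
        // ... consume the batch ...
        batch.release(&batch);
    }
    schema.release(&schema);
    stream.release(&stream); // frees the wrapper and the wrapped QueryResult
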
-template It write_exponent(int exp, It it) { - FMT_ASSERT(-10000 < exp && exp < 10000, "exponent out of range"); - if (exp < 0) { - *it++ = static_cast('-'); - exp = -exp; - } else { - *it++ = static_cast('+'); - } - if (exp >= 100) { - const char* top = data::digits + (exp / 100) * 2; - if (exp >= 1000) *it++ = static_cast(top[0]); - *it++ = static_cast(top[1]); - exp %= 100; - } - const char* d = data::digits + exp * 2; - *it++ = static_cast(d[0]); - *it++ = static_cast(d[1]); - return it; -} -template class float_writer { - private: - // The number is given as v = digits_ * pow(10, exp_). - const char* digits_; - int num_digits_; - int exp_; - size_t size_; - float_specs specs_; - Char decimal_point_; - template It prettify(It it) const { - // pow(10, full_exp - 1) <= v <= pow(10, full_exp). - int full_exp = num_digits_ + exp_; - if (specs_.format == float_format::exp) { - // Insert a decimal point after the first digit and add an exponent. - *it++ = static_cast(*digits_); - int num_zeros = specs_.precision - num_digits_; - bool trailing_zeros = num_zeros > 0 && specs_.trailing_zeros; - if (num_digits_ > 1 || trailing_zeros) *it++ = decimal_point_; - it = copy_str(digits_ + 1, digits_ + num_digits_, it); - if (trailing_zeros) - it = std::fill_n(it, num_zeros, static_cast('0')); - *it++ = static_cast(specs_.upper ? 'E' : 'e'); - return write_exponent(full_exp - 1, it); - } - if (num_digits_ <= full_exp) { - // 1234e7 -> 12340000000[.0+] - it = copy_str(digits_, digits_ + num_digits_, it); - it = std::fill_n(it, full_exp - num_digits_, static_cast('0')); - if (specs_.trailing_zeros) { - *it++ = decimal_point_; - int num_zeros = specs_.precision - full_exp; - if (num_zeros <= 0) { - if (specs_.format != float_format::fixed) - *it++ = static_cast('0'); - return it; - } -#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION - if (num_zeros > 1000) - throw std::runtime_error("fuzz mode - avoiding excessive cpu use"); -#endif - it = std::fill_n(it, num_zeros, static_cast('0')); - } - } else if (full_exp > 0) { - // 1234e-2 -> 12.34[0+] - it = copy_str(digits_, digits_ + full_exp, it); - if (!specs_.trailing_zeros) { - // Remove trailing zeros. - int num_digits = num_digits_; - while (num_digits > full_exp && digits_[num_digits - 1] == '0') - --num_digits; - if (num_digits != full_exp) *it++ = decimal_point_; - return copy_str(digits_ + full_exp, digits_ + num_digits, it); - } - *it++ = decimal_point_; - it = copy_str(digits_ + full_exp, digits_ + num_digits_, it); - if (specs_.precision > num_digits_) { - // Add trailing zeros. - int num_zeros = specs_.precision - num_digits_; - it = std::fill_n(it, num_zeros, static_cast('0')); - } - } else { - // 1234e-6 -> 0.001234 - *it++ = static_cast('0'); - int num_zeros = -full_exp; - if (specs_.precision >= 0 && specs_.precision < num_zeros) - num_zeros = specs_.precision; - int num_digits = num_digits_; - if (!specs_.trailing_zeros) - while (num_digits > 0 && digits_[num_digits - 1] == '0') --num_digits; - if (num_zeros != 0 || num_digits != 0) { - *it++ = decimal_point_; - it = std::fill_n(it, num_zeros, static_cast('0')); - it = copy_str(digits_, digits_ + num_digits, it); - } - } - return it; - } - public: - float_writer(const char* digits, int num_digits, int exp, float_specs specs, - Char decimal_point) - : digits_(digits), - num_digits_(num_digits), - exp_(exp), - specs_(specs), - decimal_point_(decimal_point) { - int full_exp = num_digits + exp - 1; - int precision = specs.precision > 0 ? 
specs.precision : 16; - if (specs_.format == float_format::general && - !(full_exp >= -4 && full_exp < precision)) { - specs_.format = float_format::exp; - } - size_ = prettify(counting_iterator()).count(); - size_ += specs.sign ? 1 : 0; - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/profiler.hpp +// +// +//===----------------------------------------------------------------------===// - size_t size() const { return size_; } - size_t width() const { return size(); } - template void operator()(It&& it) { - if (specs_.sign) *it++ = static_cast(data::signs[specs_.sign]); - it = prettify(it); - } -}; -template -int format_float(T value, int precision, float_specs specs, buffer& buf); -// Formats a floating-point number with snprintf. -template -int snprintf_float(T value, int precision, float_specs specs, - buffer& buf); -template T promote_float(T value) { return value; } -inline double promote_float(float value) { return value; } -template -FMT_CONSTEXPR void handle_int_type_spec(char spec, Handler&& handler) { - switch (spec) { - case 0: - case 'd': - handler.on_dec(); - break; - case 'x': - case 'X': - handler.on_hex(); - break; - case 'b': - case 'B': - handler.on_bin(); - break; - case 'o': - handler.on_oct(); - break; - case 'n': - handler.on_num(); - break; - default: - handler.on_error(); - } -} +namespace duckdb { -template -FMT_CONSTEXPR float_specs parse_float_type_spec( - const basic_format_specs& specs, ErrorHandler&& eh = {}) { - auto result = float_specs(); - result.trailing_zeros = specs.alt; - switch (specs.type) { - case 0: - result.format = float_format::general; - result.trailing_zeros |= specs.precision != 0; - break; - case 'G': - result.upper = true; - FMT_FALLTHROUGH; - case 'g': - result.format = float_format::general; - break; - case 'E': - result.upper = true; - FMT_FALLTHROUGH; - case 'e': - result.format = float_format::exp; - result.trailing_zeros |= specs.precision != 0; - break; - case 'F': - result.upper = true; - FMT_FALLTHROUGH; - case 'f': - result.format = float_format::fixed; - result.trailing_zeros |= specs.precision != 0; - break; -#if FMT_DEPRECATED_PERCENT - case '%': - result.format = float_format::fixed; - result.percent = true; - break; -#endif - case 'A': - result.upper = true; - FMT_FALLTHROUGH; - case 'a': - result.format = float_format::hex; - break; - case 'n': - result.locale = true; - break; - default: - eh.on_error("invalid type specifier"); - break; - } - return result; -} +//! The profiler can be used to measure elapsed time +template +class BaseProfiler { +public: + //! Starts the timer + void Start() { + finished = false; + start = Tick(); + } + //! Finishes timing + void End() { + end = Tick(); + finished = true; + } -template -FMT_CONSTEXPR void handle_char_specs(const basic_format_specs* specs, - Handler&& handler) { - if (!specs) return handler.on_char(); - if (specs->type && specs->type != 'c') return handler.on_int(); - if (specs->align == align::numeric || specs->sign != sign::none || specs->alt) - handler.on_error("invalid format specifier for char"); - handler.on_char(); -} + //! Returns the elapsed time in seconds. If End() has been called, returns + //! the total elapsed time. Otherwise returns how far along the timer is + //! right now. + double Elapsed() const { + auto _end = finished ? 
end : Tick(); + return std::chrono::duration_cast>(_end - start).count(); + } -template -FMT_CONSTEXPR void handle_cstring_type_spec(Char spec, Handler&& handler) { - if (spec == 0 || spec == 's') - handler.on_string(); - else if (spec == 'p') - handler.on_pointer(); - else - handler.on_error("invalid type specifier"); -} +private: + time_point Tick() const { + return T::now(); + } + time_point start; + time_point end; + bool finished = false; +}; -template -FMT_CONSTEXPR void check_string_type_spec(Char spec, ErrorHandler&& eh) { - if (spec != 0 && spec != 's') eh.on_error("invalid type specifier"); -} +using Profiler = BaseProfiler; -template -FMT_CONSTEXPR void check_pointer_type_spec(Char spec, ErrorHandler&& eh) { - if (spec != 0 && spec != 'p') eh.on_error("invalid type specifier"); -} +} // namespace duckdb -template class int_type_checker : private ErrorHandler { - public: - FMT_CONSTEXPR explicit int_type_checker(ErrorHandler eh) : ErrorHandler(eh) {} - FMT_CONSTEXPR void on_dec() {} - FMT_CONSTEXPR void on_hex() {} - FMT_CONSTEXPR void on_bin() {} - FMT_CONSTEXPR void on_oct() {} - FMT_CONSTEXPR void on_num() {} - FMT_CONSTEXPR void on_error() { - ErrorHandler::on_error("invalid type specifier"); - } -}; -template -class char_specs_checker : public ErrorHandler { - private: - char type_; - public: - FMT_CONSTEXPR char_specs_checker(char type, ErrorHandler eh) - : ErrorHandler(eh), type_(type) {} - FMT_CONSTEXPR void on_int() { - handle_int_type_spec(type_, int_type_checker(*this)); - } - FMT_CONSTEXPR void on_char() {} -}; -template -class cstring_type_checker : public ErrorHandler { - public: - FMT_CONSTEXPR explicit cstring_type_checker(ErrorHandler eh) - : ErrorHandler(eh) {} +#include - FMT_CONSTEXPR void on_string() {} - FMT_CONSTEXPR void on_pointer() {} -}; -template -void arg_map::init(const basic_format_args& args) { - if (map_) return; - map_ = new entry[internal::to_unsigned(args.max_size())]; - if (args.is_packed()) { - for (int i = 0;; ++i) { - internal::type arg_type = args.type(i); - if (arg_type == internal::none_type) return; - if (arg_type == internal::named_arg_type) push_back(args.values_[i]); - } - } - for (int i = 0, n = args.max_size(); i < n; ++i) { - auto type = args.args_[i].type_; - if (type == internal::named_arg_type) push_back(args.args_[i].value_); - } -} -template struct nonfinite_writer { - sign_t sign; - const char* str; - static constexpr size_t str_size = 3; +namespace duckdb { +class ClientContext; +class ExpressionExecutor; +class PhysicalOperator; +class SQLStatement; - size_t size() const { return str_size + (sign ? 1 : 0); } - size_t width() const { return size(); } +//! The ExpressionInfo keeps information related to an expression +struct ExpressionInfo { + explicit ExpressionInfo() : hasfunction(false) { + } + // A vector of children + vector> children; + // Extract ExpressionInformation from a given expression state + void ExtractExpressionsRecursive(unique_ptr &state); - template void operator()(It&& it) const { - if (sign) *it++ = static_cast(data::signs[sign]); - it = copy_str(str, str + str_size, it); - } + //! Whether or not expression has function + bool hasfunction; + //! The function Name + string function_name; + //! The function time + uint64_t function_time = 0; + //! Count the number of ALL tuples + uint64_t tuples_count = 0; + //! Count the number of tuples sampled + uint64_t sample_tuples_count = 0; }; -// This template provides operations for formatting and writing data into a -// character range. 
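The BaseProfiler above is a plain start/stop timer: Elapsed() reports seconds and can be sampled both while the timer is still running and after End() has been called. A trivial usage sketch using only the Profiler alias defined above:

    Profiler timer;
    timer.Start();
    // ... the work being measured ...
    double so_far = timer.Elapsed(); // partial reading, timer still running
    timer.End();
    double total = timer.Elapsed();  // total elapsed seconds
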
-template class basic_writer { - public: - using char_type = typename Range::value_type; - using iterator = typename Range::iterator; - using format_specs = basic_format_specs; +//! The ExpressionRootInfo keeps information related to the root of an expression tree +struct ExpressionRootInfo { + ExpressionRootInfo(ExpressionExecutorState &executor, string name); - private: - iterator out_; // Output iterator. - locale_ref locale_; + //! Count the number of time the executor called + uint64_t total_count = 0; + //! Count the number of time the executor called since last sampling + uint64_t current_count = 0; + //! Count the number of samples + uint64_t sample_count = 0; + //! Count the number of tuples in all samples + uint64_t sample_tuples_count = 0; + //! Count the number of tuples processed by this executor + uint64_t tuples_count = 0; + //! A vector which contain the pointer to root of each expression tree + unique_ptr root; + //! Name + string name; + //! Elapsed time + double time; + //! Extra Info + string extra_info; +}; - // Attempts to reserve space for n extra characters in the output range. - // Returns a pointer to the reserved range or a reference to out_. - auto reserve(std::size_t n) -> decltype(internal::reserve(out_, n)) { - return internal::reserve(out_, n); - } +struct ExpressionExecutorInfo { + explicit ExpressionExecutorInfo() {}; + explicit ExpressionExecutorInfo(ExpressionExecutor &executor, const string &name, int id); - template struct padded_int_writer { - size_t size_; - string_view prefix; - char_type fill; - std::size_t padding; - F f; + //! A vector which contain the pointer to all ExpressionRootInfo + vector> roots; + //! Id, it will be used as index for executors_info vector + int id; +}; - size_t size() const { return size_; } - size_t width() const { return size_; } +struct OperatorInformation { + explicit OperatorInformation(double time_ = 0, idx_t elements_ = 0) : time(time_), elements(elements_) { + } - template void operator()(It&& it) const { - if (prefix.size() != 0) - it = copy_str(prefix.begin(), prefix.end(), it); - it = std::fill_n(it, padding, fill); - f(it); - } - }; + double time = 0; + idx_t elements = 0; + string name; + //! A vector of Expression Executor Info + vector> executors_info; +}; - // Writes an integer in the format - // - // where are written by f(it). - template - void write_int(int num_digits, string_view prefix, format_specs specs, F f) { - std::size_t size = prefix.size() + to_unsigned(num_digits); - char_type fill = specs.fill[0]; - std::size_t padding = 0; - if (specs.align == align::numeric) { - auto unsiged_width = to_unsigned(specs.width); - if (unsiged_width > size) { - padding = unsiged_width - size; - size = unsiged_width; - } - } else if (specs.precision > num_digits) { - size = prefix.size() + to_unsigned(specs.precision); - padding = to_unsigned(specs.precision - num_digits); - fill = static_cast('0'); - } - if (specs.align == align::none) specs.align = align::right; - write_padded(specs, padded_int_writer{size, prefix, fill, padding, f}); - } +//! The OperatorProfiler measures timings of individual operators +class OperatorProfiler { + friend class QueryProfiler; - // Writes a decimal integer. - template void write_decimal(Int value) { - auto abs_value = static_cast>(value); - bool negative = is_negative(value); - // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer. - if (negative) abs_value = ~abs_value + 1; - int num_digits = count_digits(abs_value); - auto&& it = reserve((negative ? 
1 : 0) + static_cast(num_digits)); - if (negative) *it++ = static_cast('-'); - it = format_decimal(it, abs_value, num_digits); - } +public: + DUCKDB_API explicit OperatorProfiler(bool enabled); - // The handle_int_type_spec handler that writes an integer. - template struct int_writer { - using unsigned_type = uint32_or_64_or_128_t; + DUCKDB_API void StartOperator(const PhysicalOperator *phys_op); + DUCKDB_API void EndOperator(DataChunk *chunk); + DUCKDB_API void Flush(const PhysicalOperator *phys_op, ExpressionExecutor *expression_executor, const string &name, + int id); - basic_writer& writer; - const Specs& specs; - unsigned_type abs_value; - char prefix[4]; - unsigned prefix_size; + ~OperatorProfiler() { + } - string_view get_prefix() const { return string_view(prefix, prefix_size); } +private: + void AddTiming(const PhysicalOperator *op, double time, idx_t elements); - int_writer(basic_writer& w, Int value, const Specs& s) - : writer(w), - specs(s), - abs_value(static_cast(value)), - prefix_size(0) { - if (is_negative(value)) { - prefix[0] = '-'; - ++prefix_size; - abs_value = 0 - abs_value; - } else if (specs.sign != sign::none && specs.sign != sign::minus) { - prefix[0] = specs.sign == sign::plus ? '+' : ' '; - ++prefix_size; - } - } + //! Whether or not the profiler is enabled + bool enabled; + //! The timer used to time the execution time of the individual Physical Operators + Profiler op; + //! The stack of Physical Operators that are currently active + const PhysicalOperator *active_operator; + //! A mapping of physical operators to recorded timings + unordered_map timings; +}; - struct dec_writer { - unsigned_type abs_value; - int num_digits; +//! The QueryProfiler can be used to measure timings of queries +class QueryProfiler { +public: + DUCKDB_API QueryProfiler(ClientContext &context); - template void operator()(It&& it) const { - it = internal::format_decimal(it, abs_value, num_digits); - } - }; +public: + struct TreeNode { + PhysicalOperatorType type; + string name; + string extra_info; + OperatorInformation info; + vector> children; + idx_t depth = 0; + }; - void on_dec() { - int num_digits = count_digits(abs_value); - writer.write_int(num_digits, get_prefix(), specs, - dec_writer{abs_value, num_digits}); - } + // Propagate save_location, enabled, detailed_enabled and automatic_print_format. 
+ void Propagate(QueryProfiler &qp); - struct hex_writer { - int_writer& self; - int num_digits; + using TreeMap = unordered_map; - template void operator()(It&& it) const { - it = format_uint<4, char_type>(it, self.abs_value, num_digits, - self.specs.type != 'x'); - } - }; +private: + unique_ptr CreateTree(PhysicalOperator *root, idx_t depth = 0); + void Render(const TreeNode &node, std::ostream &str) const; - void on_hex() { - if (specs.alt) { - prefix[prefix_size++] = '0'; - prefix[prefix_size++] = specs.type; - } - int num_digits = count_digits<4>(abs_value); - writer.write_int(num_digits, get_prefix(), specs, - hex_writer{*this, num_digits}); - } +public: + DUCKDB_API bool IsEnabled() const; + DUCKDB_API bool IsDetailedEnabled() const; + DUCKDB_API ProfilerPrintFormat GetPrintFormat() const; + DUCKDB_API bool PrintOptimizerOutput() const; + DUCKDB_API string GetSaveLocation() const; - template struct bin_writer { - unsigned_type abs_value; - int num_digits; + DUCKDB_API static QueryProfiler &Get(ClientContext &context); - template void operator()(It&& it) const { - it = format_uint(it, abs_value, num_digits); - } - }; + DUCKDB_API void StartQuery(string query, bool is_explain_analyze = false, bool start_at_optimizer = false); + DUCKDB_API void EndQuery(); - void on_bin() { - if (specs.alt) { - prefix[prefix_size++] = '0'; - prefix[prefix_size++] = static_cast(specs.type); - } - int num_digits = count_digits<1>(abs_value); - writer.write_int(num_digits, get_prefix(), specs, - bin_writer<1>{abs_value, num_digits}); - } + DUCKDB_API void StartExplainAnalyze(); - void on_oct() { - int num_digits = count_digits<3>(abs_value); - if (specs.alt && specs.precision <= num_digits && abs_value != 0) { - // Octal prefix '0' is counted as a digit, so only add it if precision - // is not greater than the number of digits. - prefix[prefix_size++] = '0'; - } - writer.write_int(num_digits, get_prefix(), specs, - bin_writer<3>{abs_value, num_digits}); - } + //! Adds the timings gathered by an OperatorProfiler to this query profiler + DUCKDB_API void Flush(OperatorProfiler &profiler); - enum { sep_size = 1 }; + DUCKDB_API void StartPhase(string phase); + DUCKDB_API void EndPhase(); - struct num_writer { - unsigned_type abs_value; - int size; - const std::string& groups; - char_type sep; + DUCKDB_API void Initialize(PhysicalOperator *root); - template void operator()(It&& it) const { - basic_string_view s(&sep, sep_size); - // Index of a decimal digit with the least significant digit having - // index 0. 
- int digit_index = 0; - std::string::const_iterator group = groups.cbegin(); - it = format_decimal( - it, abs_value, size, - [this, s, &group, &digit_index](char_type*& buffer) { - if (*group <= 0 || ++digit_index % *group != 0 || - *group == max_value()) - return; - if (group + 1 != groups.cend()) { - digit_index = 0; - ++group; - } - buffer -= s.size(); - std::uninitialized_copy(s.data(), s.data() + s.size(), - make_checked(buffer, s.size())); - }); - } - }; + DUCKDB_API string QueryTreeToString() const; + DUCKDB_API void QueryTreeToStream(std::ostream &str) const; + DUCKDB_API void Print(); - void on_num() { - std::string groups = grouping(writer.locale_); - if (groups.empty()) return on_dec(); - auto sep = thousands_sep(writer.locale_); - if (!sep) return on_dec(); - int num_digits = count_digits(abs_value); - int size = num_digits; - std::string::const_iterator group = groups.cbegin(); - while (group != groups.cend() && num_digits > *group && *group > 0 && - *group != max_value()) { - size += sep_size; - num_digits -= *group; - ++group; - } - if (group == groups.cend()) - size += sep_size * ((num_digits - 1) / groups.back()); - writer.write_int(size, get_prefix(), specs, - num_writer{abs_value, size, groups, sep}); - } + //! return the printed as a string. Unlike ToString, which is always formatted as a string, + //! the return value is formatted based on the current print format (see GetPrintFormat()). + DUCKDB_API string ToString() const; - FMT_NORETURN void on_error() { - FMT_THROW(duckdb::Exception("invalid type specifier")); - } - }; + DUCKDB_API string ToJSON() const; + DUCKDB_API void WriteToFile(const char *path, string &info) const; - template struct str_writer { - const Char* s; - size_t size_; + idx_t OperatorSize() { + return tree_map.size(); + } - size_t size() const { return size_; } - size_t width() const { - return count_code_points(basic_string_view(s, size_)); - } + void Finalize(TreeNode &node); - template void operator()(It&& it) const { - it = copy_str(s, s + size_, it); - } - }; +private: + ClientContext &context; - template struct pointer_writer { - UIntPtr value; - int num_digits; + //! Whether or not the query profiler is running + bool running; + //! The lock used for flushing information from a thread into the global query profiler + mutex flush_lock; - size_t size() const { return to_unsigned(num_digits) + 2; } - size_t width() const { return size(); } + //! Whether or not the query requires profiling + bool query_requires_profiling; - template void operator()(It&& it) const { - *it++ = static_cast('0'); - *it++ = static_cast('x'); - it = format_uint<4, char_type>(it, value, num_digits); - } - }; + //! The root of the query tree + unique_ptr root; + //! The query string + string query; + //! The timer used to time the execution time of the entire query + Profiler main_query; + //! A map of a Physical Operator pointer to a tree node + TreeMap tree_map; + //! Whether or not we are running as part of a explain_analyze query + bool is_explain_analyze; - public: - explicit basic_writer(Range out, locale_ref loc = locale_ref()) - : out_(out.begin()), locale_(loc) {} +public: + const TreeMap &GetTreeMap() const { + return tree_map; + } - iterator out() const { return out_; } +private: + //! The timer used to time the individual phases of the planning process + Profiler phase_profiler; + //! 
A mapping of the phase names to the timings + using PhaseTimingStorage = unordered_map; + PhaseTimingStorage phase_timings; + using PhaseTimingItem = PhaseTimingStorage::value_type; + //! The stack of currently active phases + vector phase_stack; - // Writes a value in the format - // - // where is written by f(it). - template void write_padded(const format_specs& specs, F&& f) { - // User-perceived width (in code points). - unsigned width = to_unsigned(specs.width); - size_t size = f.size(); // The number of code units. - size_t num_code_points = width != 0 ? f.width() : size; - if (width <= num_code_points) return f(reserve(size)); - auto&& it = reserve(width + (size - num_code_points)); - char_type fill = specs.fill[0]; - std::size_t padding = width - num_code_points; - if (specs.align == align::right) { - it = std::fill_n(it, padding, fill); - f(it); - } else if (specs.align == align::center) { - std::size_t left_padding = padding / 2; - it = std::fill_n(it, left_padding, fill); - f(it); - it = std::fill_n(it, padding - left_padding, fill); - } else { - f(it); - it = std::fill_n(it, padding, fill); - } - } +private: + vector GetOrderedPhaseTimings() const; - void write(int value) { write_decimal(value); } - void write(long value) { write_decimal(value); } - void write(long long value) { write_decimal(value); } + //! Check whether or not an operator type requires query profiling. If none of the ops in a query require profiling + //! no profiling information is output. + bool OperatorRequiresProfiling(PhysicalOperatorType op_type); +}; - void write(unsigned value) { write_decimal(value); } - void write(unsigned long value) { write_decimal(value); } - void write(unsigned long long value) { write_decimal(value); } +//! The QueryProfilerHistory can be used to access the profiler of previous queries +class QueryProfilerHistory { +private: + //! Previous Query profilers + deque>> prev_profilers; + //! Previous Query profilers size + uint64_t prev_profilers_size = 20; -#if FMT_USE_INT128 - void write(int128_t value) { write_decimal(value); } - void write(uint128_t value) { write_decimal(value); } -#endif +public: + deque>> &GetPrevProfilers() { + return prev_profilers; + } + QueryProfilerHistory() { + } - template - void write_int(T value, const Spec& spec) { - handle_int_type_spec(spec.type, int_writer(*this, value, spec)); - } + void SetPrevProfilersSize(uint64_t prevProfilersSize) { + prev_profilers_size = prevProfilersSize; + } + uint64_t GetPrevProfilersSize() const { + return prev_profilers_size; + } - template ::value)> - void write(T value, format_specs specs = {}) { - float_specs fspecs = parse_float_type_spec(specs); - fspecs.sign = specs.sign; - if (std::signbit(value)) { // value < 0 is false for NaN so use signbit. - fspecs.sign = sign::minus; - value = -value; - } else if (fspecs.sign == sign::minus) { - fspecs.sign = sign::none; - } +public: + void SetProfilerHistorySize(uint64_t size) { + this->prev_profilers_size = size; + } +}; +} // namespace duckdb - if (!std::isfinite(value)) { - auto str = std::isinf(value) ? (fspecs.upper ? "INF" : "inf") - : (fspecs.upper ? 
"NAN" : "nan"); - return write_padded(specs, nonfinite_writer{fspecs.sign, str}); - } +//===----------------------------------------------------------------------===// +// DuckDB +// +// duckdb/common/list.hpp +// +// +//===----------------------------------------------------------------------===// - if (specs.align == align::none) { - specs.align = align::right; - } else if (specs.align == align::numeric) { - if (fspecs.sign) { - auto&& it = reserve(1); - *it++ = static_cast(data::signs[fspecs.sign]); - fspecs.sign = sign::none; - if (specs.width != 0) --specs.width; - } - specs.align = align::right; - } - memory_buffer buffer; - if (fspecs.format == float_format::hex) { - if (fspecs.sign) buffer.push_back(data::signs[fspecs.sign]); - snprintf_float(promote_float(value), specs.precision, fspecs, buffer); - write_padded(specs, str_writer{buffer.data(), buffer.size()}); - return; - } - int precision = specs.precision >= 0 || !specs.type ? specs.precision : 6; - if (fspecs.format == float_format::exp) ++precision; - if (const_check(std::is_same())) fspecs.binary32 = true; - fspecs.use_grisu = use_grisu(); - if (const_check(FMT_DEPRECATED_PERCENT) && fspecs.percent) value *= 100; - int exp = format_float(promote_float(value), precision, fspecs, buffer); - if (const_check(FMT_DEPRECATED_PERCENT) && fspecs.percent) { - buffer.push_back('%'); - --exp; // Adjust decimal place position. - } - fspecs.precision = precision; - char_type point = fspecs.locale ? decimal_point(locale_) - : static_cast('.'); - write_padded(specs, float_writer(buffer.data(), - static_cast(buffer.size()), - exp, fspecs, point)); - } - void write(char value) { - auto&& it = reserve(1); - *it++ = value; - } +#include - template ::value)> - void write(Char value) { - auto&& it = reserve(1); - *it++ = value; - } +namespace duckdb { +using std::list; +} - void write(string_view value) { - auto&& it = reserve(value.size()); - it = copy_str(value.begin(), value.end(), it); - } - void write(wstring_view value) { - static_assert(std::is_same::value, ""); - auto&& it = reserve(value.size()); - it = std::copy(value.begin(), value.end(), it); - } - template - void write(const Char* s, std::size_t size, const format_specs& specs) { - write_padded(specs, str_writer{s, size}); - } +namespace duckdb { +class ColumnDataCollection; +class ColumnDataRowCollection; - template - void write(basic_string_view s, const format_specs& specs = {}) { - const Char* data = s.data(); - std::size_t size = s.size(); - if (specs.precision >= 0 && to_unsigned(specs.precision) < size) - size = code_point_index(s, to_unsigned(specs.precision)); - write(data, size, specs); - } +enum class ValueRenderAlignment { LEFT, MIDDLE, RIGHT }; - template - void write_pointer(UIntPtr value, const format_specs* specs) { - int num_digits = count_digits<4>(value); - auto pw = pointer_writer{value, num_digits}; - if (!specs) return pw(reserve(to_unsigned(num_digits) + 2)); - format_specs specs_copy = *specs; - if (specs_copy.align == align::none) specs_copy.align = align::right; - write_padded(specs_copy, pw); - } -}; +struct BoxRendererConfig { + // a max_width of 0 means we default to the terminal width + idx_t max_width = 0; + idx_t max_rows = 20; + // the max col width determines the maximum size of a single column + // note that the max col width is only used if the result does not fit on the screen + idx_t max_col_width = 20; + string null_value = "NULL"; -using writer = basic_writer>; +#ifndef DUCKDB_ASCII_TREE_RENDERER + const char *LTCORNER = "\342\224\214"; // "┌"; 
+ const char *RTCORNER = "\342\224\220"; // "┐"; + const char *LDCORNER = "\342\224\224"; // "└"; + const char *RDCORNER = "\342\224\230"; // "┘"; -template struct is_integral : std::is_integral {}; -template <> struct is_integral : std::true_type {}; -template <> struct is_integral : std::true_type {}; + const char *MIDDLE = "\342\224\274"; // "┼"; + const char *TMIDDLE = "\342\224\254"; // "┬"; + const char *LMIDDLE = "\342\224\234"; // "├"; + const char *RMIDDLE = "\342\224\244"; // "┤"; + const char *DMIDDLE = "\342\224\264"; // "┴"; -template -class arg_formatter_base { - public: - using char_type = typename Range::value_type; - using iterator = typename Range::iterator; - using format_specs = basic_format_specs; + const char *VERTICAL = "\342\224\202"; // "│"; + const char *HORIZONTAL = "\342\224\200"; // "─"; - private: - using writer_type = basic_writer; - writer_type writer_; - format_specs* specs_; + const char *DOTDOTDOT = "\xE2\x80\xA6"; // "…"; + const char *DOT = "\xC2\xB7"; // "·"; + const idx_t DOTDOTDOT_LENGTH = 1; - struct char_writer { - char_type value; +#else + // ASCII version + const char *LTCORNER = "<"; + const char *RTCORNER = ">"; + const char *LDCORNER = "<"; + const char *RDCORNER = ">"; - size_t size() const { return 1; } - size_t width() const { return 1; } + const char *MIDDLE = "+"; + const char *TMIDDLE = "+"; + const char *LMIDDLE = "+"; + const char *RMIDDLE = "+"; + const char *DMIDDLE = "+"; - template void operator()(It&& it) const { *it++ = value; } - }; + const char *VERTICAL = "|"; + const char *HORIZONTAL = "-"; - void write_char(char_type value) { - if (specs_) - writer_.write_padded(*specs_, char_writer{value}); - else - writer_.write(value); - } + const char *DOTDOTDOT = "..."; // "..."; + const char *DOT = "."; // "."; + const idx_t DOTDOTDOT_LENGTH = 3; +#endif +}; - void write_pointer(const void* p) { - writer_.write_pointer(internal::to_uintptr(p), specs_); - } +class BoxRenderer { + static const idx_t SPLIT_COLUMN; - protected: - writer_type& writer() { return writer_; } - FMT_DEPRECATED format_specs* spec() { return specs_; } - format_specs* specs() { return specs_; } - iterator out() { return writer_.out(); } +public: + explicit BoxRenderer(BoxRendererConfig config_p = BoxRendererConfig()); - void write(bool value) { - string_view sv(value ? "true" : "false"); - specs_ ? writer_.write(sv, *specs_) : writer_.write(sv); - } + string ToString(ClientContext &context, const vector &names, const ColumnDataCollection &op); - void write(const char_type* value) { - if (!value) { - FMT_THROW(duckdb::Exception("string pointer is null")); - } else { - auto length = std::char_traits::length(value); - basic_string_view sv(value, length); - specs_ ? writer_.write(sv, *specs_) : writer_.write(sv); - } - } + void Render(ClientContext &context, const vector &names, const ColumnDataCollection &op, std::ostream &ss); + void Print(ClientContext &context, const vector &names, const ColumnDataCollection &op); - public: - arg_formatter_base(Range r, format_specs* s, locale_ref loc) - : writer_(r, loc), specs_(s) {} +private: + //! 
The configuration used for rendering + BoxRendererConfig config; - iterator operator()(monostate) { - FMT_ASSERT(false, "invalid argument type"); - return out(); - } +private: + void RenderValue(std::ostream &ss, const string &value, idx_t column_width, + ValueRenderAlignment alignment = ValueRenderAlignment::MIDDLE); + string RenderType(const LogicalType &type); + ValueRenderAlignment TypeAlignment(const LogicalType &type); + string GetRenderValue(ColumnDataRowCollection &rows, idx_t c, idx_t r); - template ::value)> - iterator operator()(T value) { - if (specs_) - writer_.write_int(value, *specs_); - else - writer_.write(value); - return out(); - } + list FetchRenderCollections(ClientContext &context, const ColumnDataCollection &result, + idx_t top_rows, idx_t bottom_rows); + vector ComputeRenderWidths(const vector &names, const ColumnDataCollection &result, + list &collections, idx_t min_width, idx_t max_width, + vector &column_map, idx_t &total_length); + void RenderHeader(const vector &names, const vector &result_types, + const vector &column_map, const vector &widths, const vector &boundaries, + idx_t total_length, bool has_results, std::ostream &ss); + void RenderValues(const list &collections, const vector &column_map, + const vector &widths, const vector &result_types, std::ostream &ss); + void RenderRowCount(string row_count_str, string shown_str, const string &column_count_str, + const vector &boundaries, bool has_hidden_rows, bool has_hidden_columns, + idx_t total_length, idx_t row_count, idx_t column_count, idx_t minimum_row_length, + std::ostream &ss); +}; - iterator operator()(char_type value) { - internal::handle_char_specs( - specs_, char_spec_handler(*this, static_cast(value))); - return out(); - } +} // namespace duckdb - iterator operator()(bool value) { - if (specs_ && specs_->type) return (*this)(value ? 1 : 0); - write(value != 0); - return out(); - } - template ::value)> - iterator operator()(T value) { - writer_.write(value, specs_ ? 
*specs_ : format_specs()); - return out(); - } - struct char_spec_handler : ErrorHandler { - arg_formatter_base& formatter; - char_type value; - char_spec_handler(arg_formatter_base& f, char_type val) - : formatter(f), value(val) {} - void on_int() { - if (formatter.specs_) - formatter.writer_.write_int(value, *formatter.specs_); - else - formatter.writer_.write(value); - } - void on_char() { formatter.write_char(value); } - }; +// LICENSE_CHANGE_BEGIN +// The following code up to LICENSE_CHANGE_END is subject to THIRD PARTY LICENSE #2 +// See the end of this file for a list - struct cstring_spec_handler : internal::error_handler { - arg_formatter_base& formatter; - const char_type* value; - cstring_spec_handler(arg_formatter_base& f, const char_type* val) - : formatter(f), value(val) {} - void on_string() { formatter.write(value); } - void on_pointer() { formatter.write_pointer(value); } - }; +#include +#include +#include - iterator operator()(const char_type* value) { - if (!specs_) return write(value), out(); - internal::handle_cstring_type_spec(specs_->type, - cstring_spec_handler(*this, value)); - return out(); - } +namespace duckdb { - iterator operator()(basic_string_view value) { - if (specs_) { - internal::check_string_type_spec(specs_->type, internal::error_handler()); - writer_.write(value, *specs_); - } else { - writer_.write(value); - } - return out(); - } +enum class UnicodeType { INVALID, ASCII, UNICODE }; +enum class UnicodeInvalidReason { BYTE_MISMATCH, INVALID_UNICODE }; - iterator operator()(const void* value) { - if (specs_) - check_pointer_type_spec(specs_->type, internal::error_handler()); - write_pointer(value); - return out(); - } -}; +class Utf8Proc { +public: + //! Distinguishes ASCII, Valid UTF8 and Invalid UTF8 strings + static UnicodeType Analyze(const char *s, size_t len, UnicodeInvalidReason *invalid_reason = nullptr, size_t *invalid_pos = nullptr); + //! Performs UTF NFC normalization of string, return value needs to be free'd + static char* Normalize(const char* s, size_t len); + //! Returns whether or not the UTF8 string is valid + static bool IsValid(const char *s, size_t len); + //! Returns the position (in bytes) of the next grapheme cluster + static size_t NextGraphemeCluster(const char *s, size_t len, size_t pos); + //! Returns the position (in bytes) of the previous grapheme cluster + static size_t PreviousGraphemeCluster(const char *s, size_t len, size_t pos); -template FMT_CONSTEXPR bool is_name_start(Char c) { - return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || '_' == c; -} + //! Transform a codepoint to utf8 and writes it to "c", sets "sz" to the size of the codepoint + static bool CodepointToUtf8(int cp, int &sz, char *c); + //! Returns the codepoint length in bytes when encoded in UTF8 + static int CodepointLength(int cp); + //! Transform a UTF8 string to a codepoint; returns the codepoint and writes the length of the codepoint (in UTF8) to sz + static int32_t UTF8ToCodepoint(const char *c, int &sz); + //! Returns the render width of a single character in a string + static size_t RenderWidth(const char *s, size_t len, size_t pos); + static size_t RenderWidth(const std::string &str); + +}; -// Parses the range [begin, end) as an unsigned integer. This function assumes -// that the range is non-empty and the first character is a digit. 
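// A standalone, hedged sketch (not DuckDB code): it motivates why the renderer
// relies on Utf8Proc::RenderWidth declared above instead of std::string::size()
// when sizing columns. naive_render_width() is a hypothetical, simplified
// stand-in that treats every UTF-8 code point as one terminal column; the real
// Utf8Proc::RenderWidth also accounts for wide and zero-width characters.
#include <cstdio>
#include <string>

static size_t naive_render_width(const std::string &s) {
	size_t width = 0;
	for (unsigned char byte : s) {
		// count only bytes that start a code point, skip continuation bytes (10xxxxxx)
		if ((byte & 0xC0) != 0x80) {
			width++;
		}
	}
	return width;
}

int main() {
	std::string value = "h\xC3\xA9llo"; // "héllo": 6 bytes, 5 terminal columns
	std::printf("bytes=%zu columns=%zu\n", value.size(), naive_render_width(value));
	return 0;
}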
-template -FMT_CONSTEXPR int parse_nonnegative_int(const Char*& begin, const Char* end, - ErrorHandler&& eh) { - FMT_ASSERT(begin != end && '0' <= *begin && *begin <= '9', ""); - if (*begin == '0') { - ++begin; - return 0; - } - unsigned value = 0; - // Convert to unsigned to prevent a warning. - constexpr unsigned max_int = max_value(); - unsigned big = max_int / 10; - do { - // Check for overflow. - if (value > big) { - value = max_int + 1; - break; - } - value = value * 10 + unsigned(*begin - '0'); - ++begin; - } while (begin != end && '0' <= *begin && *begin <= '9'); - if (value > max_int) eh.on_error("number is too big"); - return static_cast(value); } -template class custom_formatter { - private: - using char_type = typename Context::char_type; - basic_format_parse_context& parse_ctx_; - Context& ctx_; +// LICENSE_CHANGE_END - public: - explicit custom_formatter(basic_format_parse_context& parse_ctx, - Context& ctx) - : parse_ctx_(parse_ctx), ctx_(ctx) {} - bool operator()(typename basic_format_arg::handle h) const { - h.format(parse_ctx_, ctx_); - return true; - } +#include - template bool operator()(T) const { return false; } -}; +namespace duckdb { -template -using is_integer = - bool_constant::value && !std::is_same::value && - !std::is_same::value && - !std::is_same::value>; +const idx_t BoxRenderer::SPLIT_COLUMN = idx_t(-1); -template class width_checker { - public: - explicit FMT_CONSTEXPR width_checker(ErrorHandler& eh) : handler_(eh) {} +BoxRenderer::BoxRenderer(BoxRendererConfig config_p) : config(move(config_p)) { +} - template ::value)> - FMT_CONSTEXPR unsigned long long operator()(T value) { - if (is_negative(value)) handler_.on_error("negative width"); - return static_cast(value); - } +string BoxRenderer::ToString(ClientContext &context, const vector &names, const ColumnDataCollection &result) { + std::stringstream ss; + Render(context, names, result, ss); + return ss.str(); +} - template ::value)> - FMT_CONSTEXPR unsigned long long operator()(T) { - handler_.on_error("width is not integer"); - return 0; - } +void BoxRenderer::Print(ClientContext &context, const vector &names, const ColumnDataCollection &result) { + Printer::Print(ToString(context, names, result)); +} - private: - ErrorHandler& handler_; -}; +void BoxRenderer::RenderValue(std::ostream &ss, const string &value, idx_t column_width, + ValueRenderAlignment alignment) { + auto render_width = Utf8Proc::RenderWidth(value); -template class precision_checker { - public: - explicit FMT_CONSTEXPR precision_checker(ErrorHandler& eh) : handler_(eh) {} + const string *render_value = &value; + string small_value; + if (render_width > column_width) { + // the string is too large to fit in this column! + // the size of this column must have been reduced + // figure out how much of this value we can render + idx_t pos = 0; + idx_t current_render_width = config.DOTDOTDOT_LENGTH; + while (pos < value.size()) { + // check if this character fits... + auto char_size = Utf8Proc::RenderWidth(value.c_str(), value.size(), pos); + if (current_render_width + char_size >= column_width) { + // it doesn't! stop + break; + } + // it does! 
move to the next character + current_render_width += char_size; + pos = Utf8Proc::NextGraphemeCluster(value.c_str(), value.size(), pos); + } + small_value = value.substr(0, pos) + config.DOTDOTDOT; + render_value = &small_value; + render_width = current_render_width; + } + auto padding_count = (column_width - render_width) + 2; + idx_t lpadding; + idx_t rpadding; + switch (alignment) { + case ValueRenderAlignment::LEFT: + lpadding = 1; + rpadding = padding_count - 1; + break; + case ValueRenderAlignment::MIDDLE: + lpadding = padding_count / 2; + rpadding = padding_count - lpadding; + break; + case ValueRenderAlignment::RIGHT: + lpadding = padding_count - 1; + rpadding = 1; + break; + default: + throw InternalException("Unrecognized value renderer alignment"); + } + ss << config.VERTICAL; + ss << string(lpadding, ' '); + ss << *render_value; + ss << string(rpadding, ' '); +} - template ::value)> - FMT_CONSTEXPR unsigned long long operator()(T value) { - if (is_negative(value)) handler_.on_error("negative precision"); - return static_cast(value); - } +string BoxRenderer::RenderType(const LogicalType &type) { + switch (type.id()) { + case LogicalTypeId::TINYINT: + return "int8"; + case LogicalTypeId::SMALLINT: + return "int16"; + case LogicalTypeId::INTEGER: + return "int32"; + case LogicalTypeId::BIGINT: + return "int64"; + case LogicalTypeId::HUGEINT: + return "int128"; + case LogicalTypeId::UTINYINT: + return "uint8"; + case LogicalTypeId::USMALLINT: + return "uint16"; + case LogicalTypeId::UINTEGER: + return "uint32"; + case LogicalTypeId::UBIGINT: + return "uint64"; + case LogicalTypeId::LIST: { + auto child = RenderType(ListType::GetChildType(type)); + return child + "[]"; + } + default: + return StringUtil::Lower(type.ToString()); + } +} - template ::value)> - FMT_CONSTEXPR unsigned long long operator()(T) { - handler_.on_error("precision is not integer"); - return 0; - } +ValueRenderAlignment BoxRenderer::TypeAlignment(const LogicalType &type) { + switch (type.id()) { + case LogicalTypeId::TINYINT: + case LogicalTypeId::SMALLINT: + case LogicalTypeId::INTEGER: + case LogicalTypeId::BIGINT: + case LogicalTypeId::HUGEINT: + case LogicalTypeId::UTINYINT: + case LogicalTypeId::USMALLINT: + case LogicalTypeId::UINTEGER: + case LogicalTypeId::UBIGINT: + case LogicalTypeId::DECIMAL: + case LogicalTypeId::FLOAT: + case LogicalTypeId::DOUBLE: + return ValueRenderAlignment::RIGHT; + default: + return ValueRenderAlignment::LEFT; + } +} - private: - ErrorHandler& handler_; -}; +list BoxRenderer::FetchRenderCollections(ClientContext &context, + const ColumnDataCollection &result, idx_t top_rows, + idx_t bottom_rows) { + auto column_count = result.ColumnCount(); + vector varchar_types; + for (idx_t c = 0; c < column_count; c++) { + varchar_types.emplace_back(LogicalType::VARCHAR); + } + std::list collections; + collections.emplace_back(context, varchar_types); + collections.emplace_back(context, varchar_types); -// A format specifier handler that sets fields in basic_format_specs. 
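// A standalone, hedged sketch (not part of the patch) mirroring the padding
// arithmetic in BoxRenderer::RenderValue above: every cell gets
// (column_width - render_width) + 2 spaces, split between the left and right
// side according to the alignment picked by TypeAlignment (numeric columns are
// right-aligned, everything else left-aligned). It assumes ASCII-only values,
// so the rendered width equals the string length; pad_cell() is a hypothetical helper.
#include <iostream>
#include <string>

enum class Alignment { LEFT, MIDDLE, RIGHT };

static std::string pad_cell(const std::string &value, size_t column_width, Alignment alignment) {
	size_t render_width = value.size(); // ASCII assumption: bytes == columns
	size_t padding_count = (column_width - render_width) + 2;
	size_t lpadding, rpadding;
	switch (alignment) {
	case Alignment::LEFT:
		lpadding = 1;
		rpadding = padding_count - 1;
		break;
	case Alignment::MIDDLE:
		lpadding = padding_count / 2;
		rpadding = padding_count - lpadding;
		break;
	default: // RIGHT
		lpadding = padding_count - 1;
		rpadding = 1;
		break;
	}
	return "|" + std::string(lpadding, ' ') + value + std::string(rpadding, ' ');
}

int main() {
	std::cout << pad_cell("42", 8, Alignment::RIGHT) << "|\n";   // |       42 |
	std::cout << pad_cell("name", 8, Alignment::LEFT) << "|\n";  // | name     |
	std::cout << pad_cell("hdr", 8, Alignment::MIDDLE) << "|\n"; // |   hdr    |
	return 0;
}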
-template class specs_setter { - public: - explicit FMT_CONSTEXPR specs_setter(basic_format_specs& specs) - : specs_(specs) {} + auto &top_collection = collections.front(); + auto &bottom_collection = collections.back(); - FMT_CONSTEXPR specs_setter(const specs_setter& other) - : specs_(other.specs_) {} + DataChunk fetch_result; + fetch_result.Initialize(context, result.Types()); - FMT_CONSTEXPR void on_align(align_t align) { specs_.align = align; } - FMT_CONSTEXPR void on_fill(Char fill) { specs_.fill[0] = fill; } - FMT_CONSTEXPR void on_plus() { specs_.sign = sign::plus; } - FMT_CONSTEXPR void on_minus() { specs_.sign = sign::minus; } - FMT_CONSTEXPR void on_space() { specs_.sign = sign::space; } - FMT_CONSTEXPR void on_hash() { specs_.alt = true; } + DataChunk insert_result; + insert_result.Initialize(context, varchar_types); - FMT_CONSTEXPR void on_zero() { - specs_.align = align::numeric; - specs_.fill[0] = Char('0'); - } + // fetch the top rows from the ColumnDataCollection + idx_t chunk_idx = 0; + idx_t row_idx = 0; + while (row_idx < top_rows) { + fetch_result.Reset(); + insert_result.Reset(); + // fetch the next chunk + result.FetchChunk(chunk_idx, fetch_result); + idx_t insert_count = MinValue(fetch_result.size(), top_rows - row_idx); - FMT_CONSTEXPR void on_width(int width) { specs_.width = width; } - FMT_CONSTEXPR void on_precision(int precision) { - specs_.precision = precision; - } - FMT_CONSTEXPR void end_precision() {} + // cast all columns to varchar + for (idx_t c = 0; c < column_count; c++) { + VectorOperations::Cast(context, fetch_result.data[c], insert_result.data[c], insert_count); + } + insert_result.SetCardinality(insert_count); - FMT_CONSTEXPR void on_type(Char type) { - specs_.type = static_cast(type); - } + // construct the render collection + top_collection.Append(insert_result); - protected: - basic_format_specs& specs_; -}; + chunk_idx++; + row_idx += fetch_result.size(); + } -template class numeric_specs_checker { - public: - FMT_CONSTEXPR numeric_specs_checker(ErrorHandler& eh, internal::type arg_type) - : error_handler_(eh), arg_type_(arg_type) {} + // fetch the bottom rows from the ColumnDataCollection + row_idx = 0; + chunk_idx = result.ChunkCount() - 1; + while (row_idx < bottom_rows) { + fetch_result.Reset(); + insert_result.Reset(); + // fetch the next chunk + result.FetchChunk(chunk_idx, fetch_result); + idx_t insert_count = MinValue(fetch_result.size(), bottom_rows - row_idx); - FMT_CONSTEXPR void require_numeric_argument() { - if (!is_arithmetic_type(arg_type_)) - error_handler_.on_error("format specifier requires numeric argument"); - } + // invert the rows + SelectionVector inverted_sel(insert_count); + for (idx_t r = 0; r < insert_count; r++) { + inverted_sel.set_index(r, fetch_result.size() - r - 1); + } - FMT_CONSTEXPR void check_sign() { - require_numeric_argument(); - if (is_integral_type(arg_type_) && arg_type_ != int_type && - arg_type_ != long_long_type && arg_type_ != internal::char_type) { - error_handler_.on_error("format specifier requires signed argument"); - } - } + for (idx_t c = 0; c < column_count; c++) { + Vector slice(fetch_result.data[c], inverted_sel, insert_count); + VectorOperations::Cast(context, slice, insert_result.data[c], insert_count); + } + insert_result.SetCardinality(insert_count); + // construct the render collection + bottom_collection.Append(insert_result); - FMT_CONSTEXPR void check_precision() { - if (is_integral_type(arg_type_) || arg_type_ == internal::pointer_type) - error_handler_.on_error("precision not 
allowed for this argument type"); - } + chunk_idx--; + row_idx += fetch_result.size(); + } + return collections; +} - private: - ErrorHandler& error_handler_; - internal::type arg_type_; -}; +string ConvertRenderValue(const string &input) { + return StringUtil::Replace(StringUtil::Replace(input, "\n", "\\n"), string("\0", 1), "\\0"); +} -// A format specifier handler that checks if specifiers are consistent with the -// argument type. -template class specs_checker : public Handler { - public: - FMT_CONSTEXPR specs_checker(const Handler& handler, internal::type arg_type) - : Handler(handler), checker_(*this, arg_type) {} +string BoxRenderer::GetRenderValue(ColumnDataRowCollection &rows, idx_t c, idx_t r) { + try { + auto row = rows.GetValue(c, r); + if (row.IsNull()) { + return config.null_value; + } + return ConvertRenderValue(StringValue::Get(row)); + } catch (std::exception &ex) { + return "????INVALID VALUE - " + string(ex.what()) + "?????"; + } +} - FMT_CONSTEXPR specs_checker(const specs_checker& other) - : Handler(other), checker_(*this, other.arg_type_) {} +vector BoxRenderer::ComputeRenderWidths(const vector &names, const ColumnDataCollection &result, + list &collections, idx_t min_width, + idx_t max_width, vector &column_map, idx_t &total_length) { + auto column_count = result.ColumnCount(); + auto &result_types = result.Types(); - FMT_CONSTEXPR void on_align(align_t align) { - if (align == align::numeric) checker_.require_numeric_argument(); - Handler::on_align(align); - } + vector widths; + widths.reserve(column_count); + for (idx_t c = 0; c < column_count; c++) { + auto name_width = Utf8Proc::RenderWidth(ConvertRenderValue(names[c])); + auto type_width = Utf8Proc::RenderWidth(RenderType(result_types[c])); + widths.push_back(MaxValue(name_width, type_width)); + } - FMT_CONSTEXPR void on_plus() { - checker_.check_sign(); - Handler::on_plus(); - } + // now iterate over the data in the render collection and find out the true max width + for (auto &collection : collections) { + for (auto &chunk : collection.Chunks()) { + for (idx_t c = 0; c < column_count; c++) { + auto string_data = FlatVector::GetData(chunk.data[c]); + for (idx_t r = 0; r < chunk.size(); r++) { + string render_value; + if (FlatVector::IsNull(chunk.data[c], r)) { + render_value = config.null_value; + } else { + render_value = ConvertRenderValue(string_data[r].GetString()); + } + auto render_width = Utf8Proc::RenderWidth(render_value); + widths[c] = MaxValue(render_width, widths[c]); + } + } + } + } - FMT_CONSTEXPR void on_minus() { - checker_.check_sign(); - Handler::on_minus(); - } + // figure out the total length + // we start off with a pipe (|) + total_length = 1; + for (idx_t c = 0; c < widths.size(); c++) { + // each column has a space at the beginning, and a space plus a pipe (|) at the end + // hence + 3 + total_length += widths[c] + 3; + } + if (total_length < min_width) { + // if there are hidden rows we should always display that + // stretch up the first column until we have space to show the row count + widths[0] += min_width - total_length; + total_length = min_width; + } + // now we need to constrain the length + unordered_set pruned_columns; + if (total_length > max_width) { + // before we remove columns, check if we can just reduce the size of columns + for (auto &w : widths) { + if (w > config.max_col_width) { + auto max_diff = w - config.max_col_width; + if (total_length - max_diff <= max_width) { + // if we reduce the size of this column we fit within the limits! 
+ // reduce the width exactly enough so that the box fits + w -= total_length - max_width; + total_length = max_width; + break; + } else { + // reducing the width of this column does not make the result fit + // reduce the column width by the maximum amount anyway + w = config.max_col_width; + total_length -= max_diff; + } + } + } - FMT_CONSTEXPR void on_space() { - checker_.check_sign(); - Handler::on_space(); - } + if (total_length > max_width) { + // the total length is still too large + // we need to remove columns! + // first, we add 6 characters to the total length + // this is what we need to add the "..." in the middle + total_length += 3 + config.DOTDOTDOT_LENGTH; + // now select columns to prune + // we select columns in zig-zag order starting from the middle + // e.g. if we have 10 columns, we remove #5, then #4, then #6, then #3, then #7, etc + int64_t offset = 0; + while (total_length > max_width) { + idx_t c = column_count / 2 + offset; + total_length -= widths[c] + 3; + pruned_columns.insert(c); + if (offset >= 0) { + offset = -offset - 1; + } else { + offset = -offset; + } + } + } + } - FMT_CONSTEXPR void on_hash() { - checker_.require_numeric_argument(); - Handler::on_hash(); - } + bool added_split_column = false; + vector new_widths; + for (idx_t c = 0; c < column_count; c++) { + if (pruned_columns.find(c) == pruned_columns.end()) { + column_map.push_back(c); + new_widths.push_back(widths[c]); + } else { + if (!added_split_column) { + // "..." + column_map.push_back(SPLIT_COLUMN); + new_widths.push_back(config.DOTDOTDOT_LENGTH); + added_split_column = true; + } + } + } + return new_widths; +} - FMT_CONSTEXPR void on_zero() { - checker_.require_numeric_argument(); - Handler::on_zero(); - } +void BoxRenderer::RenderHeader(const vector &names, const vector &result_types, + const vector &column_map, const vector &widths, + const vector &boundaries, idx_t total_length, bool has_results, + std::ostream &ss) { + auto column_count = column_map.size(); + // render the top line + ss << config.LTCORNER; + idx_t column_index = 0; + for (idx_t k = 0; k < total_length - 2; k++) { + if (column_index + 1 < column_count && k == boundaries[column_index]) { + ss << config.TMIDDLE; + column_index++; + } else { + ss << config.HORIZONTAL; + } + } + ss << config.RTCORNER; + ss << std::endl; - FMT_CONSTEXPR void end_precision() { checker_.check_precision(); } + // render the header names + for (idx_t c = 0; c < column_count; c++) { + auto column_idx = column_map[c]; + string name; + if (column_idx == SPLIT_COLUMN) { + name = config.DOTDOTDOT; + } else { + name = ConvertRenderValue(names[column_idx]); + } + RenderValue(ss, name, widths[c]); + } + ss << config.VERTICAL; + ss << std::endl; - private: - numeric_specs_checker checker_; -}; + // render the types + for (idx_t c = 0; c < column_count; c++) { + auto column_idx = column_map[c]; + auto type = column_idx == SPLIT_COLUMN ? "" : RenderType(result_types[column_idx]); + RenderValue(ss, type, widths[c]); + } + ss << config.VERTICAL; + ss << std::endl; -template
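// A standalone, hedged sketch (not part of the patch) of the zig-zag pruning
// order used in ComputeRenderWidths above when the rendered box is still wider
// than max_width: columns are dropped starting from the middle and alternating
// outwards, and the pruned range is replaced by a single "..." column. This
// toy program only prints the pruning order for 10 columns (#5, #4, #6, #3,
// #7, ...); the real code also subtracts each pruned column's width from the
// total length until the box fits.
#include <cstdio>

int main() {
	const long long column_count = 10;
	const int columns_to_prune = 5; // illustrative cut-off; the real loop stops when the box fits
	long long offset = 0;
	for (int i = 0; i < columns_to_prune; i++) {
		long long c = column_count / 2 + offset;
		std::printf("prune column #%lld\n", c);
		// same offset update as the DuckDB loop: 0, -1, 1, -2, 2, ...
		if (offset >= 0) {
			offset = -offset - 1;
		} else {
			offset = -offset;
		}
	}
	return 0;
}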